- qcom,sc8280xp-edp
- qcom,sdm845-dp
- qcom,sm8350-dp
+ - qcom,sm8650-dp
- items:
- enum:
- qcom,sm8250-dp
- qcom,sc7180-dsi-ctrl
- qcom,sc7280-dsi-ctrl
- qcom,sdm660-dsi-ctrl
+ - qcom,sdm670-dsi-ctrl
- qcom,sdm845-dsi-ctrl
- qcom,sm6115-dsi-ctrl
- qcom,sm6125-dsi-ctrl
- qcom,sm8350-dsi-ctrl
- qcom,sm8450-dsi-ctrl
- qcom,sm8550-dsi-ctrl
+ - qcom,sm8650-dsi-ctrl
- const: qcom,mdss-dsi-ctrl
- enum:
- qcom,dsi-ctrl-6g-qcm2290
- qcom,sm8350-dsi-ctrl
- qcom,sm8450-dsi-ctrl
- qcom,sm8550-dsi-ctrl
+ - qcom,sm8650-dsi-ctrl
then:
properties:
clocks:
- qcom,sm8350-dsi-phy-5nm
- qcom,sm8450-dsi-phy-5nm
- qcom,sm8550-dsi-phy-4nm
+ - qcom,sm8650-dsi-phy-4nm
reg:
items:
ranges: true
+ # This is not a perfect description, but it's impossible to discern and match
+ # the entries like we do with interconnect-names
interconnects:
minItems: 1
items:
- description: Interconnect path from mdp0 (or a single mdp) port to the data bus
- description: Interconnect path from mdp1 port to the data bus
+ - description: Interconnect path from CPU to the reg bus
interconnect-names:
- minItems: 1
- items:
- - const: mdp0-mem
- - const: mdp1-mem
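+    # Accept either a single MDP data path (optionally followed by the CPU-to-reg-bus
+    # path) or two MDP data paths (optionally followed by the CPU-to-reg-bus path)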
+ oneOf:
+ - minItems: 1
+ items:
+ - const: mdp0-mem
+ - const: cpu-cfg
+
+ - minItems: 2
+ items:
+ - const: mdp0-mem
+ - const: mdp1-mem
+ - const: cpu-cfg
resets:
items:
maxItems: 2
interconnects:
- maxItems: 1
+ items:
+ - description: Interconnect path from mdp0 port to the data bus
+ - description: Interconnect path from CPU to the reg bus
interconnect-names:
- maxItems: 1
+ items:
+ - const: mdp0-mem
+ - const: cpu-cfg
patternProperties:
"^display-controller@[0-9a-f]+$":
properties:
compatible:
- const: qcom,dsi-ctrl-6g-qcm2290
+ items:
+ - const: qcom,qcm2290-dsi-ctrl
+ - const: qcom,mdss-dsi-ctrl
"^phy@[0-9a-f]+$":
type: object
interrupt-controller;
#interrupt-cells = <1>;
- interconnects = <&mmrt_virt MASTER_MDP0 &bimc SLAVE_EBI1>;
- interconnect-names = "mdp0-mem";
+ interconnects = <&mmrt_virt MASTER_MDP0 &bimc SLAVE_EBI1>,
+ <&bimc MASTER_APPSS_PROC &config_noc SLAVE_DISPLAY_CFG>;
+ interconnect-names = "mdp0-mem",
+ "cpu-cfg";
iommus = <&apps_smmu 0x420 0x2>,
<&apps_smmu 0x421 0x0>;
};
dsi@5e94000 {
- compatible = "qcom,dsi-ctrl-6g-qcm2290";
+ compatible = "qcom,qcm2290-dsi-ctrl",
+ "qcom,mdss-dsi-ctrl";
reg = <0x05e94000 0x400>;
reg-names = "dsi_ctrl";
maxItems: 1
interconnects:
- maxItems: 1
+ items:
+ - description: Interconnect path from mdp0 port to the data bus
+ - description: Interconnect path from CPU to the reg bus
interconnect-names:
- maxItems: 1
+ items:
+ - const: mdp0-mem
+ - const: cpu-cfg
patternProperties:
"^display-controller@[0-9a-f]+$":
interrupt-controller;
#interrupt-cells = <1>;
- interconnects = <&mmss_noc MASTER_MDP0 &mc_virt SLAVE_EBI1>;
- interconnect-names = "mdp0-mem";
+ interconnects = <&mmss_noc MASTER_MDP0 &mc_virt SLAVE_EBI1>,
+ <&gem_noc MASTER_APPSS_PROC &config_noc SLAVE_DISPLAY_CFG>;
+ interconnect-names = "mdp0-mem",
+ "cpu-cfg";
iommus = <&apps_smmu 0x800 0x2>;
ranges;
maxItems: 1
interconnects:
- maxItems: 1
+ items:
+ - description: Interconnect path from mdp0 port to the data bus
+ - description: Interconnect path from CPU to the reg bus
interconnect-names:
- maxItems: 1
+ items:
+ - const: mdp0-mem
+ - const: cpu-cfg
patternProperties:
"^display-controller@[0-9a-f]+$":
interrupt-controller;
#interrupt-cells = <1>;
- interconnects = <&mmss_noc MASTER_MDP0 &mc_virt SLAVE_EBI1>;
- interconnect-names = "mdp0-mem";
+ interconnects = <&mmss_noc MASTER_MDP0 &mc_virt SLAVE_EBI1>,
+ <&gem_noc MASTER_APPSS_PROC &cnoc2 SLAVE_DISPLAY_CFG>;
+ interconnect-names = "mdp0-mem",
+ "cpu-cfg";
iommus = <&apps_smmu 0x900 0x402>;
ranges;
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/msm/qcom,sdm670-mdss.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SDM670 Display MDSS
+
+maintainers:
+
+description:
+ SDM670 MSM Mobile Display Subsystem (MDSS), which encapsulates sub-blocks
+ like DPU display controller, DSI and DP interfaces etc.
+
+$ref: /schemas/display/msm/mdss-common.yaml#
+
+properties:
+ compatible:
+ const: qcom,sdm670-mdss
+
+ clocks:
+ items:
+ - description: Display AHB clock from gcc
+ - description: Display core clock
+
+ clock-names:
+ items:
+ - const: iface
+ - const: core
+
+ iommus:
+ maxItems: 2
+
+ interconnects:
+ maxItems: 2
+
+ interconnect-names:
+ maxItems: 2
+
+patternProperties:
+ "^display-controller@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: qcom,sdm670-dpu
+
+ "^displayport-controller@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: qcom,sdm670-dp
+
+ "^dsi@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ contains:
+ const: qcom,sdm670-dsi-ctrl
+
+ "^phy@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ const: qcom,dsi-phy-10nm
+
+required:
+ - compatible
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+ #include <dt-bindings/clock/qcom,gcc-sdm845.h>
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/interconnect/qcom,sdm670-rpmh.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/qcom-rpmpd.h>
+
+ display-subsystem@ae00000 {
+ compatible = "qcom,sdm670-mdss";
+ reg = <0x0ae00000 0x1000>;
+ reg-names = "mdss";
+ power-domains = <&dispcc MDSS_GDSC>;
+
+ clocks = <&gcc GCC_DISP_AHB_CLK>,
+ <&dispcc DISP_CC_MDSS_MDP_CLK>;
+ clock-names = "iface", "core";
+
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interconnects = <&mmss_noc MASTER_MDP_PORT0 0 &mem_noc SLAVE_EBI_CH0 0>,
+ <&mmss_noc MASTER_MDP_PORT1 0 &mem_noc SLAVE_EBI_CH0 0>;
+ interconnect-names = "mdp0-mem", "mdp1-mem";
+
+ iommus = <&apps_smmu 0x880 0x8>,
+ <&apps_smmu 0xc80 0x8>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ display-controller@ae01000 {
+ compatible = "qcom,sdm670-dpu";
+ reg = <0x0ae01000 0x8f000>,
+ <0x0aeb0000 0x2008>;
+ reg-names = "mdp", "vbif";
+
+ clocks = <&gcc GCC_DISP_AXI_CLK>,
+ <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&dispcc DISP_CC_MDSS_AXI_CLK>,
+ <&dispcc DISP_CC_MDSS_MDP_CLK>,
+ <&dispcc DISP_CC_MDSS_VSYNC_CLK>;
+ clock-names = "gcc-bus", "iface", "bus", "core", "vsync";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <0>;
+ power-domains = <&rpmhpd SDM670_CX>;
+ operating-points-v2 = <&mdp_opp_table>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dpu_intf1_out: endpoint {
+ remote-endpoint = <&mdss_dsi0_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dpu_intf2_out: endpoint {
+ remote-endpoint = <&mdss_dsi1_in>;
+ };
+ };
+ };
+ };
+
+ dsi@ae94000 {
+ compatible = "qcom,sdm670-dsi-ctrl", "qcom,mdss-dsi-ctrl";
+ reg = <0x0ae94000 0x400>;
+ reg-names = "dsi_ctrl";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <4>;
+
+ clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>,
+ <&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
+ <&dispcc DISP_CC_MDSS_PCLK0_CLK>,
+ <&dispcc DISP_CC_MDSS_ESC0_CLK>,
+ <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&dispcc DISP_CC_MDSS_AXI_CLK>;
+ clock-names = "byte",
+ "byte_intf",
+ "pixel",
+ "core",
+ "iface",
+ "bus";
+ assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+ <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+ assigned-clock-parents = <&mdss_dsi0_phy 0>, <&mdss_dsi0_phy 1>;
+
+ operating-points-v2 = <&dsi_opp_table>;
+ power-domains = <&rpmhpd SDM670_CX>;
+
+ phys = <&mdss_dsi0_phy>;
+ phy-names = "dsi";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ mdss_dsi0_in: endpoint {
+ remote-endpoint = <&dpu_intf1_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ mdss_dsi0_out: endpoint {
+ };
+ };
+ };
+ };
+
+ mdss_dsi0_phy: phy@ae94400 {
+ compatible = "qcom,dsi-phy-10nm";
+ reg = <0x0ae94400 0x200>,
+ <0x0ae94600 0x280>,
+ <0x0ae94a00 0x1e0>;
+ reg-names = "dsi_phy",
+ "dsi_phy_lane",
+ "dsi_pll";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface", "ref";
+ vdds-supply = <&vreg_dsi_phy>;
+ };
+
+ dsi@ae96000 {
+ compatible = "qcom,sdm670-dsi-ctrl", "qcom,mdss-dsi-ctrl";
+ reg = <0x0ae96000 0x400>;
+ reg-names = "dsi_ctrl";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <5>;
+
+ clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK>,
+ <&dispcc DISP_CC_MDSS_BYTE1_INTF_CLK>,
+ <&dispcc DISP_CC_MDSS_PCLK1_CLK>,
+ <&dispcc DISP_CC_MDSS_ESC1_CLK>,
+ <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&dispcc DISP_CC_MDSS_AXI_CLK>;
+ clock-names = "byte",
+ "byte_intf",
+ "pixel",
+ "core",
+ "iface",
+ "bus";
+ assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK_SRC>,
+ <&dispcc DISP_CC_MDSS_PCLK1_CLK_SRC>;
+            assigned-clock-parents = <&mdss_dsi1_phy 0>, <&mdss_dsi1_phy 1>;
+
+ operating-points-v2 = <&dsi_opp_table>;
+ power-domains = <&rpmhpd SDM670_CX>;
+
+            phys = <&mdss_dsi1_phy>;
+ phy-names = "dsi";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ mdss_dsi1_in: endpoint {
+ remote-endpoint = <&dpu_intf2_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ mdss_dsi1_out: endpoint {
+ };
+ };
+ };
+ };
+
+ mdss_dsi1_phy: phy@ae96400 {
+ compatible = "qcom,dsi-phy-10nm";
+ reg = <0x0ae96400 0x200>,
+ <0x0ae96600 0x280>,
+                  <0x0ae96a00 0x1e0>;
+ reg-names = "dsi_phy",
+ "dsi_phy_lane",
+ "dsi_pll";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface", "ref";
+ vdds-supply = <&vreg_dsi_phy>;
+ };
+ };
+...
properties:
compatible:
- const: qcom,sdm845-dpu
+ enum:
+ - qcom,sdm670-dpu
+ - qcom,sdm845-dpu
reg:
items:
iommus:
maxItems: 2
+ interconnects:
+ items:
+ - description: Interconnect path from mdp0 port to the data bus
+ - description: Interconnect path from CPU to the reg bus
+
+ interconnect-names:
+ items:
+ - const: mdp0-mem
+ - const: cpu-cfg
+
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
maxItems: 1
interconnects:
- maxItems: 2
+ items:
+ - description: Interconnect path from mdp0 port to the data bus
+ - description: Interconnect path from CPU to the reg bus
interconnect-names:
- maxItems: 2
+ items:
+ - const: mdp0-mem
+ - const: cpu-cfg
patternProperties:
"^display-controller@[0-9a-f]+$":
maxItems: 1
interconnects:
- maxItems: 2
+ items:
+ - description: Interconnect path from mdp0 port to the data bus
+ - description: Interconnect path from CPU to the reg bus
interconnect-names:
- maxItems: 2
+ items:
+ - const: mdp0-mem
+ - const: cpu-cfg
patternProperties:
"^display-controller@[0-9a-f]+$":
maxItems: 1
interconnects:
- maxItems: 2
+ items:
+ - description: Interconnect path from mdp0 port to the data bus
+ - description: Interconnect path from CPU to the reg bus
interconnect-names:
- maxItems: 2
+ items:
+ - const: mdp0-mem
+ - const: cpu-cfg
patternProperties:
"^display-controller@[0-9a-f]+$":
properties:
compatible:
- const: qcom,dsi-phy-7nm
+ const: qcom,dsi-phy-7nm-8150
unevaluatedProperties: false
};
dsi0_phy: phy@ae94400 {
- compatible = "qcom,dsi-phy-7nm";
+ compatible = "qcom,dsi-phy-7nm-8150";
reg = <0x0ae94400 0x200>,
<0x0ae94600 0x280>,
<0x0ae94900 0x260>;
};
dsi1_phy: phy@ae96400 {
- compatible = "qcom,dsi-phy-7nm";
+ compatible = "qcom,dsi-phy-7nm-8150";
reg = <0x0ae96400 0x200>,
<0x0ae96600 0x280>,
<0x0ae96900 0x260>;
compatible:
const: qcom,sm8250-dpu
+ "^displayport-controller@[0-9a-f]+$":
+ type: object
+ additionalProperties: true
+
+ properties:
+ compatible:
+ items:
+ - const: qcom,sm8250-dp
+ - const: qcom,sm8350-dp
+
"^dsi@[0-9a-f]+$":
type: object
additionalProperties: true
maxItems: 1
interconnects:
- maxItems: 2
+ maxItems: 3
interconnect-names:
- maxItems: 2
+ maxItems: 3
patternProperties:
"^display-controller@[0-9a-f]+$":
reg = <0x0ae00000 0x1000>;
reg-names = "mdss";
- interconnects = <&mmss_noc MASTER_MDP_DISP 0 &mc_virt SLAVE_EBI1_DISP 0>,
- <&mmss_noc MASTER_MDP_DISP 0 &mc_virt SLAVE_EBI1_DISP 0>;
- interconnect-names = "mdp0-mem", "mdp1-mem";
+ interconnects = <&mmss_noc MASTER_MDP_DISP &mc_virt SLAVE_EBI1_DISP>,
+ <&mmss_noc MASTER_MDP_DISP &mc_virt SLAVE_EBI1_DISP>,
+ <&gem_noc MASTER_APPSS_PROC &config_noc SLAVE_DISPLAY_CFG>;
+ interconnect-names = "mdp0-mem",
+ "mdp1-mem",
+ "cpu-cfg";
resets = <&dispcc DISP_CC_MDSS_CORE_BCR>;
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/msm/qcom,sm8650-dpu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SM8650 Display DPU
+
+maintainers:
+
+$ref: /schemas/display/msm/dpu-common.yaml#
+
+properties:
+ compatible:
+ const: qcom,sm8650-dpu
+
+ reg:
+ items:
+ - description: Address offset and size for mdp register set
+ - description: Address offset and size for vbif register set
+
+ reg-names:
+ items:
+ - const: mdp
+ - const: vbif
+
+ clocks:
+ items:
+ - description: Display hf axi
+ - description: Display MDSS ahb
+ - description: Display lut
+ - description: Display core
+ - description: Display vsync
+
+ clock-names:
+ items:
+ - const: nrt_bus
+ - const: iface
+ - const: lut
+ - const: core
+ - const: vsync
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - clocks
+ - clock-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/qcom,rpmhpd.h>
+
+ display-controller@ae01000 {
+ compatible = "qcom,sm8650-dpu";
+ reg = <0x0ae01000 0x8f000>,
+ <0x0aeb0000 0x2008>;
+ reg-names = "mdp", "vbif";
+
+ clocks = <&gcc_axi_clk>,
+ <&dispcc_ahb_clk>,
+ <&dispcc_mdp_lut_clk>,
+ <&dispcc_mdp_clk>,
+ <&dispcc_vsync_clk>;
+ clock-names = "nrt_bus",
+ "iface",
+ "lut",
+ "core",
+ "vsync";
+
+ assigned-clocks = <&dispcc_vsync_clk>;
+ assigned-clock-rates = <19200000>;
+
+ operating-points-v2 = <&mdp_opp_table>;
+ power-domains = <&rpmhpd RPMHPD_MMCX>;
+
+ interrupt-parent = <&mdss>;
+ interrupts = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dpu_intf1_out: endpoint {
+ remote-endpoint = <&dsi0_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dpu_intf2_out: endpoint {
+ remote-endpoint = <&dsi1_in>;
+ };
+ };
+ };
+
+ mdp_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-200000000 {
+ opp-hz = /bits/ 64 <200000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-325000000 {
+ opp-hz = /bits/ 64 <325000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+
+ opp-375000000 {
+ opp-hz = /bits/ 64 <375000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>;
+ };
+
+ opp-514000000 {
+ opp-hz = /bits/ 64 <514000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ };
+ };
+ };
+...
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/msm/qcom,sm8650-mdss.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SM8650 Display MDSS
+
+maintainers:
+
+description:
+  SM8650 MSM Mobile Display Subsystem (MDSS), which encapsulates sub-blocks like
+ DPU display controller, DSI and DP interfaces etc.
+
+$ref: /schemas/display/msm/mdss-common.yaml#
+
+properties:
+ compatible:
+ const: qcom,sm8650-mdss
+
+ clocks:
+ items:
+ - description: Display AHB
+ - description: Display hf AXI
+ - description: Display core
+
+ iommus:
+ maxItems: 1
+
+ interconnects:
+ maxItems: 2
+
+ interconnect-names:
+ maxItems: 2
+
+patternProperties:
+ "^display-controller@[0-9a-f]+$":
+ type: object
+ properties:
+ compatible:
+ const: qcom,sm8650-dpu
+
+ "^displayport-controller@[0-9a-f]+$":
+ type: object
+ properties:
+ compatible:
+ const: qcom,sm8650-dp
+
+ "^dsi@[0-9a-f]+$":
+ type: object
+ properties:
+ compatible:
+ items:
+ - const: qcom,sm8650-dsi-ctrl
+ - const: qcom,mdss-dsi-ctrl
+
+ "^phy@[0-9a-f]+$":
+ type: object
+ properties:
+ compatible:
+ const: qcom,sm8650-dsi-phy-4nm
+
+required:
+ - compatible
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/qcom,rpmhpd.h>
+
+ display-subsystem@ae00000 {
+ compatible = "qcom,sm8650-mdss";
+ reg = <0x0ae00000 0x1000>;
+ reg-names = "mdss";
+
+ resets = <&dispcc_core_bcr>;
+
+ power-domains = <&dispcc_gdsc>;
+
+ clocks = <&gcc_ahb_clk>,
+ <&gcc_axi_clk>,
+ <&dispcc_mdp_clk>;
+ clock-names = "bus", "nrt_bus", "core";
+
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ iommus = <&apps_smmu 0x1c00 0x2>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ display-controller@ae01000 {
+ compatible = "qcom,sm8650-dpu";
+ reg = <0x0ae01000 0x8f000>,
+ <0x0aeb0000 0x2008>;
+ reg-names = "mdp", "vbif";
+
+ clocks = <&gcc_axi_clk>,
+ <&dispcc_ahb_clk>,
+ <&dispcc_mdp_lut_clk>,
+ <&dispcc_mdp_clk>,
+ <&dispcc_mdp_vsync_clk>;
+ clock-names = "nrt_bus",
+ "iface",
+ "lut",
+ "core",
+ "vsync";
+
+ assigned-clocks = <&dispcc_mdp_vsync_clk>;
+ assigned-clock-rates = <19200000>;
+
+ operating-points-v2 = <&mdp_opp_table>;
+ power-domains = <&rpmhpd RPMHPD_MMCX>;
+
+ interrupt-parent = <&mdss>;
+ interrupts = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dpu_intf1_out: endpoint {
+ remote-endpoint = <&dsi0_in>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dpu_intf2_out: endpoint {
+ remote-endpoint = <&dsi1_in>;
+ };
+ };
+ };
+
+ mdp_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-200000000 {
+ opp-hz = /bits/ 64 <200000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-325000000 {
+ opp-hz = /bits/ 64 <325000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+
+ opp-375000000 {
+ opp-hz = /bits/ 64 <375000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>;
+ };
+
+ opp-514000000 {
+ opp-hz = /bits/ 64 <514000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ };
+ };
+ };
+
+ dsi@ae94000 {
+ compatible = "qcom,sm8650-dsi-ctrl", "qcom,mdss-dsi-ctrl";
+ reg = <0x0ae94000 0x400>;
+ reg-names = "dsi_ctrl";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <4>;
+
+            clocks = <&dispcc_byte_clk>,
+ <&dispcc_intf_clk>,
+ <&dispcc_pclk>,
+ <&dispcc_esc_clk>,
+ <&dispcc_ahb_clk>,
+ <&gcc_bus_clk>;
+ clock-names = "byte",
+ "byte_intf",
+ "pixel",
+ "core",
+ "iface",
+ "bus";
+
+ assigned-clocks = <&dispcc_byte_clk>,
+ <&dispcc_pclk>;
+ assigned-clock-parents = <&dsi0_phy 0>, <&dsi0_phy 1>;
+
+ operating-points-v2 = <&dsi_opp_table>;
+ power-domains = <&rpmhpd RPMHPD_MMCX>;
+
+ phys = <&dsi0_phy>;
+ phy-names = "dsi";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi0_in: endpoint {
+ remote-endpoint = <&dpu_intf1_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi0_out: endpoint {
+ };
+ };
+ };
+
+ dsi_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-187500000 {
+ opp-hz = /bits/ 64 <187500000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+
+ opp-358000000 {
+ opp-hz = /bits/ 64 <358000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>;
+ };
+ };
+ };
+
+        dsi0_phy: phy@ae95000 {
+ compatible = "qcom,sm8650-dsi-phy-4nm";
+ reg = <0x0ae95000 0x200>,
+ <0x0ae95200 0x280>,
+ <0x0ae95500 0x400>;
+ reg-names = "dsi_phy",
+ "dsi_phy_lane",
+ "dsi_pll";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ clocks = <&dispcc_iface_clk>,
+ <&rpmhcc_ref_clk>;
+ clock-names = "iface", "ref";
+ };
+
+ dsi@ae96000 {
+ compatible = "qcom,sm8650-dsi-ctrl", "qcom,mdss-dsi-ctrl";
+ reg = <0x0ae96000 0x400>;
+ reg-names = "dsi_ctrl";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <5>;
+
+            clocks = <&dispcc_byte_clk>,
+ <&dispcc_intf_clk>,
+ <&dispcc_pclk>,
+ <&dispcc_esc_clk>,
+ <&dispcc_ahb_clk>,
+ <&gcc_bus_clk>;
+ clock-names = "byte",
+ "byte_intf",
+ "pixel",
+ "core",
+ "iface",
+ "bus";
+
+ assigned-clocks = <&dispcc_byte_clk>,
+ <&dispcc_pclk>;
+ assigned-clock-parents = <&dsi1_phy 0>, <&dsi1_phy 1>;
+
+ operating-points-v2 = <&dsi_opp_table>;
+ power-domains = <&rpmhpd RPMHPD_MMCX>;
+
+ phys = <&dsi1_phy>;
+ phy-names = "dsi";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi1_in: endpoint {
+ remote-endpoint = <&dpu_intf2_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi1_out: endpoint {
+ };
+ };
+ };
+ };
+
+        dsi1_phy: phy@ae97000 {
+ compatible = "qcom,sm8650-dsi-phy-4nm";
+ reg = <0x0ae97000 0x200>,
+ <0x0ae97200 0x280>,
+ <0x0ae97500 0x400>;
+ reg-names = "dsi_phy",
+ "dsi_phy_lane",
+ "dsi_pll";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+
+ clocks = <&dispcc_iface_clk>,
+ <&rpmhcc_ref_clk>;
+ clock-names = "iface", "ref";
+ };
+ };
+...
depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST
depends on COMMON_CLK
depends on IOMMU_SUPPORT
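+	# "FOO || FOO=n" keeps each helper optional: DRM_MSM may be built with FOO
+	# disabled, but cannot be built-in (=y) while FOO itself is a module (=m)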
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
depends on QCOM_OCMEM || QCOM_OCMEM=n
depends on QCOM_LLCC || QCOM_LLCC=n
depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n
if (adreno_is_a650(adreno_gpu) ||
adreno_is_a660(adreno_gpu) ||
+ adreno_is_a690(adreno_gpu) ||
adreno_is_a730(adreno_gpu) ||
adreno_is_a740_family(adreno_gpu)) {
/* TODO: get ddr type from bootloader and use 2 for LPDDR4 */
uavflagprd_inv = 2;
}
- if (adreno_is_a690(adreno_gpu)) {
- hbb_lo = 2;
- amsbc = 1;
- rgb565_predicator = 1;
- uavflagprd_inv = 2;
- }
-
if (adreno_is_7c3(adreno_gpu)) {
hbb_lo = 1;
amsbc = 1;
/* Setting the primFifo thresholds default values,
* and vccCacheSkipDis=1 bit (0x200) for A640 and newer
*/
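+	/* A690 uses a higher primFifo threshold than the rest of the A650/A660 family */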
- if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu) || adreno_is_a690(adreno_gpu))
+ if (adreno_is_a690(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00800200);
+ else if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
else if (adreno_is_a640_family(adreno_gpu) || adreno_is_7c3(adreno_gpu))
gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200);
if (adreno_is_a730(adreno_gpu) ||
adreno_is_a740_family(adreno_gpu))
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0xcfffff);
+ else if (adreno_is_a690(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x4fffff);
else if (adreno_is_a619(adreno_gpu))
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x3fffff);
else if (adreno_is_a610(adreno_gpu))
else
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0x1fffff);
- gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
+ gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, BIT(7) | 0x1);
/* Set weights for bicubic filtering */
if (adreno_is_a650_family(adreno_gpu)) {
a6xx_set_cp_protect(gpu);
if (adreno_is_a660_family(adreno_gpu)) {
- gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1);
+ if (adreno_is_a690(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x00028801);
+ else
+ gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, 0x1);
gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x0);
}
+ if (adreno_is_a690(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x90);
/* Set dualQ + disable afull for A660 GPU */
- if (adreno_is_a660(adreno_gpu))
+ else if (adreno_is_a660(adreno_gpu))
gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG, 0x66906);
else if (adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A6XX_UCHE_CMDQ_CONFIG,
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_10_0_SM8650_H
+#define _DPU_10_0_SM8650_H
+
+static const struct dpu_caps sm8650_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 8192,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg sm8650_mdp = {
+ .name = "top_0",
+ .base = 0, .len = 0x494,
+ .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ },
+};
+
+/* FIXME: get rid of DPU_CTL_SPLIT_DISPLAY in favour of proper ACTIVE_CTL support */
+static const struct dpu_ctl_cfg sm8650_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x1000,
+ .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x1000,
+ .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x1000,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x1000,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x1000,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x1000,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_sspp_cfg sm8650_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_3,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_3,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_3,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_3,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_12", .id = SSPP_DMA4,
+ .base = 0x2c000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 14,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_13", .id = SSPP_DMA5,
+ .base = 0x2e000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 15,
+ .type = SSPP_TYPE_DMA,
+ },
+};
+
+static const struct dpu_lm_cfg sm8650_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x400,
+ .features = MIXER_SDM845_MASK,
+ .sblk = &sdm845_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ },
+};
+
+static const struct dpu_dspp_cfg sm8650_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .features = DSPP_SC7180_MASK,
+ .sblk = &sdm845_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg sm8650_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x6d000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x6e000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ }, {
+ .name = "pingpong_6", .id = PINGPONG_6,
+ .base = 0x66000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ }, {
+ .name = "pingpong_7", .id = PINGPONG_7,
+ .base = 0x66400, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ }, {
+ .name = "pingpong_8", .id = PINGPONG_8,
+ .base = 0x7e000, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_4,
+ }, {
+ .name = "pingpong_9", .id = PINGPONG_9,
+ .base = 0x7e400, .len = 0,
+ .features = BIT(DPU_PINGPONG_DITHER),
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_4,
+ },
+};
+
+static const struct dpu_merge_3d_cfg sm8650_merge_3d[] = {
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x4e000, .len = 0x8,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x4f000, .len = 0x8,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x50000, .len = 0x8,
+ }, {
+ .name = "merge_3d_3", .id = MERGE_3D_3,
+ .base = 0x66700, .len = 0x8,
+ }, {
+ .name = "merge_3d_4", .id = MERGE_3D_4,
+ .base = 0x7e700, .len = 0x8,
+ },
+};
+
+/*
+ * NOTE: Each display compression engine (DCE) contains dual hard
+ * slice DSC encoders so both share same base address but with
+ * its own different sub block address.
+ */
+static const struct dpu_dsc_cfg sm8650_dsc[] = {
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x6,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_0_1", .id = DSC_1,
+ .base = 0x80000, .len = 0x6,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_1_0", .id = DSC_2,
+ .base = 0x81000, .len = 0x6,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_1_1", .id = DSC_3,
+ .base = 0x81000, .len = 0x6,
+ .features = BIT(DPU_DSC_HW_REV_1_2) | BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &dsc_sblk_1,
+ }, {
+ .name = "dce_2_0", .id = DSC_4,
+ .base = 0x82000, .len = 0x6,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_0,
+ }, {
+ .name = "dce_2_1", .id = DSC_5,
+ .base = 0x82000, .len = 0x6,
+ .features = BIT(DPU_DSC_HW_REV_1_2),
+ .sblk = &dsc_sblk_1,
+ },
+};
+
+static const struct dpu_wb_cfg sm8650_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats,
+ .num_formats = ARRAY_SIZE(wb2_formats),
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
+static const struct dpu_intf_cfg sm8650_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x36000, .len = 0x300,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x37000, .len = 0x280,
+ .features = INTF_SC7280_MASK,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ },
+};
+
+static const struct dpu_perf_cfg sm8650_perf_data = {
+ .max_bw_low = 17000000,
+ .max_bw_high = 27000000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 35,
+ /* FIXME: lut tables */
+ .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0},
+ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version sm8650_mdss_ver = {
+ .core_major_ver = 10,
+ .core_minor_ver = 0,
+};
+
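+/*
+ * Exported catalog entry for SM8650. For context, a minimal sketch (assuming
+ * the usual of_device_id table in dpu_kms.c, not part of this file) of how the
+ * compatible is matched to this config:
+ *
+ *   { .compatible = "qcom,sm8650-dpu", .data = &dpu_sm8650_cfg, },
+ */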
+const struct dpu_mdss_cfg dpu_sm8650_cfg = {
+ .mdss_ver = &sm8650_mdss_ver,
+ .caps = &sm8650_dpu_caps,
+ .mdp = &sm8650_mdp,
+ .ctl_count = ARRAY_SIZE(sm8650_ctl),
+ .ctl = sm8650_ctl,
+ .sspp_count = ARRAY_SIZE(sm8650_sspp),
+ .sspp = sm8650_sspp,
+ .mixer_count = ARRAY_SIZE(sm8650_lm),
+ .mixer = sm8650_lm,
+ .dspp_count = ARRAY_SIZE(sm8650_dspp),
+ .dspp = sm8650_dspp,
+ .pingpong_count = ARRAY_SIZE(sm8650_pp),
+ .pingpong = sm8650_pp,
+ .dsc_count = ARRAY_SIZE(sm8650_dsc),
+ .dsc = sm8650_dsc,
+ .merge_3d_count = ARRAY_SIZE(sm8650_merge_3d),
+ .merge_3d = sm8650_merge_3d,
+ .wb_count = ARRAY_SIZE(sm8650_wb),
+ .wb = sm8650_wb,
+ .intf_count = ARRAY_SIZE(sm8650_intf),
+ .intf = sm8650_intf,
+ .vbif_count = ARRAY_SIZE(sm8650_vbif),
+ .vbif = sm8650_vbif,
+ .perf = &sm8650_perf_data,
+};
+
+#endif
static const struct dpu_caps msm8998_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0x7,
- .qseed_type = DPU_SSPP_SCALER_QSEED3,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1ac,
.features = VIG_MSM8998_MASK,
- .sblk = &msm8998_vig_sblk_0,
+ .sblk = &dpu_vig_sblk_qseed3_1_2,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1ac,
.features = VIG_MSM8998_MASK,
- .sblk = &msm8998_vig_sblk_1,
+ .sblk = &dpu_vig_sblk_qseed3_1_2,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1ac,
.features = VIG_MSM8998_MASK,
- .sblk = &msm8998_vig_sblk_2,
+ .sblk = &dpu_vig_sblk_qseed3_1_2,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1ac,
.features = VIG_MSM8998_MASK,
- .sblk = &msm8998_vig_sblk_3,
+ .sblk = &dpu_vig_sblk_qseed3_1_2,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1ac,
.features = DMA_MSM8998_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1ac,
.features = DMA_MSM8998_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1ac,
.features = DMA_CURSOR_MSM8998_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1ac,
.features = DMA_CURSOR_MSM8998_MASK,
- .sblk = &sdm845_dma_sblk_3,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
static const struct dpu_caps sdm845_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED3,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1c8,
.features = VIG_SDM845_MASK_SDMA,
- .sblk = &sdm845_vig_sblk_0,
+ .sblk = &dpu_vig_sblk_qseed3_1_3,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1c8,
.features = VIG_SDM845_MASK_SDMA,
- .sblk = &sdm845_vig_sblk_1,
+ .sblk = &dpu_vig_sblk_qseed3_1_3,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1c8,
.features = VIG_SDM845_MASK_SDMA,
- .sblk = &sdm845_vig_sblk_2,
+ .sblk = &dpu_vig_sblk_qseed3_1_3,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1c8,
.features = VIG_SDM845_MASK_SDMA,
- .sblk = &sdm845_vig_sblk_3,
+ .sblk = &dpu_vig_sblk_qseed3_1_3,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1c8,
.features = DMA_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1c8,
.features = DMA_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1c8,
.features = DMA_CURSOR_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1c8,
.features = DMA_CURSOR_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_3,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Richard Acayan. All rights reserved.
+ */
+
+#ifndef _DPU_4_1_SDM670_H
+#define _DPU_4_1_SDM670_H
+
+static const struct dpu_mdp_cfg sdm670_mdp = {
+ .name = "top_0",
+ .base = 0x0, .len = 0x45c,
+ .features = BIT(DPU_MDP_AUDIO_SELECT),
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
+ [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
+ [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
+ },
+};
+
+static const struct dpu_sspp_cfg sdm670_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x1c8,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_1_3,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ .clk_ctrl = DPU_CLK_CTRL_VIG0,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x1c8,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_1_3,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+		.clk_ctrl = DPU_CLK_CTRL_VIG1,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x1c8,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA0,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x1c8,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA1,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x1c8,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ .clk_ctrl = DPU_CLK_CTRL_DMA2,
+ },
+};
+
+static const struct dpu_dsc_cfg sdm670_dsc[] = {
+ {
+ .name = "dsc_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x140,
+ }, {
+ .name = "dsc_1", .id = DSC_1,
+ .base = 0x80400, .len = 0x140,
+ },
+};
+
+static const struct dpu_mdss_version sdm670_mdss_ver = {
+ .core_major_ver = 4,
+ .core_minor_ver = 1,
+};
+
+const struct dpu_mdss_cfg dpu_sdm670_cfg = {
+ .mdss_ver = &sdm670_mdss_ver,
+ .caps = &sdm845_dpu_caps,
+ .mdp = &sdm670_mdp,
+ .ctl_count = ARRAY_SIZE(sdm845_ctl),
+ .ctl = sdm845_ctl,
+ .sspp_count = ARRAY_SIZE(sdm670_sspp),
+ .sspp = sdm670_sspp,
+ .mixer_count = ARRAY_SIZE(sdm845_lm),
+ .mixer = sdm845_lm,
+ .pingpong_count = ARRAY_SIZE(sdm845_pp),
+ .pingpong = sdm845_pp,
+ .dsc_count = ARRAY_SIZE(sdm670_dsc),
+ .dsc = sdm670_dsc,
+ .intf_count = ARRAY_SIZE(sdm845_intf),
+ .intf = sdm845_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .perf = &sdm845_perf_data,
+};
+
+#endif
static const struct dpu_caps sm8150_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED3,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
- .sblk = &sdm845_vig_sblk_0,
+ .sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
- .sblk = &sdm845_vig_sblk_1,
+ .sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
- .sblk = &sdm845_vig_sblk_2,
+ .sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
- .sblk = &sdm845_vig_sblk_3,
+ .sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f0,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1f0,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_3,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
static const struct dpu_caps sc8180x_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED3,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
- .sblk = &sdm845_vig_sblk_0,
+ .sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
- .sblk = &sdm845_vig_sblk_1,
+ .sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
- .sblk = &sdm845_vig_sblk_2,
+ .sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
- .sblk = &sdm845_vig_sblk_3,
+ .sblk = &dpu_vig_sblk_qseed3_1_4,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f0,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1f0,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_3,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
.min_llcc_ib = 800000,
.min_dram_ib = 800000,
.danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .safe_lut_tbl = {0xfff0, 0xf000, 0xffff},
.qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sc7180_qos_linear),
.entries = sc7180_qos_linear
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
- .features = VIG_SM6125_MASK,
- .sblk = &sm6125_vig_sblk_0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_2_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
static const struct dpu_caps sm8250_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
- [DPU_CLK_CTRL_WB2] = { .reg_off = 0x3b8, .bit_off = 24 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SC7180_MASK_SDMA,
- .sblk = &sm8250_vig_sblk_0,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f8,
- .features = VIG_SC7180_MASK_SDMA,
- .sblk = &sm8250_vig_sblk_1,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1f8,
- .features = VIG_SC7180_MASK_SDMA,
- .sblk = &sm8250_vig_sblk_2,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1f8,
- .features = VIG_SC7180_MASK_SDMA,
- .sblk = &sm8250_vig_sblk_3,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
.features = DMA_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f8,
.features = DMA_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f8,
.features = DMA_CURSOR_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1f8,
.features = DMA_CURSOR_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_3,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
static const struct dpu_caps sc7180_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0x9,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_dim_layer = true,
.has_idle_pc = true,
.max_linewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
- [DPU_CLK_CTRL_WB2] = { .reg_off = 0x3b8, .bit_off = 24 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SC7180_MASK,
- .sblk = &sc7180_vig_sblk_0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f8,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f8,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
static const struct dpu_caps sm6115_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
.max_mixer_blendstages = 0x4,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_dim_layer = true,
.has_idle_pc = true,
.max_linewidth = 2160,
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SC7180_MASK,
- .sblk = &sm6115_vig_sblk_0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
static const struct dpu_caps sm6350_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0x7,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SC7180_MASK,
- .sblk = &sc7180_vig_sblk_0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f8,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f8,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
.features = VIG_QCM2290_MASK,
- .sblk = &qcm2290_vig_sblk_0,
+ .sblk = &dpu_vig_sblk_noscale,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
.features = DMA_SDM845_MASK,
- .sblk = &qcm2290_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
static const struct dpu_caps sm6375_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
.max_mixer_blendstages = 0x4,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_dim_layer = true,
.has_idle_pc = true,
.max_linewidth = 2160,
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SC7180_MASK,
- .sblk = &sm6115_vig_sblk_0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
static const struct dpu_caps sm8350_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_0,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f8,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_1,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x1f8,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_2,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x1f8,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_3,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f8,
- .features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f8,
- .features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1f8,
- .features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_3,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
static const struct dpu_caps sc7280_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0x7,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_dim_layer = true,
.has_idle_pc = true,
.max_linewidth = 2400,
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
- [DPU_CLK_CTRL_WB2] = { .reg_off = 0x3b8, .bit_off = 24 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f8,
.features = VIG_SC7280_MASK_SDMA,
- .sblk = &sc7280_vig_sblk_0,
+ .sblk = &dpu_vig_sblk_qseed3_3_0_rot_v2,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f8,
.features = DMA_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f8,
.features = DMA_CURSOR_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f8,
.features = DMA_CURSOR_SDM845_MASK_SDMA,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
static const struct dpu_caps sc8280xp_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 11,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x2ac,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x2ac,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_1,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x2ac,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_2,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x2ac,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_3,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_0,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x2ac,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x2ac,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x2ac,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x2ac,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_3,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
static const struct dpu_caps sm8450_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x32c,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_0,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_1,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x32c,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_1,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_1,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x32c,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_2,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_1,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG2,
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x32c,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8250_vig_sblk_3,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_1,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG3,
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x32c,
- .features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x32c,
- .features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x32c,
- .features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x32c,
- .features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_3,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
static const struct dpu_caps sm8550_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
- .qseed_type = DPU_SSPP_SCALER_QSEED4,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x344,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8550_vig_sblk_0,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x344,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8550_vig_sblk_1,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_2", .id = SSPP_VIG2,
.base = 0x8000, .len = 0x344,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8550_vig_sblk_2,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 8,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_3", .id = SSPP_VIG3,
.base = 0xa000, .len = 0x344,
- .features = VIG_SC7180_MASK,
- .sblk = &sm8550_vig_sblk_3,
+ .features = VIG_SDM845_MASK,
+ .sblk = &dpu_vig_sblk_qseed3_3_2,
.xin_id = 12,
.type = SSPP_TYPE_VIG,
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x344,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_0,
+ .sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x344,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_1,
+ .sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x344,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_2,
+ .sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x344,
.features = DMA_SDM845_MASK,
- .sblk = &sdm845_dma_sblk_3,
+ .sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_12", .id = SSPP_DMA4,
.base = 0x2c000, .len = 0x344,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sm8550_dma_sblk_4,
+ .sblk = &dpu_dma_sblk,
.xin_id = 14,
.type = SSPP_TYPE_DMA,
}, {
.name = "sspp_13", .id = SSPP_DMA5,
.base = 0x2e000, .len = 0x344,
.features = DMA_CURSOR_SDM845_MASK,
- .sblk = &sm8550_dma_sblk_5,
+ .sblk = &dpu_dma_sblk,
.xin_id = 15,
.type = SSPP_TYPE_DMA,
},
return to_dpu_kms(priv->kms);
}
-static void dpu_crtc_destroy(struct drm_crtc *crtc)
-{
- struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
-
- if (!crtc)
- return;
-
- drm_crtc_cleanup(crtc);
- kfree(dpu_crtc);
-}
-
static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
static const struct drm_crtc_funcs dpu_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
- .destroy = dpu_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.reset = dpu_crtc_reset,
.atomic_duplicate_state = dpu_crtc_duplicate_state,
struct dpu_crtc *dpu_crtc;
int i, ret;
- dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
- if (!dpu_crtc)
- return ERR_PTR(-ENOMEM);
+ dpu_crtc = drmm_crtc_alloc_with_planes(dev, struct dpu_crtc, base,
+ plane, cursor,
+ &dpu_crtc_funcs,
+ NULL);
+
+ if (IS_ERR(dpu_crtc))
+ return ERR_CAST(dpu_crtc);
crtc = &dpu_crtc->base;
crtc->dev = dev;
dpu_crtc_frame_event_work);
}
- drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
- NULL);
-
drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
if (dpu_kms->catalog->dspp_count)
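The hunks above move dpu_crtc to DRM-managed allocation. As a minimal, hypothetical sketch of that pattern (the struct and function names below are illustrative, not part of the patch), a managed CRTC allocation looks roughly like this:

#include <drm/drm_crtc.h>
#include <drm/drm_managed.h>
#include <linux/err.h>

struct example_crtc {
	struct drm_crtc base;
	/* driver-private state would live here */
};

static struct drm_crtc *example_crtc_create(struct drm_device *dev,
					    struct drm_plane *primary,
					    struct drm_plane *cursor,
					    const struct drm_crtc_funcs *funcs)
{
	struct example_crtc *ec;

	/*
	 * drmm_crtc_alloc_with_planes() combines the allocation with
	 * drm_crtc_init_with_planes(); the memory is released together
	 * with the drm_device, so no .destroy callback or kfree() is
	 * needed -- which is why dpu_crtc_destroy() could be dropped.
	 */
	ec = drmm_crtc_alloc_with_planes(dev, struct example_crtc, base,
					 primary, cursor, funcs, NULL);
	if (IS_ERR(ec))
		return ERR_CAST(ec);

	return &ec->base;
}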
* @crtc_frame_event_cb: callback handler for frame event
* @crtc_frame_event_cb_data: callback handler private data
* @frame_done_timeout_ms: frame done timeout in ms
+ * @frame_done_timeout_cnt: atomic counter tracking the number of frame
+ * done timeouts
* @frame_done_timer: watchdog timer for frame done event
* @disp_info: local copy of msm_display_info struct
 * @idle_pc_supported: indicate if idle power collapse is supported
void *crtc_frame_event_cb_data;
atomic_t frame_done_timeout_ms;
+ atomic_t frame_done_timeout_cnt;
struct timer_list frame_done_timer;
struct msm_display_info disp_info;
return linecount;
}
-static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
-{
- struct dpu_encoder_virt *dpu_enc = NULL;
- int i = 0;
-
- if (!drm_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
-
- dpu_enc = to_dpu_encoder_virt(drm_enc);
- DPU_DEBUG_ENC(dpu_enc, "\n");
-
- mutex_lock(&dpu_enc->enc_lock);
-
- for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
-
- if (phys->ops.destroy) {
- phys->ops.destroy(phys);
- --dpu_enc->num_phys_encs;
- dpu_enc->phys_encs[i] = NULL;
- }
- }
-
- if (dpu_enc->num_phys_encs)
- DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
- dpu_enc->num_phys_encs);
- dpu_enc->num_phys_encs = 0;
- mutex_unlock(&dpu_enc->enc_lock);
-
- drm_encoder_cleanup(drm_enc);
- mutex_destroy(&dpu_enc->enc_lock);
-}
-
void dpu_encoder_helper_split_config(
struct dpu_encoder_phys *phys_enc,
enum dpu_intf interface)
dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc);
+ atomic_set(&dpu_enc->frame_done_timeout_cnt, 0);
+
if (disp_info->intf_type == INTF_DP)
dpu_enc->wide_bus_en = msm_dp_wide_bus_available(priv->dp[index]);
else if (disp_info->intf_type == INTF_DSI)
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d ",
+ seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d frame_done_cnt:%d",
phys->hw_intf ? phys->hw_intf->idx - INTF_0 : -1,
phys->hw_wb ? phys->hw_wb->idx - WB_0 : -1,
atomic_read(&phys->vsync_cnt),
- atomic_read(&phys->underrun_cnt));
+ atomic_read(&phys->underrun_cnt),
+ atomic_read(&dpu_enc->frame_done_timeout_cnt));
seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode));
}
}
static int dpu_encoder_virt_add_phys_encs(
+ struct drm_device *dev,
struct msm_display_info *disp_info,
struct dpu_encoder_virt *dpu_enc,
struct dpu_enc_phys_init_params *params)
if (disp_info->intf_type == INTF_WB) {
- enc = dpu_encoder_phys_wb_init(params);
+ enc = dpu_encoder_phys_wb_init(dev, params);
if (IS_ERR(enc)) {
DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n",
dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
++dpu_enc->num_phys_encs;
} else if (disp_info->is_cmd_mode) {
- enc = dpu_encoder_phys_cmd_init(params);
+ enc = dpu_encoder_phys_cmd_init(dev, params);
if (IS_ERR(enc)) {
DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
++dpu_enc->num_phys_encs;
} else {
- enc = dpu_encoder_phys_vid_init(params);
+ enc = dpu_encoder_phys_vid_init(dev, params);
if (IS_ERR(enc)) {
DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
break;
}
- ret = dpu_encoder_virt_add_phys_encs(disp_info,
+ ret = dpu_encoder_virt_add_phys_encs(dpu_kms->dev, disp_info,
dpu_enc, &phys_params);
if (ret) {
DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");
+ if (atomic_inc_return(&dpu_enc->frame_done_timeout_cnt) == 1)
+ msm_disp_snapshot_state(drm_enc->dev);
+
event = DPU_ENCODER_FRAME_EVENT_ERROR;
trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
};
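The frame-done timeout handler above captures a device state snapshot only on the first timeout after the counter has been (re)set to zero in the enable path. A tiny, self-contained sketch of that idiom (illustrative only, not driver code):

#include <linux/atomic.h>
#include <linux/printk.h>

static atomic_t timeout_cnt = ATOMIC_INIT(0);

static void handle_timeout(void)
{
	/*
	 * atomic_inc_return() returns the post-increment value, so only
	 * the first timeout after the counter was reset observes 1 and
	 * triggers the expensive snapshot; later timeouts just count.
	 */
	if (atomic_inc_return(&timeout_cnt) == 1)
		pr_warn("first frame-done timeout: capture snapshot here\n");
}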
static const struct drm_encoder_funcs dpu_encoder_funcs = {
- .destroy = dpu_encoder_destroy,
.late_register = dpu_encoder_late_register,
.early_unregister = dpu_encoder_early_unregister,
};
{
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
- struct drm_encoder *drm_enc = NULL;
- struct dpu_encoder_virt *dpu_enc = NULL;
- int ret = 0;
-
- dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
- if (!dpu_enc)
- return ERR_PTR(-ENOMEM);
+ struct dpu_encoder_virt *dpu_enc;
+ int ret;
- ret = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
- drm_enc_mode, NULL);
- if (ret) {
- devm_kfree(dev->dev, dpu_enc);
- return ERR_PTR(ret);
- }
+ dpu_enc = drmm_encoder_alloc(dev, struct dpu_encoder_virt, base,
+ &dpu_encoder_funcs, drm_enc_mode, NULL);
+ if (IS_ERR(dpu_enc))
+ return ERR_CAST(dpu_enc);
drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
mutex_init(&dpu_enc->rc_lock);
ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
- if (ret)
- goto fail;
+ if (ret) {
+ DPU_ERROR("failed to setup encoder\n");
+ return ERR_PTR(-ENOMEM);
+ }
atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
+ atomic_set(&dpu_enc->frame_done_timeout_cnt, 0);
timer_setup(&dpu_enc->frame_done_timer,
dpu_encoder_frame_done_timeout, 0);
DPU_DEBUG_ENC(dpu_enc, "created\n");
return &dpu_enc->base;
-
-fail:
- DPU_ERROR("failed to create encoder\n");
- if (drm_enc)
- dpu_encoder_destroy(drm_enc);
-
- return ERR_PTR(ret);
}
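The same managed-allocation approach applies to the virtual encoder: drmm_encoder_alloc() replaces the kzalloc()/drm_encoder_init()/cleanup sequence, so error paths can return directly and the old fail: label disappears. A hypothetical sketch of the pattern (names are illustrative):

#include <drm/drm_encoder.h>
#include <drm/drm_managed.h>
#include <linux/err.h>

struct example_encoder {
	struct drm_encoder base;
};

static struct drm_encoder *example_encoder_create(struct drm_device *dev,
						  const struct drm_encoder_funcs *funcs,
						  int encoder_type)
{
	struct example_encoder *enc;

	/* Allocation and drm_encoder_init() in one call; freed when the
	 * drm_device goes away, so no drm_encoder_cleanup() is needed. */
	enc = drmm_encoder_alloc(dev, struct example_encoder, base,
				 funcs, encoder_type, NULL);
	if (IS_ERR(enc))
		return ERR_CAST(enc);

	return &enc->base;
}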
int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
case MSM_ENC_TX_COMPLETE:
fn_wait = phys->ops.wait_for_tx_complete;
break;
- case MSM_ENC_VBLANK:
- fn_wait = phys->ops.wait_for_vblank;
- break;
default:
DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
event);
* @enable: DRM Call. Enable a DRM mode.
* @disable: DRM Call. Disable mode.
* @atomic_check: DRM Call. Atomic check new DRM state.
- * @destroy: DRM Call. Destroy and release resources.
* @control_vblank_irq Register/Deregister for VBLANK IRQ
* @wait_for_commit_done: Wait for hardware to have flushed the
* current pending frames to hardware
int (*atomic_check)(struct dpu_encoder_phys *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state);
- void (*destroy)(struct dpu_encoder_phys *encoder);
int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
- int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc);
void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc);
void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
* @p: Pointer to init params structure
* Return: Error code or newly allocated encoder
*/
-struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p);
/**
* dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder
+ * @dev: Corresponding device for devres management
* @p: Pointer to init params structure
* Return: Error code or newly allocated encoder
*/
-struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p);
/**
* dpu_encoder_phys_wb_init - initialize writeback encoder
+ * @dev: Corresponding device for devres management
* @init: Pointer to init info structure with initialization params
*/
-struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
+struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p);
/**
#include "dpu_trace.h"
#include "disp/msm_disp_snapshot.h"
+#include <drm/drm_managed.h>
+
#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
(e) && (e)->base.parent ? \
(e)->base.parent->base.id : -1, \
phys_enc->enable_state = DPU_ENC_DISABLED;
}
-static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
-{
- struct dpu_encoder_phys_cmd *cmd_enc =
- to_dpu_encoder_phys_cmd(phys_enc);
-
- kfree(cmd_enc);
-}
-
static void dpu_encoder_phys_cmd_prepare_for_kickoff(
struct dpu_encoder_phys *phys_enc)
{
return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
}
-static int dpu_encoder_phys_cmd_wait_for_vblank(
- struct dpu_encoder_phys *phys_enc)
-{
- int rc = 0;
- struct dpu_encoder_phys_cmd *cmd_enc;
- struct dpu_encoder_wait_info wait_info;
-
- cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
-
- /* only required for master controller */
- if (!dpu_encoder_phys_cmd_is_master(phys_enc))
- return rc;
-
- wait_info.wq = &cmd_enc->pending_vblank_wq;
- wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
- wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
-
- atomic_inc(&cmd_enc->pending_vblank_cnt);
-
- rc = dpu_encoder_helper_wait_for_irq(phys_enc,
- phys_enc->irq[INTR_IDX_RDPTR],
- dpu_encoder_phys_cmd_te_rd_ptr_irq,
- &wait_info);
-
- return rc;
-}
-
static void dpu_encoder_phys_cmd_handle_post_kickoff(
struct dpu_encoder_phys *phys_enc)
{
ops->atomic_mode_set = dpu_encoder_phys_cmd_atomic_mode_set;
ops->enable = dpu_encoder_phys_cmd_enable;
ops->disable = dpu_encoder_phys_cmd_disable;
- ops->destroy = dpu_encoder_phys_cmd_destroy;
ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
- ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
ops->irq_control = dpu_encoder_phys_cmd_irq_control;
ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
}
-struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p)
{
struct dpu_encoder_phys *phys_enc = NULL;
DPU_DEBUG("intf\n");
- cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
+ cmd_enc = drmm_kzalloc(dev, sizeof(*cmd_enc), GFP_KERNEL);
if (!cmd_enc) {
DPU_ERROR("failed to allocate\n");
return ERR_PTR(-ENOMEM);
#include "dpu_trace.h"
#include "disp/msm_disp_snapshot.h"
+#include <drm/drm_managed.h>
+
#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
(e) && (e)->parent ? \
(e)->parent->base.id : -1, \
phys_enc->enable_state = DPU_ENC_ENABLING;
}
-static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
-{
- DPU_DEBUG_VIDENC(phys_enc, "\n");
- kfree(phys_enc);
-}
-
-static int dpu_encoder_phys_vid_wait_for_vblank(
+static int dpu_encoder_phys_vid_wait_for_tx_complete(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_wait_info wait_info;
* scanout buffer) don't latch properly..
*/
if (dpu_encoder_phys_vid_is_master(phys_enc)) {
- ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc);
+ ret = dpu_encoder_phys_vid_wait_for_tx_complete(phys_enc);
if (ret) {
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
dpu_encoder_phys_inc_pending(phys_enc);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
- ret = dpu_encoder_phys_vid_wait_for_vblank(phys_enc);
+ ret = dpu_encoder_phys_vid_wait_for_tx_complete(phys_enc);
if (ret) {
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
ops->atomic_mode_set = dpu_encoder_phys_vid_atomic_mode_set;
ops->enable = dpu_encoder_phys_vid_enable;
ops->disable = dpu_encoder_phys_vid_disable;
- ops->destroy = dpu_encoder_phys_vid_destroy;
ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_commit_done;
- ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank;
- ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_vblank;
+ ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_tx_complete;
ops->irq_control = dpu_encoder_phys_vid_irq_control;
ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
ops->get_frame_count = dpu_encoder_phys_vid_get_frame_count;
}
-struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p)
{
struct dpu_encoder_phys *phys_enc = NULL;
return ERR_PTR(-EINVAL);
}
- phys_enc = kzalloc(sizeof(*phys_enc), GFP_KERNEL);
+ phys_enc = drmm_kzalloc(dev, sizeof(*phys_enc), GFP_KERNEL);
if (!phys_enc) {
DPU_ERROR("failed to create encoder due to memory allocation error\n");
return ERR_PTR(-ENOMEM);
#include <linux/debugfs.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_managed.h>
#include "dpu_encoder_phys.h"
#include "dpu_formats.h"
phys_enc->enable_state = DPU_ENC_DISABLED;
}
-/**
- * dpu_encoder_phys_wb_destroy - destroy writeback encoder
- * @phys_enc: Pointer to physical encoder
- */
-static void dpu_encoder_phys_wb_destroy(struct dpu_encoder_phys *phys_enc)
-{
- if (!phys_enc)
- return;
-
- DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
-
- kfree(phys_enc);
-}
-
static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc,
struct drm_writeback_job *job)
{
ops->atomic_mode_set = dpu_encoder_phys_wb_atomic_mode_set;
ops->enable = dpu_encoder_phys_wb_enable;
ops->disable = dpu_encoder_phys_wb_disable;
- ops->destroy = dpu_encoder_phys_wb_destroy;
ops->atomic_check = dpu_encoder_phys_wb_atomic_check;
ops->wait_for_commit_done = dpu_encoder_phys_wb_wait_for_commit_done;
ops->prepare_for_kickoff = dpu_encoder_phys_wb_prepare_for_kickoff;
/**
* dpu_encoder_phys_wb_init - initialize writeback encoder
+ * @dev: Corresponding device for devres management
* @p: Pointer to init info structure with initialization params
*/
-struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
+struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev,
struct dpu_enc_phys_init_params *p)
{
struct dpu_encoder_phys *phys_enc = NULL;
return ERR_PTR(-EINVAL);
}
- wb_enc = kzalloc(sizeof(*wb_enc), GFP_KERNEL);
+ wb_enc = drmm_kzalloc(dev, sizeof(*wb_enc), GFP_KERNEL);
if (!wb_enc) {
DPU_ERROR("failed to allocate wb phys_enc enc\n");
return ERR_PTR(-ENOMEM);
BIT(DPU_SSPP_CSC_10BIT))
#define VIG_MSM8998_MASK \
- (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3))
+ (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE))
#define VIG_SDM845_MASK \
- (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3))
+ (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3_COMPATIBLE))
#define VIG_SDM845_MASK_SDMA \
(VIG_SDM845_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
-#define VIG_SC7180_MASK \
- (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4))
-
-#define VIG_SM6125_MASK \
- (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3LITE))
-
-#define VIG_SC7180_MASK_SDMA \
- (VIG_SC7180_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
-
#define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL))
#define DMA_MSM8998_MASK \
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
#define VIG_SC7280_MASK \
- (VIG_SC7180_MASK | BIT(DPU_SSPP_INLINE_ROTATION))
+ (VIG_SDM845_MASK | BIT(DPU_SSPP_INLINE_ROTATION))
#define VIG_SC7280_MASK_SDMA \
(VIG_SC7280_MASK | BIT(DPU_SSPP_SMART_DMA_V2))
* SSPP sub blocks config
*************************************************************/
+#define SSPP_SCALER_VER(maj, min) (((maj) << 16) | (min))
+
/* SSPP common configuration */
-#define _VIG_SBLK(sdma_pri, qseed_ver) \
+#define _VIG_SBLK(scaler_ver) \
{ \
.maxdwnscale = MAX_DOWNSCALE_RATIO, \
.maxupscale = MAX_UPSCALE_RATIO, \
- .smart_dma_priority = sdma_pri, \
.scaler_blk = {.name = "scaler", \
- .id = qseed_ver, \
+ .version = scaler_ver, \
.base = 0xa00, .len = 0xa0,}, \
.csc_blk = {.name = "csc", \
- .id = DPU_SSPP_CSC_10BIT, \
.base = 0x1a00, .len = 0x100,}, \
.format_list = plane_formats_yuv, \
.num_formats = ARRAY_SIZE(plane_formats_yuv), \
.rotation_cfg = NULL, \
}
-#define _VIG_SBLK_ROT(sdma_pri, qseed_ver, rot_cfg) \
+#define _VIG_SBLK_ROT(scaler_ver, rot_cfg) \
{ \
.maxdwnscale = MAX_DOWNSCALE_RATIO, \
.maxupscale = MAX_UPSCALE_RATIO, \
- .smart_dma_priority = sdma_pri, \
.scaler_blk = {.name = "scaler", \
- .id = qseed_ver, \
+ .version = scaler_ver, \
.base = 0xa00, .len = 0xa0,}, \
.csc_blk = {.name = "csc", \
- .id = DPU_SSPP_CSC_10BIT, \
.base = 0x1a00, .len = 0x100,}, \
.format_list = plane_formats_yuv, \
.num_formats = ARRAY_SIZE(plane_formats_yuv), \
.rotation_cfg = rot_cfg, \
}
-#define _DMA_SBLK(sdma_pri) \
+#define _VIG_SBLK_NOSCALE() \
+ { \
+ .maxdwnscale = SSPP_UNITY_SCALE, \
+ .maxupscale = SSPP_UNITY_SCALE, \
+ .format_list = plane_formats_yuv, \
+ .num_formats = ARRAY_SIZE(plane_formats_yuv), \
+ .virt_format_list = plane_formats, \
+ .virt_num_formats = ARRAY_SIZE(plane_formats), \
+ }
+
+#define _DMA_SBLK() \
{ \
.maxdwnscale = SSPP_UNITY_SCALE, \
.maxupscale = SSPP_UNITY_SCALE, \
- .smart_dma_priority = sdma_pri, \
.format_list = plane_formats, \
.num_formats = ARRAY_SIZE(plane_formats), \
.virt_format_list = plane_formats, \
.virt_num_formats = ARRAY_SIZE(plane_formats), \
}
-static const struct dpu_sspp_sub_blks msm8998_vig_sblk_0 =
- _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
-static const struct dpu_sspp_sub_blks msm8998_vig_sblk_1 =
- _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
-static const struct dpu_sspp_sub_blks msm8998_vig_sblk_2 =
- _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
-static const struct dpu_sspp_sub_blks msm8998_vig_sblk_3 =
- _VIG_SBLK(0, DPU_SSPP_SCALER_QSEED3);
-
static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = {
.rot_maxheight = 1088,
.rot_num_formats = ARRAY_SIZE(rotation_v2_formats),
.rot_format_list = rotation_v2_formats,
};
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 =
- _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED3);
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 =
- _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED3);
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 =
- _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED3);
-static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 =
- _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED3);
-
-static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK(1);
-static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK(2);
-static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK(3);
-static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK(4);
-
-static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 =
- _VIG_SBLK(4, DPU_SSPP_SCALER_QSEED4);
-
-static const struct dpu_sspp_sub_blks sc7280_vig_sblk_0 =
- _VIG_SBLK_ROT(4, DPU_SSPP_SCALER_QSEED4, &dpu_rot_sc7280_cfg_v2);
-
-static const struct dpu_sspp_sub_blks sm6115_vig_sblk_0 =
- _VIG_SBLK(2, DPU_SSPP_SCALER_QSEED4);
-
-static const struct dpu_sspp_sub_blks sm6125_vig_sblk_0 =
- _VIG_SBLK(3, DPU_SSPP_SCALER_QSEED3LITE);
-
-static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 =
- _VIG_SBLK(5, DPU_SSPP_SCALER_QSEED4);
-static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 =
- _VIG_SBLK(6, DPU_SSPP_SCALER_QSEED4);
-static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 =
- _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4);
-static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 =
- _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4);
-
-static const struct dpu_sspp_sub_blks sm8550_vig_sblk_0 =
- _VIG_SBLK(7, DPU_SSPP_SCALER_QSEED4);
-static const struct dpu_sspp_sub_blks sm8550_vig_sblk_1 =
- _VIG_SBLK(8, DPU_SSPP_SCALER_QSEED4);
-static const struct dpu_sspp_sub_blks sm8550_vig_sblk_2 =
- _VIG_SBLK(9, DPU_SSPP_SCALER_QSEED4);
-static const struct dpu_sspp_sub_blks sm8550_vig_sblk_3 =
- _VIG_SBLK(10, DPU_SSPP_SCALER_QSEED4);
-static const struct dpu_sspp_sub_blks sm8550_dma_sblk_4 = _DMA_SBLK(5);
-static const struct dpu_sspp_sub_blks sm8550_dma_sblk_5 = _DMA_SBLK(6);
-
-#define _VIG_SBLK_NOSCALE(sdma_pri) \
- { \
- .maxdwnscale = SSPP_UNITY_SCALE, \
- .maxupscale = SSPP_UNITY_SCALE, \
- .smart_dma_priority = sdma_pri, \
- .format_list = plane_formats_yuv, \
- .num_formats = ARRAY_SIZE(plane_formats_yuv), \
- .virt_format_list = plane_formats, \
- .virt_num_formats = ARRAY_SIZE(plane_formats), \
- }
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_noscale =
+ _VIG_SBLK_NOSCALE();
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_1_2 =
+ _VIG_SBLK(SSPP_SCALER_VER(1, 2));
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_1_3 =
+ _VIG_SBLK(SSPP_SCALER_VER(1, 3));
-static const struct dpu_sspp_sub_blks qcm2290_vig_sblk_0 = _VIG_SBLK_NOSCALE(2);
-static const struct dpu_sspp_sub_blks qcm2290_dma_sblk_0 = _DMA_SBLK(1);
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_1_4 =
+ _VIG_SBLK(SSPP_SCALER_VER(1, 4));
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_2_4 =
+ _VIG_SBLK(SSPP_SCALER_VER(2, 4));
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_0 =
+ _VIG_SBLK(SSPP_SCALER_VER(3, 0));
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_0_rot_v2 =
+ _VIG_SBLK_ROT(SSPP_SCALER_VER(3, 0),
+ &dpu_rot_sc7280_cfg_v2);
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_1 =
+ _VIG_SBLK(SSPP_SCALER_VER(3, 1));
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_2 =
+ _VIG_SBLK(SSPP_SCALER_VER(3, 2));
+
+static const struct dpu_sspp_sub_blks dpu_vig_sblk_qseed3_3_3 =
+ _VIG_SBLK(SSPP_SCALER_VER(3, 3));
+
+static const struct dpu_sspp_sub_blks dpu_dma_sblk = _DMA_SBLK();
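With the per-SoC sblk instances collapsed into these shared definitions, the scaler revision is carried by the catalog via SSPP_SCALER_VER() instead of being read back from hardware. The encoding is plain bit packing; for example (hypothetical compile-time checks, not part of the patch):

#include <linux/build_bug.h>

/* major goes into bits [31:16], minor into bits [15:0] */
static_assert(SSPP_SCALER_VER(1, 2) == 0x00010002);
static_assert(SSPP_SCALER_VER(3, 0) == 0x00030000);
static_assert(SSPP_SCALER_VER(3, 3) == 0x00030003);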
/*************************************************************
* MIXER sub blocks config
* DSPP sub blocks config
*************************************************************/
static const struct dpu_dspp_sub_blks msm8998_dspp_sblk = {
- .pcc = {.name = "pcc", .id = DPU_DSPP_PCC, .base = 0x1700,
+ .pcc = {.name = "pcc", .base = 0x1700,
.len = 0x90, .version = 0x10007},
};
static const struct dpu_dspp_sub_blks sdm845_dspp_sblk = {
- .pcc = {.name = "pcc", .id = DPU_DSPP_PCC, .base = 0x1700,
+ .pcc = {.name = "pcc", .base = 0x1700,
.len = 0x90, .version = 0x40000},
};
* PINGPONG sub blocks config
*************************************************************/
static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = {
- .te2 = {.name = "te2", .id = DPU_PINGPONG_TE2, .base = 0x2000, .len = 0x0,
+ .te2 = {.name = "te2", .base = 0x2000, .len = 0x0,
.version = 0x1},
- .dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+ .dither = {.name = "dither", .base = 0x30e0,
.len = 0x20, .version = 0x10000},
};
static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
- .dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+ .dither = {.name = "dither", .base = 0x30e0,
.len = 0x20, .version = 0x10000},
};
static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = {
- .dither = {.name = "dither", .id = DPU_PINGPONG_DITHER, .base = 0xe0,
+ .dither = {.name = "dither", .base = 0xe0,
.len = 0x20, .version = 0x20000},
};
static const u32 msm8998_nrt_pri_lvl[] = {1, 1, 1, 1};
static const u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6};
static const u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
+static const u32 sm8650_rt_pri_lvl[] = {4, 4, 5, 5, 5, 5, 5, 6};
static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = {
{
},
};
+static const struct dpu_vbif_cfg sm8650_vbif[] = {
+ {
+ .name = "vbif_rt", .id = VBIF_RT,
+ .base = 0, .len = 0x1074,
+ .features = BIT(DPU_VBIF_QOS_REMAP),
+ .xin_halt_timeout = 0x4000,
+ .qos_rp_remap_size = 0x40,
+ .qos_rt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(sm8650_rt_pri_lvl),
+ .priority_lvl = sm8650_rt_pri_lvl,
+ },
+ .qos_nrt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(sdm845_nrt_pri_lvl),
+ .priority_lvl = sdm845_nrt_pri_lvl,
+ },
+ .memtype_count = 16,
+ .memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3},
+ },
+};
+
/*************************************************************
* PERF data config
*************************************************************/
#include "catalog/dpu_3_0_msm8998.h"
#include "catalog/dpu_4_0_sdm845.h"
+#include "catalog/dpu_4_1_sdm670.h"
#include "catalog/dpu_5_0_sm8150.h"
#include "catalog/dpu_5_1_sc8180x.h"
#include "catalog/dpu_8_1_sm8450.h"
#include "catalog/dpu_9_0_sm8550.h"
+
+#include "catalog/dpu_10_0_sm8650.h"
/**
* SSPP sub-blocks/features
* @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support
- * @DPU_SSPP_SCALER_QSEED3, QSEED3 alogorithm support
- * @DPU_SSPP_SCALER_QSEED3LITE, QSEED3 Lite alogorithm support
- * @DPU_SSPP_SCALER_QSEED4, QSEED4 algorithm support
+ * @DPU_SSPP_SCALER_QSEED3_COMPATIBLE, QSEED3-compatible algorithm support (includes QSEED3, QSEED3LITE and QSEED4)
* @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes
* @DPU_SSPP_CSC, Support of Color space converion
* @DPU_SSPP_CSC_10BIT, Support of 10-bit Color space conversion
*/
enum {
DPU_SSPP_SCALER_QSEED2 = 0x1,
- DPU_SSPP_SCALER_QSEED3,
- DPU_SSPP_SCALER_QSEED3LITE,
- DPU_SSPP_SCALER_QSEED4,
+ DPU_SSPP_SCALER_QSEED3_COMPATIBLE,
DPU_SSPP_SCALER_RGB,
DPU_SSPP_CSC,
DPU_SSPP_CSC_10BIT,
u32 len; \
unsigned long features
-/**
- * MACRO DPU_HW_SUBBLK_INFO - information of HW sub-block inside DPU
- * @name: string name for debug purposes
- * @id: enum identifying this sub-block
- * @base: offset of this sub-block relative to the block
- * offset
- * @len register block length of this sub-block
- */
-#define DPU_HW_SUBBLK_INFO \
- char name[DPU_HW_BLK_NAME_LEN]; \
- u32 id; \
- u32 base; \
- u32 len
-
/**
* struct dpu_scaler_blk: Scaler information
- * @info: HW register and features supported by this sub-blk
- * @version: qseed block revision
+ * @name: string name for debug purposes
+ * @base: offset of this sub-block relative to the block offset
+ * @len: register block length of this sub-block
+ * @version: qseed block revision; on QSEED3+ platforms this matches the value
+ * read back from the register at scaler_blk.base + QSEED3_HW_VERSION.
*/
struct dpu_scaler_blk {
- DPU_HW_SUBBLK_INFO;
+ char name[DPU_HW_BLK_NAME_LEN];
+ u32 base;
+ u32 len;
u32 version;
};
struct dpu_csc_blk {
- DPU_HW_SUBBLK_INFO;
+ char name[DPU_HW_BLK_NAME_LEN];
+ u32 base;
+ u32 len;
};
/**
* struct dpu_pp_blk : Pixel processing sub-blk information
- * @info: HW register and features supported by this sub-blk
+ * @name: string name for debug purposes
+ * @base: offset of this sub-block relative to the block offset
+ * @len: register block length of this sub-block
* @version: HW Algorithm version
*/
struct dpu_pp_blk {
- DPU_HW_SUBBLK_INFO;
+ char name[DPU_HW_BLK_NAME_LEN];
+ u32 base;
+ u32 len;
u32 version;
};
/**
* struct dpu_dsc_blk - DSC Encoder sub-blk information
- * @info: HW register and features supported by this sub-blk
+ * @name: string name for debug purposes
+ * @base: offset of this sub-block relative to the block offset
+ * @len: register block length of this sub-block
*/
struct dpu_dsc_blk {
- DPU_HW_SUBBLK_INFO;
+ char name[DPU_HW_BLK_NAME_LEN];
+ u32 base;
+ u32 len;
};
/**
* @max_mixer_width max layer mixer line width support.
* @max_mixer_blendstages max layer mixer blend stages or
* supported z order
- * @qseed_type qseed2 or qseed3 support.
* @has_src_split source split feature status
* @has_dim_layer dim layer feature status
* @has_idle_pc indicate if idle power collapse feature is supported
struct dpu_caps {
u32 max_mixer_width;
u32 max_mixer_blendstages;
- u32 qseed_type;
bool has_src_split;
bool has_dim_layer;
bool has_idle_pc;
* common: Pointer to common configurations shared by sub blocks
* @maxdwnscale: max downscale ratio supported(without DECIMATION)
* @maxupscale: maxupscale ratio supported
- * @smart_dma_priority: hw priority of rect1 of multirect pipe
* @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
* @qseed_ver: qseed version
* @scaler_blk:
struct dpu_sspp_sub_blks {
u32 maxdwnscale;
u32 maxupscale;
- u32 smart_dma_priority;
u32 max_per_pipe_bw;
u32 qseed_ver;
struct dpu_scaler_blk scaler_blk;
extern const struct dpu_mdss_cfg dpu_msm8998_cfg;
extern const struct dpu_mdss_cfg dpu_sdm845_cfg;
+extern const struct dpu_mdss_cfg dpu_sdm670_cfg;
extern const struct dpu_mdss_cfg dpu_sm8150_cfg;
extern const struct dpu_mdss_cfg dpu_sc8180x_cfg;
extern const struct dpu_mdss_cfg dpu_sm8250_cfg;
extern const struct dpu_mdss_cfg dpu_sc8280xp_cfg;
extern const struct dpu_mdss_cfg dpu_sm8450_cfg;
extern const struct dpu_mdss_cfg dpu_sm8550_cfg;
+extern const struct dpu_mdss_cfg dpu_sm8650_cfg;
#endif /* _DPU_HW_CATALOG_H */
*/
#include <linux/delay.h>
+
+#include <drm/drm_managed.h>
+
#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_kms.h"
ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
};
-struct dpu_hw_ctl *dpu_hw_ctl_init(const struct dpu_ctl_cfg *cfg,
- void __iomem *addr,
- u32 mixer_count,
- const struct dpu_lm_cfg *mixer)
+struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
+ const struct dpu_ctl_cfg *cfg,
+ void __iomem *addr,
+ u32 mixer_count,
+ const struct dpu_lm_cfg *mixer)
{
struct dpu_hw_ctl *c;
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
return c;
}
-
-void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
-{
- kfree(ctx);
-}
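dpu_hw_ctl is the first of many hardware block constructors converted the same way in the hunks that follow: each *_init() gains a struct drm_device argument, allocates through drmm_kzalloc(), and the matching *_destroy() helper goes away. A minimal sketch of the pattern (the block name is made up for illustration):

#include <drm/drm_managed.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/io.h>

struct example_hw_blk {
	void __iomem *base;
};

static struct example_hw_blk *example_hw_blk_init(struct drm_device *dev,
						  void __iomem *addr)
{
	struct example_hw_blk *blk;

	/* Freed automatically on drm_device release; callers no longer
	 * need to pair this with an explicit *_destroy()/kfree(). */
	blk = drmm_kzalloc(dev, sizeof(*blk), GFP_KERNEL);
	if (!blk)
		return ERR_PTR(-ENOMEM);

	blk->base = addr;

	return blk;
}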
/**
* dpu_hw_ctl_init() - Initializes the ctl_path hw driver object.
* Should be called before accessing any ctl_path register.
+ * @dev: Corresponding device for devres management
* @cfg: ctl_path catalog entry for which driver object is required
* @addr: mapped register io address of MDP
* @mixer_count: Number of mixers in @mixer
* @mixer: Pointer to an array of Layer Mixers defined in the catalog
*/
-struct dpu_hw_ctl *dpu_hw_ctl_init(const struct dpu_ctl_cfg *cfg,
- void __iomem *addr,
- u32 mixer_count,
- const struct dpu_lm_cfg *mixer);
-
-/**
- * dpu_hw_ctl_destroy(): Destroys ctl driver context
- * should be called to free the context
- */
-void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx);
+struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
+ const struct dpu_ctl_cfg *cfg,
+ void __iomem *addr,
+ u32 mixer_count,
+ const struct dpu_lm_cfg *mixer);
#endif /*_DPU_HW_CTL_H */
* Copyright (c) 2020-2022, Linaro Limited
*/
+#include <drm/drm_managed.h>
+
#include <drm/display/drm_dsc_helper.h>
#include "dpu_kms.h"
ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk;
};
-struct dpu_hw_dsc *dpu_hw_dsc_init(const struct dpu_dsc_cfg *cfg,
+struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev,
+ const struct dpu_dsc_cfg *cfg,
void __iomem *addr)
{
struct dpu_hw_dsc *c;
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
return c;
}
-
-void dpu_hw_dsc_destroy(struct dpu_hw_dsc *dsc)
-{
- kfree(dsc);
-}
/**
* dpu_hw_dsc_init() - Initializes the DSC hw driver object.
+ * @dev: Corresponding device for devres management
* @cfg: DSC catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
* Return: Error code or allocated dpu_hw_dsc context
*/
-struct dpu_hw_dsc *dpu_hw_dsc_init(const struct dpu_dsc_cfg *cfg,
- void __iomem *addr);
+struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev,
+ const struct dpu_dsc_cfg *cfg,
+ void __iomem *addr);
/**
* dpu_hw_dsc_init_1_2() - initializes the v1.2 DSC hw driver object
+ * @dev: Corresponding device for devres management
* @cfg: DSC catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
* Returns: Error code or allocated dpu_hw_dsc context
*/
-struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(const struct dpu_dsc_cfg *cfg,
+struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev,
+ const struct dpu_dsc_cfg *cfg,
void __iomem *addr);
/**
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved
*/
+#include <drm/drm_managed.h>
+
#include <drm/display/drm_dsc_helper.h>
#include "dpu_kms.h"
ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk_1_2;
}
-struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(const struct dpu_dsc_cfg *cfg,
+struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev,
+ const struct dpu_dsc_cfg *cfg,
void __iomem *addr)
{
struct dpu_hw_dsc *c;
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
+#include <drm/drm_managed.h>
+
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_lm.h"
c->ops.setup_pcc = dpu_setup_dspp_pcc;
}
-struct dpu_hw_dspp *dpu_hw_dspp_init(const struct dpu_dspp_cfg *cfg,
- void __iomem *addr)
+struct dpu_hw_dspp *dpu_hw_dspp_init(struct drm_device *dev,
+ const struct dpu_dspp_cfg *cfg,
+ void __iomem *addr)
{
struct dpu_hw_dspp *c;
if (!addr)
return ERR_PTR(-EINVAL);
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
return c;
}
-
-void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp)
-{
- kfree(dspp);
-}
-
-
/**
* dpu_hw_dspp_init() - Initializes the DSPP hw driver object.
* should be called once before accessing every DSPP.
+ * @dev: Corresponding device for devres management
* @cfg: DSPP catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
* Return: pointer to structure or ERR_PTR
*/
-struct dpu_hw_dspp *dpu_hw_dspp_init(const struct dpu_dspp_cfg *cfg,
- void __iomem *addr);
-
-/**
- * dpu_hw_dspp_destroy(): Destroys DSPP driver context
- * @dspp: Pointer to DSPP driver context
- */
-void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp);
+struct dpu_hw_dspp *dpu_hw_dspp_init(struct drm_device *dev,
+ const struct dpu_dspp_cfg *cfg,
+ void __iomem *addr);
#endif /*_DPU_HW_DSPP_H */
#include <linux/debugfs.h>
#include <linux/slab.h>
+#include <drm/drm_managed.h>
+
#include "dpu_core_irq.h"
#include "dpu_kms.h"
#include "dpu_hw_interrupts.h"
return intr_status;
}
-struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
- const struct dpu_mdss_cfg *m)
+struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
{
struct dpu_hw_intr *intr;
unsigned int i;
if (!addr || !m)
return ERR_PTR(-EINVAL);
- intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+ intr = drmm_kzalloc(dev, sizeof(*intr), GFP_KERNEL);
if (!intr)
return ERR_PTR(-ENOMEM);
return intr;
}
-void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
-{
- kfree(intr);
-}
-
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms,
unsigned int irq_idx,
void (*irq_cb)(void *arg),
/**
* dpu_hw_intr_init(): Initializes the interrupts hw object
+ * @dev: Corresponding device for devres management
* @addr: mapped register io address of MDP
* @m: pointer to MDSS catalog data
*/
-struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
- const struct dpu_mdss_cfg *m);
+struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
-/**
- * dpu_hw_intr_destroy(): Cleanup interrutps hw object
- * @intr: pointer to interrupts hw object
- */
-void dpu_hw_intr_destroy(struct dpu_hw_intr *intr);
#endif
#include <linux/iopoll.h>
+#include <drm/drm_managed.h>
+
#define INTF_TIMING_ENGINE_EN 0x000
#define INTF_CONFIG 0x004
#define INTF_HSYNC_CTL 0x008
DPU_REG_WRITE(&ctx->hw, INTF_CONFIG2, intf_cfg2);
}
-struct dpu_hw_intf *dpu_hw_intf_init(const struct dpu_intf_cfg *cfg,
- void __iomem *addr, const struct dpu_mdss_version *mdss_rev)
+struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev,
+ const struct dpu_intf_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_intf *c;
return NULL;
}
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
return c;
}
-
-void dpu_hw_intf_destroy(struct dpu_hw_intf *intf)
-{
- kfree(intf);
-}
-
/**
* dpu_hw_intf_init() - Initializes the INTF driver for the passed
* interface catalog entry.
+ * @dev: Corresponding device for devres management
* @cfg: interface catalog entry for which driver object is required
* @addr: mapped register io address of MDP
* @mdss_rev: dpu core's major and minor versions
*/
-struct dpu_hw_intf *dpu_hw_intf_init(const struct dpu_intf_cfg *cfg,
- void __iomem *addr, const struct dpu_mdss_version *mdss_rev);
-
-/**
- * dpu_hw_intf_destroy(): Destroys INTF driver context
- * @intf: Pointer to INTF driver context
- */
-void dpu_hw_intf_destroy(struct dpu_hw_intf *intf);
+struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev,
+ const struct dpu_intf_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_rev);
#endif /*_DPU_HW_INTF_H */
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
+#include <drm/drm_managed.h>
+
#include "dpu_kms.h"
#include "dpu_hw_catalog.h"
#include "dpu_hwio.h"
ops->collect_misr = dpu_hw_lm_collect_misr;
}
-struct dpu_hw_mixer *dpu_hw_lm_init(const struct dpu_lm_cfg *cfg,
- void __iomem *addr)
+struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev,
+ const struct dpu_lm_cfg *cfg,
+ void __iomem *addr)
{
struct dpu_hw_mixer *c;
return NULL;
}
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
return c;
}
-
-void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm)
-{
- kfree(lm);
-}
/**
* dpu_hw_lm_init() - Initializes the mixer hw driver object.
* should be called once before accessing every mixer.
+ * @dev: Corresponding device for devres management
* @cfg: mixer catalog entry for which driver object is required
* @addr: mapped register io address of MDP
*/
-struct dpu_hw_mixer *dpu_hw_lm_init(const struct dpu_lm_cfg *cfg,
- void __iomem *addr);
-
-/**
- * dpu_hw_lm_destroy(): Destroys layer mixer driver context
- * @lm: Pointer to LM driver context
- */
-void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm);
+struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev,
+ const struct dpu_lm_cfg *cfg,
+ void __iomem *addr);
#endif /*_DPU_HW_LM_H */
PINGPONG_5,
PINGPONG_6,
PINGPONG_7,
+ PINGPONG_8,
+ PINGPONG_9,
PINGPONG_S0,
PINGPONG_MAX
};
MERGE_3D_1,
MERGE_3D_2,
MERGE_3D_3,
+ MERGE_3D_4,
MERGE_3D_MAX
};
#include <linux/iopoll.h>
+#include <drm/drm_managed.h>
+
#include "dpu_hw_mdss.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
c->ops.setup_3d_mode = dpu_hw_merge_3d_setup_3d_mode;
};
-struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(const struct dpu_merge_3d_cfg *cfg,
- void __iomem *addr)
+struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(struct drm_device *dev,
+ const struct dpu_merge_3d_cfg *cfg,
+ void __iomem *addr)
{
struct dpu_hw_merge_3d *c;
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
return c;
}
-
-void dpu_hw_merge_3d_destroy(struct dpu_hw_merge_3d *hw)
-{
- kfree(hw);
-}
/**
* dpu_hw_merge_3d_init() - Initializes the merge_3d driver for the passed
* merge3d catalog entry.
+ * @dev: Corresponding device for devres management
* @cfg: Pingpong catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
* Return: Error code or allocated dpu_hw_merge_3d context
*/
-struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(const struct dpu_merge_3d_cfg *cfg,
- void __iomem *addr);
-
-/**
- * dpu_hw_merge_3d_destroy - destroys merge_3d driver context
- * should be called to free the context
- * @pp: Pointer to PP driver context returned by dpu_hw_merge_3d_init
- */
-void dpu_hw_merge_3d_destroy(struct dpu_hw_merge_3d *pp);
+struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(struct drm_device *dev,
+ const struct dpu_merge_3d_cfg *cfg,
+ void __iomem *addr);
#endif /*_DPU_HW_MERGE3D_H */
#include <linux/iopoll.h>
+#include <drm/drm_managed.h>
+
#include "dpu_hw_mdss.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
return 0;
}
-struct dpu_hw_pingpong *dpu_hw_pingpong_init(const struct dpu_pingpong_cfg *cfg,
- void __iomem *addr, const struct dpu_mdss_version *mdss_rev)
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(struct drm_device *dev,
+ const struct dpu_pingpong_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_pingpong *c;
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
return c;
}
-
-void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp)
-{
- kfree(pp);
-}
/**
* dpu_hw_pingpong_init() - initializes the pingpong driver for the passed
* pingpong catalog entry.
+ * @dev: Corresponding device for devres management
* @cfg: Pingpong catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
* @mdss_rev: dpu core's major and minor versions
* Return: Error code or allocated dpu_hw_pingpong context
*/
-struct dpu_hw_pingpong *dpu_hw_pingpong_init(const struct dpu_pingpong_cfg *cfg,
- void __iomem *addr, const struct dpu_mdss_version *mdss_rev);
-
-/**
- * dpu_hw_pingpong_destroy - destroys pingpong driver context
- * should be called to free the context
- * @pp: Pointer to PP driver context returned by dpu_hw_pingpong_init
- */
-void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp);
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(struct drm_device *dev,
+ const struct dpu_pingpong_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_rev);
#endif /*_DPU_HW_PINGPONG_H */
#include "msm_mdss.h"
#include <drm/drm_file.h>
+#include <drm/drm_managed.h>
#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087
format);
}
-static u32 _dpu_hw_sspp_get_scaler3_ver(struct dpu_hw_sspp *ctx)
-{
- if (!ctx)
- return 0;
-
- return dpu_hw_get_scaler3_ver(&ctx->hw,
- ctx->cap->sblk->scaler_blk.base);
-}
-
/*
* dpu_hw_sspp_setup_rects()
*/
test_bit(DPU_SSPP_SMART_DMA_V2, &c->cap->features))
c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
- if (test_bit(DPU_SSPP_SCALER_QSEED3, &features) ||
- test_bit(DPU_SSPP_SCALER_QSEED3LITE, &features) ||
- test_bit(DPU_SSPP_SCALER_QSEED4, &features)) {
+ if (test_bit(DPU_SSPP_SCALER_QSEED3_COMPATIBLE, &features))
c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
- c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
- }
if (test_bit(DPU_SSPP_CDP, &features))
c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
cfg->len,
kms);
- if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
- cfg->features & BIT(DPU_SSPP_SCALER_QSEED3LITE) ||
- cfg->features & BIT(DPU_SSPP_SCALER_QSEED2) ||
- cfg->features & BIT(DPU_SSPP_SCALER_QSEED4))
+ if (sblk->scaler_blk.len)
dpu_debugfs_create_regset32("scaler_blk", 0400,
debugfs_root,
sblk->scaler_blk.base + cfg->base,
}
#endif
-struct dpu_hw_sspp *dpu_hw_sspp_init(const struct dpu_sspp_cfg *cfg,
- void __iomem *addr, const struct msm_mdss_data *mdss_data,
- const struct dpu_mdss_version *mdss_rev)
+struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev,
+ const struct dpu_sspp_cfg *cfg,
+ void __iomem *addr,
+ const struct msm_mdss_data *mdss_data,
+ const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_sspp *hw_pipe;
if (!addr)
return ERR_PTR(-EINVAL);
- hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL);
+ hw_pipe = drmm_kzalloc(dev, sizeof(*hw_pipe), GFP_KERNEL);
if (!hw_pipe)
return ERR_PTR(-ENOMEM);
return hw_pipe;
}
-
-void dpu_hw_sspp_destroy(struct dpu_hw_sspp *ctx)
-{
- kfree(ctx);
-}
-
#define DPU_SSPP_ROT_90 BIT(3)
#define DPU_SSPP_SOLID_FILL BIT(4)
-/**
- * Define all scaler feature bits in catalog
- */
-#define DPU_SSPP_SCALER (BIT(DPU_SSPP_SCALER_RGB) | \
- BIT(DPU_SSPP_SCALER_QSEED2) | \
- BIT(DPU_SSPP_SCALER_QSEED3) | \
- BIT(DPU_SSPP_SCALER_QSEED3LITE) | \
- BIT(DPU_SSPP_SCALER_QSEED4))
-
-/*
- * Define all CSC feature bits in catalog
- */
-#define DPU_SSPP_CSC_ANY (BIT(DPU_SSPP_CSC) | \
- BIT(DPU_SSPP_CSC_10BIT))
-
/**
* Component indices
*/
struct dpu_hw_scaler3_cfg *scaler3_cfg,
const struct dpu_format *format);
- /**
- * get_scaler_ver - get scaler h/w version
- * @ctx: Pointer to pipe context
- */
- u32 (*get_scaler_ver)(struct dpu_hw_sspp *ctx);
-
/**
* setup_cdp - setup client driven prefetch
* @pipe: Pointer to software pipe context
/**
* dpu_hw_sspp_init() - Initializes the sspp hw driver object.
* Should be called once before accessing every pipe.
+ * @dev: Corresponding device for devres management
* @cfg: Pipe catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
* @mdss_data: UBWC / MDSS configuration data
* @mdss_rev: dpu core's major and minor versions
*/
-struct dpu_hw_sspp *dpu_hw_sspp_init(const struct dpu_sspp_cfg *cfg,
- void __iomem *addr, const struct msm_mdss_data *mdss_data,
- const struct dpu_mdss_version *mdss_rev);
-
-/**
- * dpu_hw_sspp_destroy(): Destroys SSPP driver context
- * should be called during Hw pipe cleanup.
- * @ctx: Pointer to SSPP driver context returned by dpu_hw_sspp_init
- */
-void dpu_hw_sspp_destroy(struct dpu_hw_sspp *ctx);
+struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev,
+ const struct dpu_sspp_cfg *cfg,
+ void __iomem *addr,
+ const struct msm_mdss_data *mdss_data,
+ const struct dpu_mdss_version *mdss_rev);
int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms,
struct dentry *entry);
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
+#include <drm/drm_managed.h>
+
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_top.h"
ops->intf_audio_select = dpu_hw_intf_audio_select;
}
-struct dpu_hw_mdp *dpu_hw_mdptop_init(const struct dpu_mdp_cfg *cfg,
- void __iomem *addr,
- const struct dpu_mdss_cfg *m)
+struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
+ const struct dpu_mdp_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
{
struct dpu_hw_mdp *mdp;
if (!addr)
return ERR_PTR(-EINVAL);
- mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
+ mdp = drmm_kzalloc(dev, sizeof(*mdp), GFP_KERNEL);
if (!mdp)
return ERR_PTR(-ENOMEM);
return mdp;
}
-
-void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp)
-{
- kfree(mdp);
-}
-
/**
* dpu_hw_mdptop_init - initializes the top driver for the passed config
+ * @dev: Corresponding device for devres management
* @cfg: MDP TOP configuration from catalog
* @addr: Mapped register io address of MDP
* @m: Pointer to mdss catalog data
*/
-struct dpu_hw_mdp *dpu_hw_mdptop_init(const struct dpu_mdp_cfg *cfg,
- void __iomem *addr,
- const struct dpu_mdss_cfg *m);
+struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
+ const struct dpu_mdp_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp);
DPU_REG_WRITE(c, QSEED3_OP_MODE + scaler_offset, op_mode);
}
-u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
- u32 scaler_offset)
-{
- return DPU_REG_READ(c, QSEED3_HW_VERSION + scaler_offset);
-}
-
void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
u32 csc_reg_off,
const struct dpu_csc_cfg *data, bool csc10)
u32 scaler_offset, u32 scaler_version,
const struct dpu_format *format);
-u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
- u32 scaler_offset);
-
void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
u32 csc_reg_off,
const struct dpu_csc_cfg *data, bool csc10);
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
+#include <drm/drm_managed.h>
+
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_vbif.h"
ops->set_write_gather_en = dpu_hw_set_write_gather_en;
}
-struct dpu_hw_vbif *dpu_hw_vbif_init(const struct dpu_vbif_cfg *cfg,
- void __iomem *addr)
+struct dpu_hw_vbif *dpu_hw_vbif_init(struct drm_device *dev,
+ const struct dpu_vbif_cfg *cfg,
+ void __iomem *addr)
{
struct dpu_hw_vbif *c;
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
return c;
}
-
-void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif)
-{
- kfree(vbif);
-}
/**
* dpu_hw_vbif_init() - Initializes the VBIF driver for the passed
* VBIF catalog entry.
+ * @dev: Corresponding device for devres management
* @cfg: VBIF catalog entry for which driver object is required
* @addr: Mapped register io address of MDSS
*/
-struct dpu_hw_vbif *dpu_hw_vbif_init(const struct dpu_vbif_cfg *cfg,
- void __iomem *addr);
-
-void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif);
+struct dpu_hw_vbif *dpu_hw_vbif_init(struct drm_device *dev,
+ const struct dpu_vbif_cfg *cfg,
+ void __iomem *addr);
#endif /*_DPU_HW_VBIF_H */
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved
*/
+#include <drm/drm_managed.h>
+
#include "dpu_hw_mdss.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
ops->setup_clk_force_ctrl = dpu_hw_wb_setup_clk_force_ctrl;
}
-struct dpu_hw_wb *dpu_hw_wb_init(const struct dpu_wb_cfg *cfg,
- void __iomem *addr, const struct dpu_mdss_version *mdss_rev)
+struct dpu_hw_wb *dpu_hw_wb_init(struct drm_device *dev,
+ const struct dpu_wb_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_wb *c;
if (!addr)
return ERR_PTR(-EINVAL);
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
return c;
}
-
-void dpu_hw_wb_destroy(struct dpu_hw_wb *hw_wb)
-{
- kfree(hw_wb);
-}
/**
* dpu_hw_wb_init() - Initializes the writeback hw driver object.
+ * @dev: Corresponding device for devres management
* @cfg: wb_path catalog entry for which driver object is required
* @addr: mapped register io address of MDP
* @mdss_rev: dpu core's major and minor versions
* Return: Error code or allocated dpu_hw_wb context
*/
-struct dpu_hw_wb *dpu_hw_wb_init(const struct dpu_wb_cfg *cfg,
- void __iomem *addr, const struct dpu_mdss_version *mdss_rev);
-
-/**
- * dpu_hw_wb_destroy(): Destroy writeback hw driver object.
- * @hw_wb: Pointer to writeback hw driver object
- */
-void dpu_hw_wb_destroy(struct dpu_hw_wb *hw_wb);
+struct dpu_hw_wb *dpu_hw_wb_init(struct drm_device *dev,
+ const struct dpu_wb_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_rev);
#endif /*_DPU_HW_WB_H */
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
void *p = dpu_hw_util_get_log_mask_ptr();
struct dentry *entry;
- struct drm_device *dev;
- struct msm_drm_private *priv;
- int i;
if (!p)
return -EINVAL;
if (minor->type != DRM_MINOR_PRIMARY)
return 0;
- dev = dpu_kms->dev;
- priv = dev->dev_private;
-
entry = debugfs_create_dir("debug", minor->debugfs_root);
debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);
dpu_debugfs_core_irq_init(dpu_kms, entry);
dpu_debugfs_sspp_init(dpu_kms, entry);
- for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
- if (priv->dp[i])
- msm_dp_debugfs_init(priv->dp[i], minor);
- }
-
return dpu_core_perf_debugfs_init(dpu_kms, entry);
}
#endif
{
int i;
- if (dpu_kms->hw_intr)
- dpu_hw_intr_destroy(dpu_kms->hw_intr);
dpu_kms->hw_intr = NULL;
/* safe to call these more than once during shutdown */
_dpu_kms_mmu_destroy(dpu_kms);
- if (dpu_kms->catalog) {
- for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
- if (dpu_kms->hw_vbif[i]) {
- dpu_hw_vbif_destroy(dpu_kms->hw_vbif[i]);
- dpu_kms->hw_vbif[i] = NULL;
- }
- }
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ dpu_kms->hw_vbif[i] = NULL;
}
- if (dpu_kms->rm_init)
- dpu_rm_destroy(&dpu_kms->rm);
- dpu_kms->rm_init = false;
-
dpu_kms->catalog = NULL;
- if (dpu_kms->hw_mdp)
- dpu_hw_mdp_destroy(dpu_kms->hw_mdp);
dpu_kms->hw_mdp = NULL;
}
{
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
- int i;
if (!dpu_kms || !dpu_kms->dev)
return -EINVAL;
if (!priv)
return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(priv->dp); i++)
- msm_dp_irq_postinstall(priv->dp[i]);
-
return 0;
}
if (!dpu_kms->catalog) {
DPU_ERROR("device config not known!\n");
rc = -EINVAL;
- goto power_error;
+ goto err_pm_put;
}
/*
rc = _dpu_kms_mmu_init(dpu_kms);
if (rc) {
DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
- goto power_error;
+ goto err_pm_put;
}
dpu_kms->mdss = msm_mdss_get_mdss_data(dpu_kms->pdev->dev.parent);
if (IS_ERR(dpu_kms->mdss)) {
rc = PTR_ERR(dpu_kms->mdss);
DPU_ERROR("failed to get MDSS data: %d\n", rc);
- goto power_error;
+ goto err_pm_put;
}
if (!dpu_kms->mdss) {
rc = -EINVAL;
DPU_ERROR("NULL MDSS data\n");
- goto power_error;
+ goto err_pm_put;
}
- rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mdss, dpu_kms->mmio);
+ rc = dpu_rm_init(dev, &dpu_kms->rm, dpu_kms->catalog, dpu_kms->mdss, dpu_kms->mmio);
if (rc) {
DPU_ERROR("rm init failed: %d\n", rc);
- goto power_error;
+ goto err_pm_put;
}
- dpu_kms->rm_init = true;
-
- dpu_kms->hw_mdp = dpu_hw_mdptop_init(dpu_kms->catalog->mdp,
+ dpu_kms->hw_mdp = dpu_hw_mdptop_init(dev,
+ dpu_kms->catalog->mdp,
dpu_kms->mmio,
dpu_kms->catalog);
if (IS_ERR(dpu_kms->hw_mdp)) {
rc = PTR_ERR(dpu_kms->hw_mdp);
DPU_ERROR("failed to get hw_mdp: %d\n", rc);
dpu_kms->hw_mdp = NULL;
- goto power_error;
+ goto err_pm_put;
}
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
struct dpu_hw_vbif *hw;
const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
- hw = dpu_hw_vbif_init(vbif, dpu_kms->vbif[vbif->id]);
+ hw = dpu_hw_vbif_init(dev, vbif, dpu_kms->vbif[vbif->id]);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed to init vbif %d: %d\n", vbif->id, rc);
- goto power_error;
+ goto err_pm_put;
}
dpu_kms->hw_vbif[vbif->id] = hw;
rc = dpu_core_perf_init(&dpu_kms->perf, dpu_kms->catalog->perf, max_core_clk_rate);
if (rc) {
DPU_ERROR("failed to init perf %d\n", rc);
- goto perf_err;
+ goto err_pm_put;
}
- dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
- if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
+ dpu_kms->hw_intr = dpu_hw_intr_init(dev, dpu_kms->mmio, dpu_kms->catalog);
+ if (IS_ERR(dpu_kms->hw_intr)) {
rc = PTR_ERR(dpu_kms->hw_intr);
DPU_ERROR("hw_intr init failed: %d\n", rc);
dpu_kms->hw_intr = NULL;
- goto hw_intr_init_err;
+ goto err_pm_put;
}
dev->mode_config.min_width = 0;
rc = _dpu_kms_drm_obj_init(dpu_kms);
if (rc) {
DPU_ERROR("modeset init failed: %d\n", rc);
- goto drm_obj_init_err;
+ goto err_pm_put;
}
dpu_vbif_init_memtypes(dpu_kms);
return 0;
-drm_obj_init_err:
-hw_intr_init_err:
-perf_err:
-power_error:
+err_pm_put:
pm_runtime_put_sync(&dpu_kms->pdev->dev);
error:
_dpu_kms_hw_destroy(dpu_kms);
static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,msm8998-dpu", .data = &dpu_msm8998_cfg, },
{ .compatible = "qcom,qcm2290-dpu", .data = &dpu_qcm2290_cfg, },
+ { .compatible = "qcom,sdm670-dpu", .data = &dpu_sdm670_cfg, },
{ .compatible = "qcom,sdm845-dpu", .data = &dpu_sdm845_cfg, },
{ .compatible = "qcom,sc7180-dpu", .data = &dpu_sc7180_cfg, },
{ .compatible = "qcom,sc7280-dpu", .data = &dpu_sc7280_cfg, },
{ .compatible = "qcom,sm8350-dpu", .data = &dpu_sm8350_cfg, },
{ .compatible = "qcom,sm8450-dpu", .data = &dpu_sm8450_cfg, },
{ .compatible = "qcom,sm8550-dpu", .data = &dpu_sm8550_cfg, },
+ { .compatible = "qcom,sm8650-dpu", .data = &dpu_sm8650_cfg, },
{}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);
struct drm_private_obj global_state;
struct dpu_rm rm;
- bool rm_init;
struct dpu_hw_vbif *hw_vbif[VBIF_MAX];
struct dpu_hw_mdp *hw_mdp;
struct dpu_plane {
struct drm_plane base;
- struct mutex lock;
-
enum dpu_sspp pipe;
uint32_t color_fill;
scale_cfg->src_height[i] /= chroma_subsmpl_v;
}
- if (pipe_hw->cap->features &
- BIT(DPU_SSPP_SCALER_QSEED4)) {
+ if (pipe_hw->cap->sblk->scaler_blk.version >= 0x3000) {
scale_cfg->preload_x[i] = DPU_QSEED4_DEFAULT_PRELOAD_H;
scale_cfg->preload_y[i] = DPU_QSEED4_DEFAULT_PRELOAD_V;
} else {
min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
if (DPU_FORMAT_IS_YUV(fmt) &&
- (!(pipe->sspp->cap->features & DPU_SSPP_SCALER) ||
- !(pipe->sspp->cap->features & DPU_SSPP_CSC_ANY))) {
+ (!pipe->sspp->cap->sblk->scaler_blk.len ||
+ !pipe->sspp->cap->sblk->csc_blk.len)) {
DPU_DEBUG_PLANE(pdpu,
"plane doesn't have scaler/csc for yuv\n");
return -EINVAL;
plane);
int ret = 0, min_scale;
struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
+ u64 max_mdp_clk_rate = kms->perf.max_core_clk_rate;
struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
struct dpu_sw_pipe *pipe = &pstate->pipe;
struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
max_linewidth = pdpu->catalog->caps->max_linewidth;
- if (drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) {
+ if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) ||
+ _dpu_plane_calc_clk(&crtc_state->adjusted_mode, pipe_cfg) > max_mdp_clk_rate) {
/*
* In parallel multirect case only the half of the usual width
* is supported for tiled formats. If we are here, we know that
* full width is more than max_linewidth, thus each rect is
* wider than allowed.
*/
- if (DPU_FORMAT_IS_UBWC(fmt)) {
+ if (DPU_FORMAT_IS_UBWC(fmt) &&
+ drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) {
DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, tiled format\n",
DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
return -E2BIG;
}
}
-static void dpu_plane_destroy(struct drm_plane *plane)
-{
- struct dpu_plane *pdpu = plane ? to_dpu_plane(plane) : NULL;
- struct dpu_plane_state *pstate;
-
- DPU_DEBUG_PLANE(pdpu, "\n");
-
- if (pdpu) {
- pstate = to_dpu_plane_state(plane->state);
- _dpu_plane_set_qos_ctrl(plane, &pstate->pipe, false);
-
- if (pstate->r_pipe.sspp)
- _dpu_plane_set_qos_ctrl(plane, &pstate->r_pipe, false);
-
- mutex_destroy(&pdpu->lock);
-
- /* this will destroy the states as well */
- drm_plane_cleanup(plane);
-
- kfree(pdpu);
- }
-}
-
static void dpu_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
static const struct drm_plane_funcs dpu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = dpu_plane_destroy,
.reset = dpu_plane_reset,
.atomic_duplicate_state = dpu_plane_duplicate_state,
.atomic_destroy_state = dpu_plane_destroy_state,
struct dpu_hw_sspp *pipe_hw;
uint32_t num_formats;
uint32_t supported_rotations;
- int ret = -EINVAL;
-
- /* create and zero local structure */
- pdpu = kzalloc(sizeof(*pdpu), GFP_KERNEL);
- if (!pdpu) {
- DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe);
- ret = -ENOMEM;
- return ERR_PTR(ret);
- }
-
- /* cache local stuff for later */
- plane = &pdpu->base;
- pdpu->pipe = pipe;
+ int ret;
/* initialize underlying h/w driver */
pipe_hw = dpu_rm_get_sspp(&kms->rm, pipe);
if (!pipe_hw || !pipe_hw->cap || !pipe_hw->cap->sblk) {
DPU_ERROR("[%u]SSPP is invalid\n", pipe);
- goto clean_plane;
+ return ERR_PTR(-EINVAL);
}
format_list = pipe_hw->cap->sblk->format_list;
num_formats = pipe_hw->cap->sblk->num_formats;
- ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
+ pdpu = drmm_universal_plane_alloc(dev, struct dpu_plane, base,
+ 0xff, &dpu_plane_funcs,
format_list, num_formats,
supported_format_modifiers, type, NULL);
- if (ret)
- goto clean_plane;
+ if (IS_ERR(pdpu))
+ return ERR_CAST(pdpu);
+
+ /* cache local stuff for later */
+ plane = &pdpu->base;
+ pdpu->pipe = pipe;
pdpu->catalog = kms->catalog;
/* success! finalize initialization */
drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
- mutex_init(&pdpu->lock);
-
DPU_DEBUG("%s created for pipe:%u id:%u\n", plane->name,
pipe, plane->base.id);
return plane;
-
-clean_plane:
- kfree(pdpu);
- return ERR_PTR(ret);
}
struct msm_display_topology topology;
};
-int dpu_rm_destroy(struct dpu_rm *rm)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(rm->dspp_blks); i++) {
- struct dpu_hw_dspp *hw;
-
- if (rm->dspp_blks[i]) {
- hw = to_dpu_hw_dspp(rm->dspp_blks[i]);
- dpu_hw_dspp_destroy(hw);
- }
- }
- for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) {
- struct dpu_hw_pingpong *hw;
-
- if (rm->pingpong_blks[i]) {
- hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]);
- dpu_hw_pingpong_destroy(hw);
- }
- }
- for (i = 0; i < ARRAY_SIZE(rm->merge_3d_blks); i++) {
- struct dpu_hw_merge_3d *hw;
-
- if (rm->merge_3d_blks[i]) {
- hw = to_dpu_hw_merge_3d(rm->merge_3d_blks[i]);
- dpu_hw_merge_3d_destroy(hw);
- }
- }
- for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) {
- struct dpu_hw_mixer *hw;
-
- if (rm->mixer_blks[i]) {
- hw = to_dpu_hw_mixer(rm->mixer_blks[i]);
- dpu_hw_lm_destroy(hw);
- }
- }
- for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) {
- struct dpu_hw_ctl *hw;
-
- if (rm->ctl_blks[i]) {
- hw = to_dpu_hw_ctl(rm->ctl_blks[i]);
- dpu_hw_ctl_destroy(hw);
- }
- }
- for (i = 0; i < ARRAY_SIZE(rm->hw_intf); i++)
- dpu_hw_intf_destroy(rm->hw_intf[i]);
-
- for (i = 0; i < ARRAY_SIZE(rm->dsc_blks); i++) {
- struct dpu_hw_dsc *hw;
-
- if (rm->dsc_blks[i]) {
- hw = to_dpu_hw_dsc(rm->dsc_blks[i]);
- dpu_hw_dsc_destroy(hw);
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(rm->hw_wb); i++)
- dpu_hw_wb_destroy(rm->hw_wb[i]);
-
- for (i = 0; i < ARRAY_SIZE(rm->hw_sspp); i++)
- dpu_hw_sspp_destroy(rm->hw_sspp[i]);
-
- return 0;
-}
-
-int dpu_rm_init(struct dpu_rm *rm,
+int dpu_rm_init(struct drm_device *dev,
+ struct dpu_rm *rm,
const struct dpu_mdss_cfg *cat,
const struct msm_mdss_data *mdss_data,
void __iomem *mmio)
struct dpu_hw_mixer *hw;
const struct dpu_lm_cfg *lm = &cat->mixer[i];
- hw = dpu_hw_lm_init(lm, mmio);
+ hw = dpu_hw_lm_init(dev, lm, mmio);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed lm object creation: err %d\n", rc);
struct dpu_hw_merge_3d *hw;
const struct dpu_merge_3d_cfg *merge_3d = &cat->merge_3d[i];
- hw = dpu_hw_merge_3d_init(merge_3d, mmio);
+ hw = dpu_hw_merge_3d_init(dev, merge_3d, mmio);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed merge_3d object creation: err %d\n",
struct dpu_hw_pingpong *hw;
const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];
- hw = dpu_hw_pingpong_init(pp, mmio, cat->mdss_ver);
+ hw = dpu_hw_pingpong_init(dev, pp, mmio, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed pingpong object creation: err %d\n",
struct dpu_hw_intf *hw;
const struct dpu_intf_cfg *intf = &cat->intf[i];
- hw = dpu_hw_intf_init(intf, mmio, cat->mdss_ver);
+ hw = dpu_hw_intf_init(dev, intf, mmio, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed intf object creation: err %d\n", rc);
struct dpu_hw_wb *hw;
const struct dpu_wb_cfg *wb = &cat->wb[i];
- hw = dpu_hw_wb_init(wb, mmio, cat->mdss_ver);
+ hw = dpu_hw_wb_init(dev, wb, mmio, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed wb object creation: err %d\n", rc);
struct dpu_hw_ctl *hw;
const struct dpu_ctl_cfg *ctl = &cat->ctl[i];
- hw = dpu_hw_ctl_init(ctl, mmio, cat->mixer_count, cat->mixer);
+ hw = dpu_hw_ctl_init(dev, ctl, mmio, cat->mixer_count, cat->mixer);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed ctl object creation: err %d\n", rc);
struct dpu_hw_dspp *hw;
const struct dpu_dspp_cfg *dspp = &cat->dspp[i];
- hw = dpu_hw_dspp_init(dspp, mmio);
+ hw = dpu_hw_dspp_init(dev, dspp, mmio);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed dspp object creation: err %d\n", rc);
const struct dpu_dsc_cfg *dsc = &cat->dsc[i];
if (test_bit(DPU_DSC_HW_REV_1_2, &dsc->features))
- hw = dpu_hw_dsc_init_1_2(dsc, mmio);
+ hw = dpu_hw_dsc_init_1_2(dev, dsc, mmio);
else
- hw = dpu_hw_dsc_init(dsc, mmio);
+ hw = dpu_hw_dsc_init(dev, dsc, mmio);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
struct dpu_hw_sspp *hw;
const struct dpu_sspp_cfg *sspp = &cat->sspp[i];
- hw = dpu_hw_sspp_init(sspp, mmio, mdss_data, cat->mdss_ver);
+ hw = dpu_hw_sspp_init(dev, sspp, mmio, mdss_data, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed sspp object creation: err %d\n", rc);
return 0;
fail:
- dpu_rm_destroy(rm);
-
return rc ? rc : -EFAULT;
}
/**
* dpu_rm_init - Read hardware catalog and create reservation tracking objects
* for all HW blocks.
+ * @dev: Corresponding device for devres management
* @rm: DPU Resource Manager handle
* @cat: Pointer to hardware catalog
* @mdss_data: Pointer to MDSS / UBWC configuration
* @mmio: mapped register io address of MDP
* @Return: 0 on Success otherwise -ERROR
*/
-int dpu_rm_init(struct dpu_rm *rm,
+int dpu_rm_init(struct drm_device *dev,
+ struct dpu_rm *rm,
const struct dpu_mdss_cfg *cat,
const struct msm_mdss_data *mdss_data,
void __iomem *mmio);
-/**
- * dpu_rm_destroy - Free all memory allocated by dpu_rm_init
- * @rm: DPU Resource Manager handle
- * @Return: 0 on Success otherwise -ERROR
- */
-int dpu_rm_destroy(struct dpu_rm *rm);
-
/**
* dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
* the use connections and user requirements, specified through related
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
drm_gem_object_put(val);
}
-static void mdp4_crtc_destroy(struct drm_crtc *crtc)
-{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-
- drm_crtc_cleanup(crtc);
- drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
-
- kfree(mdp4_crtc);
-}
-
/* statically (for now) map planes to mixer stage (z-order): */
static const int idxs[] = {
[VG1] = 1,
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ unsigned long flags;
DBG("%s", mdp4_crtc->name);
mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
mdp4_disable(mdp4_kms);
+ if (crtc->state->event && !crtc->state->active) {
+ WARN_ON(mdp4_crtc->event);
+ spin_lock_irqsave(&mdp4_kms->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&mdp4_kms->dev->event_lock, flags);
+ }
+
mdp4_crtc->enabled = false;
}
static const struct drm_crtc_funcs mdp4_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
- .destroy = mdp4_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.cursor_set = mdp4_crtc_cursor_set,
.cursor_move = mdp4_crtc_cursor_move,
"DMA_P", "DMA_S", "DMA_E",
};
+static void mdp4_crtc_flip_cleanup(struct drm_device *dev, void *ptr)
+{
+ struct mdp4_crtc *mdp4_crtc = ptr;
+
+ drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
+}
+
/* initialize crtc */
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
struct drm_plane *plane, int id, int ovlp_id,
{
struct drm_crtc *crtc = NULL;
struct mdp4_crtc *mdp4_crtc;
+ int ret;
- mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
- if (!mdp4_crtc)
- return ERR_PTR(-ENOMEM);
+ mdp4_crtc = drmm_crtc_alloc_with_planes(dev, struct mdp4_crtc, base,
+ plane, NULL,
+ &mdp4_crtc_funcs, NULL);
+ if (IS_ERR(mdp4_crtc))
+ return ERR_CAST(mdp4_crtc);
crtc = &mdp4_crtc->base;
drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
"unref cursor", unref_cursor_worker);
+ ret = drmm_add_action_or_reset(dev, mdp4_crtc_flip_cleanup, mdp4_crtc);
+ if (ret)
+ return ERR_PTR(ret);
- drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs,
- NULL);
drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
return crtc;
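
The flip-work queue still needs an explicit cleanup step even though the CRTC allocation itself is now drm-managed; the drmm action registered above takes care of that. A small sketch of the same pattern with hypothetical names:

#include <drm/drm_device.h>
#include <drm/drm_managed.h>

/* hypothetical state that needs an explicit teardown step */
struct baz_state {
	bool initialized;
};

static void baz_cleanup(struct drm_device *dev, void *ptr)
{
	struct baz_state *baz = ptr;

	/* runs when the drm_device is released (or if registration fails) */
	baz->initialized = false;
}

static int baz_setup(struct drm_device *dev, struct baz_state *baz)
{
	baz->initialized = true;

	/* on registration failure the cleanup action is invoked immediately */
	return drmm_add_action_or_reset(dev, baz_cleanup, baz);
}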
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
-static void mdp4_dsi_encoder_destroy(struct drm_encoder *encoder)
-{
- struct mdp4_dsi_encoder *mdp4_dsi_encoder = to_mdp4_dsi_encoder(encoder);
-
- drm_encoder_cleanup(encoder);
- kfree(mdp4_dsi_encoder);
-}
-
-static const struct drm_encoder_funcs mdp4_dsi_encoder_funcs = {
- .destroy = mdp4_dsi_encoder_destroy,
-};
-
static void mdp4_dsi_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
/* initialize encoder */
struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
{
- struct drm_encoder *encoder = NULL;
+ struct drm_encoder *encoder;
struct mdp4_dsi_encoder *mdp4_dsi_encoder;
- int ret;
- mdp4_dsi_encoder = kzalloc(sizeof(*mdp4_dsi_encoder), GFP_KERNEL);
- if (!mdp4_dsi_encoder) {
- ret = -ENOMEM;
- goto fail;
- }
+ mdp4_dsi_encoder = drmm_encoder_alloc(dev, struct mdp4_dsi_encoder, base,
+ NULL, DRM_MODE_ENCODER_DSI, NULL);
+ if (IS_ERR(mdp4_dsi_encoder))
+ return ERR_CAST(mdp4_dsi_encoder);
encoder = &mdp4_dsi_encoder->base;
- drm_encoder_init(dev, encoder, &mdp4_dsi_encoder_funcs,
- DRM_MODE_ENCODER_DSI, NULL);
drm_encoder_helper_add(encoder, &mdp4_dsi_encoder_helper_funcs);
return encoder;
-
-fail:
- if (encoder)
- mdp4_dsi_encoder_destroy(encoder);
-
- return ERR_PTR(ret);
}
#endif /* CONFIG_DRM_MSM_DSI */
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
-static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder)
-{
- struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
- drm_encoder_cleanup(encoder);
- kfree(mdp4_dtv_encoder);
-}
-
-static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = {
- .destroy = mdp4_dtv_encoder_destroy,
-};
-
static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
/* initialize encoder */
struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
{
- struct drm_encoder *encoder = NULL;
+ struct drm_encoder *encoder;
struct mdp4_dtv_encoder *mdp4_dtv_encoder;
- int ret;
- mdp4_dtv_encoder = kzalloc(sizeof(*mdp4_dtv_encoder), GFP_KERNEL);
- if (!mdp4_dtv_encoder) {
- ret = -ENOMEM;
- goto fail;
- }
+ mdp4_dtv_encoder = drmm_encoder_alloc(dev, struct mdp4_dtv_encoder, base,
+ NULL, DRM_MODE_ENCODER_TMDS, NULL);
+ if (IS_ERR(mdp4_dtv_encoder))
+ return ERR_CAST(mdp4_dtv_encoder);
encoder = &mdp4_dtv_encoder->base;
- drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get hdmi_clk\n");
- ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
- goto fail;
+ return ERR_CAST(mdp4_dtv_encoder->hdmi_clk);
}
mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk");
if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get tv_clk\n");
- ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
- goto fail;
+ return ERR_CAST(mdp4_dtv_encoder->mdp_clk);
}
return encoder;
-
-fail:
- if (encoder)
- mdp4_dtv_encoder_destroy(encoder);
-
- return ERR_PTR(ret);
}
struct drm_panel *panel;
struct clk *lcdc_clk;
unsigned long int pixclock;
- struct regulator *regs[3];
+ struct regulator_bulk_data regs[3];
bool enabled;
uint32_t bsc;
};
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
-static void mdp4_lcdc_encoder_destroy(struct drm_encoder *encoder)
-{
- struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
- to_mdp4_lcdc_encoder(encoder);
- drm_encoder_cleanup(encoder);
- kfree(mdp4_lcdc_encoder);
-}
-
-static const struct drm_encoder_funcs mdp4_lcdc_encoder_funcs = {
- .destroy = mdp4_lcdc_encoder_destroy,
-};
-
/* this should probably be a helper: */
static struct drm_connector *get_connector(struct drm_encoder *encoder)
{
static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
to_mdp4_lcdc_encoder(encoder);
struct mdp4_kms *mdp4_kms = get_kms(encoder);
struct drm_panel *panel;
- int i, ret;
if (WARN_ON(!mdp4_lcdc_encoder->enabled))
return;
clk_disable_unprepare(mdp4_lcdc_encoder->lcdc_clk);
- for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
- ret = regulator_disable(mdp4_lcdc_encoder->regs[i]);
- if (ret)
- DRM_DEV_ERROR(dev->dev, "failed to disable regulator: %d\n", ret);
- }
+ regulator_bulk_disable(ARRAY_SIZE(mdp4_lcdc_encoder->regs),
+ mdp4_lcdc_encoder->regs);
mdp4_lcdc_encoder->enabled = false;
}
struct mdp4_kms *mdp4_kms = get_kms(encoder);
struct drm_panel *panel;
uint32_t config;
- int i, ret;
+ int ret;
if (WARN_ON(mdp4_lcdc_encoder->enabled))
return;
mdp4_crtc_set_config(encoder->crtc, config);
mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0);
- for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
- ret = regulator_enable(mdp4_lcdc_encoder->regs[i]);
- if (ret)
- DRM_DEV_ERROR(dev->dev, "failed to enable regulator: %d\n", ret);
- }
+ ret = regulator_bulk_enable(ARRAY_SIZE(mdp4_lcdc_encoder->regs),
+ mdp4_lcdc_encoder->regs);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to enable regulators: %d\n", ret);
DBG("setting lcdc_clk=%lu", pc);
ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc);
struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
struct device_node *panel_node)
{
- struct drm_encoder *encoder = NULL;
+ struct drm_encoder *encoder;
struct mdp4_lcdc_encoder *mdp4_lcdc_encoder;
- struct regulator *reg;
int ret;
- mdp4_lcdc_encoder = kzalloc(sizeof(*mdp4_lcdc_encoder), GFP_KERNEL);
- if (!mdp4_lcdc_encoder) {
- ret = -ENOMEM;
- goto fail;
- }
+ mdp4_lcdc_encoder = drmm_encoder_alloc(dev, struct mdp4_lcdc_encoder, base,
+ NULL, DRM_MODE_ENCODER_LVDS, NULL);
+ if (IS_ERR(mdp4_lcdc_encoder))
+ return ERR_CAST(mdp4_lcdc_encoder);
mdp4_lcdc_encoder->panel_node = panel_node;
encoder = &mdp4_lcdc_encoder->base;
- drm_encoder_init(dev, encoder, &mdp4_lcdc_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs);
/* TODO: do we need different pll in other cases? */
mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev);
if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n");
- ret = PTR_ERR(mdp4_lcdc_encoder->lcdc_clk);
- goto fail;
+ return ERR_CAST(mdp4_lcdc_encoder->lcdc_clk);
}
/* TODO: different regulators in other cases? */
- reg = devm_regulator_get(dev->dev, "lvds-vccs-3p3v");
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
- DRM_DEV_ERROR(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
- goto fail;
- }
- mdp4_lcdc_encoder->regs[0] = reg;
-
- reg = devm_regulator_get(dev->dev, "lvds-pll-vdda");
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
- DRM_DEV_ERROR(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
- goto fail;
- }
- mdp4_lcdc_encoder->regs[1] = reg;
+ mdp4_lcdc_encoder->regs[0].supply = "lvds-vccs-3p3v";

+ mdp4_lcdc_encoder->regs[1].supply = "lvds-pll-vdda";
+ mdp4_lcdc_encoder->regs[2].supply = "lvds-vdda";
- reg = devm_regulator_get(dev->dev, "lvds-vdda");
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
- DRM_DEV_ERROR(dev->dev, "failed to get lvds-vdda: %d\n", ret);
- goto fail;
- }
- mdp4_lcdc_encoder->regs[2] = reg;
+ ret = devm_regulator_bulk_get(dev->dev,
+ ARRAY_SIZE(mdp4_lcdc_encoder->regs),
+ mdp4_lcdc_encoder->regs);
+ if (ret)
+ return ERR_PTR(ret);
return encoder;
-
-fail:
- if (encoder)
- mdp4_lcdc_encoder_destroy(encoder);
-
- return ERR_PTR(ret);
}
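
The per-regulator devm_regulator_get()/regulator_enable() loops are replaced by the regulator bulk API: fill in the .supply names once, then a single call fetches, enables, or disables the whole set. A hedged sketch of that flow with hypothetical supply names:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

/* hypothetical consumer with two supplies */
struct lvds_enc {
	struct regulator_bulk_data regs[2];
};

static int lvds_enc_get_supplies(struct device *dev, struct lvds_enc *enc)
{
	enc->regs[0].supply = "vdd";	/* hypothetical supply names */
	enc->regs[1].supply = "vdda";

	/* one call fetches (and devres later releases) all supplies */
	return devm_regulator_bulk_get(dev, ARRAY_SIZE(enc->regs), enc->regs);
}

static int lvds_enc_power_on(struct lvds_enc *enc)
{
	return regulator_bulk_enable(ARRAY_SIZE(enc->regs), enc->regs);
}

static void lvds_enc_power_off(struct lvds_enc *enc)
{
	regulator_bulk_disable(ARRAY_SIZE(enc->regs), enc->regs);
}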
return cfg_handler->revision;
}
-void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_handler)
-{
- kfree(cfg_handler);
-}
-
struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
uint32_t major, uint32_t minor)
{
struct drm_device *dev = mdp5_kms->dev;
struct mdp5_cfg_handler *cfg_handler;
const struct mdp5_cfg_handler *cfg_handlers;
- int i, ret = 0, num_handlers;
+ int i, num_handlers;
- cfg_handler = kzalloc(sizeof(*cfg_handler), GFP_KERNEL);
+ cfg_handler = devm_kzalloc(dev->dev, sizeof(*cfg_handler), GFP_KERNEL);
if (unlikely(!cfg_handler)) {
- ret = -ENOMEM;
- goto fail;
+ return ERR_PTR(-ENOMEM);
}
switch (major) {
default:
DRM_DEV_ERROR(dev->dev, "unexpected MDP major version: v%d.%d\n",
major, minor);
- ret = -ENXIO;
- goto fail;
+ return ERR_PTR(-ENXIO);
}
/* only after mdp5_cfg global pointer's init can we access the hw */
if (unlikely(!mdp5_cfg)) {
DRM_DEV_ERROR(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
major, minor);
- ret = -ENXIO;
- goto fail;
+ return ERR_PTR(-ENXIO);
}
cfg_handler->revision = minor;
DBG("MDP5: %s hw config selected", mdp5_cfg->name);
return cfg_handler;
-
-fail:
- if (cfg_handler)
- mdp5_cfg_destroy(cfg_handler);
-
- return ERR_PTR(ret);
}
struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
uint32_t major, uint32_t minor);
-void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd);
#endif /* __MDP5_CFG_H__ */
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
drm_gem_object_put(val);
}
-static void mdp5_crtc_destroy(struct drm_crtc *crtc)
+static void mdp5_crtc_flip_cleanup(struct drm_device *dev, void *ptr)
{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc *mdp5_crtc = ptr;
- drm_crtc_cleanup(crtc);
drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);
-
- kfree(mdp5_crtc);
}
static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
.set_config = drm_atomic_helper_set_config,
- .destroy = mdp5_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.reset = mdp5_crtc_reset,
.atomic_duplicate_state = mdp5_crtc_duplicate_state,
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
- .destroy = mdp5_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.reset = mdp5_crtc_reset,
.atomic_duplicate_state = mdp5_crtc_duplicate_state,
{
struct drm_crtc *crtc = NULL;
struct mdp5_crtc *mdp5_crtc;
+ int ret;
- mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
- if (!mdp5_crtc)
- return ERR_PTR(-ENOMEM);
+ mdp5_crtc = drmm_crtc_alloc_with_planes(dev, struct mdp5_crtc, base,
+ plane, cursor_plane,
+ cursor_plane ?
+ &mdp5_crtc_no_lm_cursor_funcs :
+ &mdp5_crtc_funcs,
+ NULL);
+ if (IS_ERR(mdp5_crtc))
+ return ERR_CAST(mdp5_crtc);
crtc = &mdp5_crtc->base;
mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true;
- drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
- cursor_plane ?
- &mdp5_crtc_no_lm_cursor_funcs :
- &mdp5_crtc_funcs, NULL);
-
drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
"unref cursor", unref_cursor_worker);
+ ret = drmm_add_action_or_reset(dev, mdp5_crtc_flip_cleanup, mdp5_crtc);
+ if (ret)
+ return ERR_PTR(ret);
drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
}
}
-void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
-{
- kfree(ctl_mgr);
-}
-
struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
{
unsigned long flags;
int c, ret;
- ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
+ ctl_mgr = devm_kzalloc(dev->dev, sizeof(*ctl_mgr), GFP_KERNEL);
if (!ctl_mgr) {
DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n");
- ret = -ENOMEM;
- goto fail;
+ return ERR_PTR(-ENOMEM);
}
if (WARN_ON(ctl_cfg->count > MAX_CTL)) {
DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n",
ctl_cfg->count);
- ret = -ENOSPC;
- goto fail;
+ return ERR_PTR(-ENOSPC);
}
/* initialize the CTL manager: */
DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c);
ret = -EINVAL;
spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
- goto fail;
+ return ERR_PTR(ret);
}
ctl->ctlm = ctl_mgr;
ctl->id = c;
DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
return ctl_mgr;
-
-fail:
- if (ctl_mgr)
- mdp5_ctlm_destroy(ctl_mgr);
-
- return ERR_PTR(ret);
}
struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd);
void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm);
-void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
/*
* CTL prototypes:
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
-static void mdp5_encoder_destroy(struct drm_encoder *encoder)
-{
- struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
- drm_encoder_cleanup(encoder);
- kfree(mdp5_encoder);
-}
-
-static const struct drm_encoder_funcs mdp5_encoder_funcs = {
- .destroy = mdp5_encoder_destroy,
-};
-
static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
struct mdp5_encoder *mdp5_encoder;
int enc_type = (intf->type == INTF_DSI) ?
DRM_MODE_ENCODER_DSI : DRM_MODE_ENCODER_TMDS;
- int ret;
- mdp5_encoder = kzalloc(sizeof(*mdp5_encoder), GFP_KERNEL);
- if (!mdp5_encoder) {
- ret = -ENOMEM;
- goto fail;
- }
+ mdp5_encoder = drmm_encoder_alloc(dev, struct mdp5_encoder, base,
+ NULL, enc_type, NULL);
+ if (IS_ERR(mdp5_encoder))
+ return ERR_CAST(mdp5_encoder);
encoder = &mdp5_encoder->base;
mdp5_encoder->ctl = ctl;
spin_lock_init(&mdp5_encoder->intf_lock);
- drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, enc_type, NULL);
-
drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
return encoder;
-
-fail:
- if (encoder)
- mdp5_encoder_destroy(encoder);
-
- return ERR_PTR(ret);
}
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct msm_gem_address_space *aspace = kms->aspace;
- int i;
-
- for (i = 0; i < mdp5_kms->num_hwmixers; i++)
- mdp5_mixer_destroy(mdp5_kms->hwmixers[i]);
-
- for (i = 0; i < mdp5_kms->num_hwpipes; i++)
- mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);
if (aspace) {
aspace->mmu->funcs->detach(aspace->mmu);
static void mdp5_destroy(struct mdp5_kms *mdp5_kms)
{
- int i;
-
- if (mdp5_kms->ctlm)
- mdp5_ctlm_destroy(mdp5_kms->ctlm);
- if (mdp5_kms->smp)
- mdp5_smp_destroy(mdp5_kms->smp);
- if (mdp5_kms->cfg)
- mdp5_cfg_destroy(mdp5_kms->cfg);
-
- for (i = 0; i < mdp5_kms->num_intfs; i++)
- kfree(mdp5_kms->intfs[i]);
-
if (mdp5_kms->rpm_enabled)
pm_runtime_disable(&mdp5_kms->pdev->dev);
for (i = 0; i < cnt; i++) {
struct mdp5_hw_pipe *hwpipe;
- hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
+ hwpipe = mdp5_pipe_init(dev, pipes[i], offsets[i], caps);
if (IS_ERR(hwpipe)) {
ret = PTR_ERR(hwpipe);
DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n",
for (i = 0; i < hw_cfg->lm.count; i++) {
struct mdp5_hw_mixer *mixer;
- mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
+ mixer = mdp5_mixer_init(dev, &hw_cfg->lm.instances[i]);
if (IS_ERR(mixer)) {
ret = PTR_ERR(mixer);
DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n",
if (intf_types[i] == INTF_DISABLED)
continue;
- intf = kzalloc(sizeof(*intf), GFP_KERNEL);
+ intf = devm_kzalloc(dev->dev, sizeof(*intf), GFP_KERNEL);
if (!intf) {
DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i);
return -ENOMEM;
return 0;
}
-void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer)
-{
- kfree(mixer);
-}
-
static const char * const mixer_names[] = {
"LM0", "LM1", "LM2", "LM3", "LM4", "LM5",
};
-struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm)
+struct mdp5_hw_mixer *mdp5_mixer_init(struct drm_device *dev,
+ const struct mdp5_lm_instance *lm)
{
struct mdp5_hw_mixer *mixer;
- mixer = kzalloc(sizeof(*mixer), GFP_KERNEL);
+ mixer = devm_kzalloc(dev->dev, sizeof(*mixer), GFP_KERNEL);
if (!mixer)
return ERR_PTR(-ENOMEM);
struct drm_crtc *hwmixer_to_crtc[8];
};
-struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm);
-void mdp5_mixer_destroy(struct mdp5_hw_mixer *lm);
+struct mdp5_hw_mixer *mdp5_mixer_init(struct drm_device *dev,
+ const struct mdp5_lm_instance *lm);
int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
uint32_t caps, struct mdp5_hw_mixer **mixer,
struct mdp5_hw_mixer **r_mixer);
return 0;
}
-void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe)
-{
- kfree(hwpipe);
-}
-
-struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
+struct mdp5_hw_pipe *mdp5_pipe_init(struct drm_device *dev,
+ enum mdp5_pipe pipe,
uint32_t reg_offset, uint32_t caps)
{
struct mdp5_hw_pipe *hwpipe;
- hwpipe = kzalloc(sizeof(*hwpipe), GFP_KERNEL);
+ hwpipe = devm_kzalloc(dev->dev, sizeof(*hwpipe), GFP_KERNEL);
if (!hwpipe)
return ERR_PTR(-ENOMEM);
struct mdp5_hw_pipe **r_hwpipe);
int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe);
-struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
+struct mdp5_hw_pipe *mdp5_pipe_init(struct drm_device *dev,
+ enum mdp5_pipe pipe,
uint32_t reg_offset, uint32_t caps);
-void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe);
#endif /* __MDP5_PIPE_H__ */
drm_modeset_unlock(&mdp5_kms->glob_state_lock);
}
-void mdp5_smp_destroy(struct mdp5_smp *smp)
-{
- kfree(smp);
-}
struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg)
{
+ struct drm_device *dev = mdp5_kms->dev;
struct mdp5_smp_state *state;
struct mdp5_global_state *global_state;
struct mdp5_smp *smp;
- int ret;
- smp = kzalloc(sizeof(*smp), GFP_KERNEL);
- if (unlikely(!smp)) {
- ret = -ENOMEM;
- goto fail;
- }
+ smp = devm_kzalloc(dev->dev, sizeof(*smp), GFP_KERNEL);
+ if (unlikely(!smp))
+ return ERR_PTR(-ENOMEM);
smp->dev = mdp5_kms->dev;
smp->blk_cnt = cfg->mmb_count;
memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));
return smp;
-fail:
- if (smp)
- mdp5_smp_destroy(smp);
-
- return ERR_PTR(ret);
}
struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms,
const struct mdp5_smp_block *cfg);
-void mdp5_smp_destroy(struct mdp5_smp *smp);
void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p);
return -EINVAL;
}
+ ret = pm_runtime_resume_and_get(dp_aux->dev);
+ if (ret)
+ return ret;
+
mutex_lock(&aux->mutex);
if (!aux->initted) {
ret = -EIO;
exit:
mutex_unlock(&aux->mutex);
+ pm_runtime_put_sync(dp_aux->dev);
return ret;
}
int dp_aux_register(struct drm_dp_aux *dp_aux)
{
- struct dp_aux_private *aux;
int ret;
if (!dp_aux) {
return -EINVAL;
}
- aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
- aux->dp_aux.name = "dpu_dp_aux";
- aux->dp_aux.dev = aux->dev;
- aux->dp_aux.transfer = dp_aux_transfer;
- ret = drm_dp_aux_register(&aux->dp_aux);
+ ret = drm_dp_aux_register(dp_aux);
if (ret) {
DRM_ERROR("%s: failed to register drm aux: %d\n", __func__,
ret);
drm_dp_aux_unregister(dp_aux);
}
+static int dp_wait_hpd_asserted(struct drm_dp_aux *dp_aux,
+ unsigned long wait_us)
+{
+ int ret;
+ struct dp_aux_private *aux;
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+ pm_runtime_get_sync(aux->dev);
+ ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog);
+ pm_runtime_put_sync(aux->dev);
+
+ return ret;
+}
+
struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog,
bool is_edp)
{
aux->catalog = catalog;
aux->retry_cnt = 0;
+ /*
+ * Use drm_dp_aux_init() to initialize the AUX adapter before
+ * registering AUX with the DRM device, so that the msm eDP panel
+ * can be detected by generic_edp_panel_probe().
+ */
+ aux->dp_aux.name = "dpu_dp_aux";
+ aux->dp_aux.dev = dev;
+ aux->dp_aux.transfer = dp_aux_transfer;
+ aux->dp_aux.wait_hpd_asserted = dp_wait_hpd_asserted;
+ drm_dp_aux_init(&aux->dp_aux);
+
return &aux->dp_aux;
}
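
As the comment in dp_aux_get() notes, the AUX channel is now initialized at allocation time and only registered with DRM later, so the aux-bus panel probe can use the channel early. A minimal sketch of that split with hypothetical names:

#include <drm/display/drm_dp_helper.h>

/* probe time: make the channel usable before any DRM device exists */
static void my_aux_setup(struct drm_dp_aux *aux, struct device *dev)
{
	aux->name = "my_dp_aux";	/* hypothetical */
	aux->dev = dev;
	/* .transfer and .wait_hpd_asserted are assumed to be set elsewhere */
	drm_dp_aux_init(aux);
}

/* bind time: expose the channel (e.g. /dev/drm_dp_auxN) to userspace */
static int my_aux_publish(struct drm_dp_aux *aux, struct drm_device *drm)
{
	aux->drm_dev = drm;
	return drm_dp_aux_register(aux);
}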
#define DEBUG_NAME "msm_dp"
struct dp_debug_private {
- struct dentry *root;
-
struct dp_link *link;
struct dp_panel *panel;
struct drm_connector *connector;
- struct device *dev;
- struct drm_device *drm_dev;
struct dp_debug dp_debug;
};
.write = dp_test_active_write
};
-static void dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor)
+static void dp_debug_init(struct dp_debug *dp_debug, struct dentry *root, bool is_edp)
{
- char path[64];
struct dp_debug_private *debug = container_of(dp_debug,
struct dp_debug_private, dp_debug);
- snprintf(path, sizeof(path), "msm_dp-%s", debug->connector->name);
-
- debug->root = debugfs_create_dir(path, minor->debugfs_root);
-
- debugfs_create_file("dp_debug", 0444, debug->root,
+ debugfs_create_file("dp_debug", 0444, root,
debug, &dp_debug_fops);
- debugfs_create_file("msm_dp_test_active", 0444,
- debug->root,
- debug, &test_active_fops);
+ if (!is_edp) {
+ debugfs_create_file("msm_dp_test_active", 0444,
+ root,
+ debug, &test_active_fops);
- debugfs_create_file("msm_dp_test_data", 0444,
- debug->root,
- debug, &dp_test_data_fops);
+ debugfs_create_file("msm_dp_test_data", 0444,
+ root,
+ debug, &dp_test_data_fops);
- debugfs_create_file("msm_dp_test_type", 0444,
- debug->root,
- debug, &dp_test_type_fops);
+ debugfs_create_file("msm_dp_test_type", 0444,
+ root,
+ debug, &dp_test_type_fops);
+ }
}
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
struct dp_link *link,
- struct drm_connector *connector, struct drm_minor *minor)
+ struct drm_connector *connector,
+ struct dentry *root, bool is_edp)
{
struct dp_debug_private *debug;
struct dp_debug *dp_debug;
debug->dp_debug.debug_en = false;
debug->link = link;
debug->panel = panel;
- debug->dev = dev;
- debug->drm_dev = minor->dev;
- debug->connector = connector;
dp_debug = &debug->dp_debug;
dp_debug->vdisplay = 0;
dp_debug->hdisplay = 0;
dp_debug->vrefresh = 0;
- dp_debug_init(dp_debug, minor);
+ dp_debug_init(dp_debug, root, is_edp);
return dp_debug;
error:
return ERR_PTR(rc);
}
-
-static int dp_debug_deinit(struct dp_debug *dp_debug)
-{
- struct dp_debug_private *debug;
-
- if (!dp_debug)
- return -EINVAL;
-
- debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
-
- debugfs_remove_recursive(debug->root);
-
- return 0;
-}
-
-void dp_debug_put(struct dp_debug *dp_debug)
-{
- struct dp_debug_private *debug;
-
- if (!dp_debug)
- return;
-
- debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
-
- dp_debug_deinit(dp_debug);
-
- devm_kfree(debug->dev, debug);
-}
* @panel: instance of panel module
* @link: instance of link module
* @connector: double pointer to display connector
- * @minor: pointer to drm minor number after device registration
+ * @root: connector's debugfs root
+ * @is_edp: set for eDP connectors / panels
* return: pointer to allocated debug module data
*
* This function sets up the debug module and provides a way
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
struct dp_link *link,
struct drm_connector *connector,
- struct drm_minor *minor);
-
-/**
- * dp_debug_put()
- *
- * Cleans up dp_debug instance
- *
- * @dp_debug: instance of dp_debug
- */
-void dp_debug_put(struct dp_debug *dp_debug);
+ struct dentry *root,
+ bool is_edp);
#else
static inline
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
struct dp_link *link,
- struct drm_connector *connector, struct drm_minor *minor)
+ struct drm_connector *connector,
+ struct dentry *root,
+ bool is_edp)
{
return ERR_PTR(-EINVAL);
}
-static inline void dp_debug_put(struct dp_debug *dp_debug)
-{
-}
-
#endif /* defined(CONFIG_DEBUG_FS) */
#endif /* _DP_DEBUG_H_ */
ST_CONNECTED,
ST_DISCONNECT_PENDING,
ST_DISPLAY_OFF,
- ST_SUSPENDED,
};
enum {
EV_NO_EVENT,
/* hpd events */
- EV_HPD_INIT_SETUP,
EV_HPD_PLUG_INT,
EV_IRQ_HPD_INT,
EV_HPD_UNPLUG_INT,
{}
};
+static const struct msm_dp_desc sm8650_dp_descs[] = {
+ { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
+ {}
+};
+
static const struct of_device_id dp_dt_match[] = {
{ .compatible = "qcom,sc7180-dp", .data = &sc7180_dp_descs },
{ .compatible = "qcom,sc7280-dp", .data = &sc7280_dp_descs },
{ .compatible = "qcom,sc8280xp-edp", .data = &sc8280xp_edp_descs },
{ .compatible = "qcom,sdm845-dp", .data = &sc7180_dp_descs },
{ .compatible = "qcom,sm8350-dp", .data = &sm8350_dp_descs },
+ { .compatible = "qcom,sm8650-dp", .data = &sm8650_dp_descs },
{}
};
dp->dp_display.drm_dev = drm;
priv->dp[dp->id] = &dp->dp_display;
- rc = dp->parser->parse(dp->parser);
- if (rc) {
- DRM_ERROR("device tree parsing failed\n");
- goto end;
- }
dp->drm_dev = drm;
goto end;
}
- rc = dp_power_client_init(dp->power);
- if (rc) {
- DRM_ERROR("Power client create failed\n");
- goto end;
- }
rc = dp_register_audio_driver(dev, dp->audio);
if (rc) {
struct dp_display_private *dp = dev_get_dp_display_private(dev);
struct msm_drm_private *priv = dev_get_drvdata(master);
- /* disable all HPD interrupts */
- if (dp->core_initialized)
- dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
-
kthread_stop(dp->ev_tsk);
of_dp_aux_depopulate_bus(dp->aux);
- dp_power_client_deinit(dp->power);
dp_unregister_audio_driver(dev, dp->audio);
dp_aux_unregister(dp->aux);
dp->drm_dev = NULL;
.unbind = dp_display_unbind,
};
-static void dp_display_send_hpd_event(struct msm_dp *dp_display)
-{
- struct dp_display_private *dp;
- struct drm_connector *connector;
-
- dp = container_of(dp_display, struct dp_display_private, dp_display);
-
- connector = dp->dp_display.connector;
- drm_helper_hpd_irq_event(connector->dev);
-}
-
-
static int dp_display_send_hpd_notification(struct dp_display_private *dp,
bool hpd)
{
- if ((hpd && dp->dp_display.is_connected) ||
- (!hpd && !dp->dp_display.is_connected)) {
- drm_dbg_dp(dp->drm_dev, "HPD already %s\n",
- (hpd ? "on" : "off"));
- return 0;
- }
+ struct drm_bridge *bridge = dp->dp_display.bridge;
/* reset video pattern flag on disconnect */
if (!hpd) {
dp->panel->downstream_ports);
}
- dp->dp_display.is_connected = hpd;
+ dp->dp_display.link_ready = hpd;
drm_dbg_dp(dp->drm_dev, "type=%d hpd=%d\n",
dp->dp_display.connector_type, hpd);
- dp_display_send_hpd_event(&dp->dp_display);
+ drm_bridge_hpd_notify(bridge, dp->dp_display.link_ready ?
+ connector_status_connected :
+ connector_status_disconnected);
return 0;
}
{
u32 state;
int ret;
+ struct platform_device *pdev = dp->dp_display.pdev;
mutex_lock(&dp->event_mutex);
drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
- if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
+ if (state == ST_DISPLAY_OFF) {
mutex_unlock(&dp->event_mutex);
return 0;
}
return 0;
}
- ret = dp_display_usbpd_configure_cb(&dp->dp_display.pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret) {
+ DRM_ERROR("failed to pm_runtime_resume\n");
+ mutex_unlock(&dp->event_mutex);
+ return ret;
+ }
+
+ ret = dp_display_usbpd_configure_cb(&pdev->dev);
if (ret) { /* link train failed */
dp->hpd_state = ST_DISCONNECTED;
} else {
static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
{
u32 state;
+ struct platform_device *pdev = dp->dp_display.pdev;
mutex_lock(&dp->event_mutex);
dp->dp_display.connector_type, state);
/* uevent will complete disconnection part */
+ pm_runtime_put_sync(&pdev->dev);
mutex_unlock(&dp->event_mutex);
return 0;
}
drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
- if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
+ if (state == ST_DISPLAY_OFF) {
mutex_unlock(&dp->event_mutex);
return 0;
}
static void dp_display_deinit_sub_modules(struct dp_display_private *dp)
{
- dp_debug_put(dp->debug);
dp_audio_put(dp->audio);
dp_panel_put(dp->panel);
dp_aux_put(dp->aux);
dp_display->plugged_cb = fn;
dp_display->codec_dev = codec_dev;
- plugged = dp_display->is_connected;
+ plugged = dp_display->link_ready;
dp_display_handle_plugged_change(dp_display, plugged);
return 0;
spin_unlock_irqrestore(&dp_priv->event_lock, flag);
switch (todo->event_id) {
- case EV_HPD_INIT_SETUP:
- dp_display_host_init(dp_priv);
- break;
case EV_HPD_PLUG_INT:
dp_hpd_plug_handle(dp_priv, todo->data);
break;
return ret;
}
-int dp_display_request_irq(struct msm_dp *dp_display)
+static int dp_display_request_irq(struct dp_display_private *dp)
{
int rc = 0;
- struct dp_display_private *dp;
-
- if (!dp_display) {
- DRM_ERROR("invalid input\n");
- return -EINVAL;
- }
+ struct platform_device *pdev = dp->dp_display.pdev;
- dp = container_of(dp_display, struct dp_display_private, dp_display);
-
- dp->irq = irq_of_parse_and_map(dp->dp_display.pdev->dev.of_node, 0);
- if (!dp->irq) {
+ dp->irq = platform_get_irq(pdev, 0);
+ if (dp->irq < 0) {
DRM_ERROR("failed to get irq\n");
- return -EINVAL;
+ return dp->irq;
}
- rc = devm_request_irq(dp_display->drm_dev->dev, dp->irq,
- dp_display_irq_handler,
- IRQF_TRIGGER_HIGH, "dp_display_isr", dp);
+ rc = devm_request_irq(&pdev->dev, dp->irq, dp_display_irq_handler,
+ IRQF_TRIGGER_HIGH|IRQF_NO_AUTOEN,
+ "dp_display_isr", dp);
+
if (rc < 0) {
DRM_ERROR("failed to request IRQ%u: %d\n",
dp->irq, rc);
return NULL;
}
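
dp_display_request_irq() now requests the interrupt with IRQF_NO_AUTOEN, so it stays masked until the runtime-resume path calls enable_irq() on powered hardware. A short sketch of that pairing with hypothetical names:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

/* probe: request the IRQ but leave it disabled */
static int my_request_irq(struct platform_device *pdev,
			  irq_handler_t handler, void *ctx, int *irq_out)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	*irq_out = irq;

	/* IRQF_NO_AUTOEN: stays masked until enable_irq() below */
	return devm_request_irq(&pdev->dev, irq, handler,
				IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN,
				"my_isr", ctx);
}

/* runtime resume: the block is powered, interrupts are safe now */
static void my_hw_up(int irq)
{
	enable_irq(irq);
}

/* runtime suspend: mask before powering the block down */
static void my_hw_down(int irq)
{
	disable_irq(irq);
}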
+static int dp_auxbus_done_probe(struct drm_dp_aux *aux)
+{
+ int rc;
+
+ rc = component_add(aux->dev, &dp_display_comp_ops);
+ if (rc)
+ DRM_ERROR("eDP component add failed, rc=%d\n", rc);
+
+ return rc;
+}
+
static int dp_display_probe(struct platform_device *pdev)
{
int rc = 0;
return -EPROBE_DEFER;
}
+ rc = dp->parser->parse(dp->parser);
+ if (rc) {
+ DRM_ERROR("device tree parsing failed\n");
+ goto err;
+ }
+
+ rc = dp_power_client_init(dp->power);
+ if (rc) {
+ DRM_ERROR("Power client create failed\n");
+ goto err;
+ }
+
/* setup event q */
mutex_init(&dp->event_mutex);
init_waitqueue_head(&dp->event_q);
platform_set_drvdata(pdev, &dp->dp_display);
- rc = component_add(&pdev->dev, &dp_display_comp_ops);
- if (rc) {
- DRM_ERROR("component add failed, rc=%d\n", rc);
- dp_display_deinit_sub_modules(dp);
+ rc = devm_pm_runtime_enable(&pdev->dev);
+ if (rc)
+ goto err;
+
+ rc = dp_display_request_irq(dp);
+ if (rc)
+ goto err;
+
+ if (dp->dp_display.is_edp) {
+ rc = devm_of_dp_aux_populate_bus(dp->aux, dp_auxbus_done_probe);
+ if (rc) {
+ DRM_ERROR("eDP auxbus population failed, rc=%d\n", rc);
+ goto err;
+ }
+ } else {
+ rc = component_add(&pdev->dev, &dp_display_comp_ops);
+ if (rc) {
+ DRM_ERROR("component add failed, rc=%d\n", rc);
+ goto err;
+ }
}
return rc;
+
+err:
+ dp_display_deinit_sub_modules(dp);
+ return rc;
}
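
For eDP, the probe path above defers component_add() to the aux-bus done_probe callback, so the DRM component only binds once the panel on the AUX bus has actually probed; this removes the earlier assumption that the panel finishes probing synchronously during bus population. A hedged sketch of the pattern with hypothetical names:

#include <drm/display/drm_dp_aux_bus.h>
#include <drm/display/drm_dp_helper.h>

/* called by the aux-bus core once every device on the bus has probed */
static int my_auxbus_done_probe(struct drm_dp_aux *aux)
{
	/* safe to register the DRM component / bridge pieces here */
	return 0;
}

static int my_edp_probe(struct drm_dp_aux *aux)
{
	/*
	 * Populates the panel described under the aux-bus DT node and
	 * invokes the callback when probing has completed; cleanup is
	 * device-managed.
	 */
	return devm_of_dp_aux_populate_bus(aux, my_auxbus_done_probe);
}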
static void dp_display_remove(struct platform_device *pdev)
component_del(&pdev->dev, &dp_display_comp_ops);
dp_display_deinit_sub_modules(dp);
-
platform_set_drvdata(pdev, NULL);
}
-static int dp_pm_resume(struct device *dev)
+static int dp_pm_runtime_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct msm_dp *dp_display = platform_get_drvdata(pdev);
- struct dp_display_private *dp;
- int sink_count = 0;
-
- dp = container_of(dp_display, struct dp_display_private, dp_display);
-
- mutex_lock(&dp->event_mutex);
-
- drm_dbg_dp(dp->drm_dev,
- "Before, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
- dp->dp_display.connector_type, dp->core_initialized,
- dp->phy_initialized, dp_display->power_on);
-
- /* start from disconnected state */
- dp->hpd_state = ST_DISCONNECTED;
-
- /* turn on dp ctrl/phy */
- dp_display_host_init(dp);
-
- if (dp_display->is_edp)
- dp_catalog_ctrl_hpd_enable(dp->catalog);
+ struct dp_display_private *dp = dev_get_dp_display_private(dev);
- if (dp_catalog_link_is_connected(dp->catalog)) {
- /*
- * set sink to normal operation mode -- D0
- * before dpcd read
- */
- dp_display_host_phy_init(dp);
- dp_link_psm_config(dp->link, &dp->panel->link_info, false);
- sink_count = drm_dp_read_sink_count(dp->aux);
- if (sink_count < 0)
- sink_count = 0;
+ disable_irq(dp->irq);
+ if (dp->dp_display.is_edp) {
dp_display_host_phy_exit(dp);
+ dp_catalog_ctrl_hpd_disable(dp->catalog);
}
-
- dp->link->sink_count = sink_count;
- /*
- * can not declared display is connected unless
- * HDMI cable is plugged in and sink_count of
- * dongle become 1
- * also only signal audio when disconnected
- */
- if (dp->link->sink_count) {
- dp->dp_display.is_connected = true;
- } else {
- dp->dp_display.is_connected = false;
- dp_display_handle_plugged_change(dp_display, false);
- }
-
- drm_dbg_dp(dp->drm_dev,
- "After, type=%d sink=%d conn=%d core_init=%d phy_init=%d power=%d\n",
- dp->dp_display.connector_type, dp->link->sink_count,
- dp->dp_display.is_connected, dp->core_initialized,
- dp->phy_initialized, dp_display->power_on);
-
- mutex_unlock(&dp->event_mutex);
+ dp_display_host_deinit(dp);
return 0;
}
-static int dp_pm_suspend(struct device *dev)
+static int dp_pm_runtime_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct msm_dp *dp_display = platform_get_drvdata(pdev);
- struct dp_display_private *dp;
-
- dp = container_of(dp_display, struct dp_display_private, dp_display);
-
- mutex_lock(&dp->event_mutex);
-
- drm_dbg_dp(dp->drm_dev,
- "Before, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
- dp->dp_display.connector_type, dp->core_initialized,
- dp->phy_initialized, dp_display->power_on);
-
- /* mainlink enabled */
- if (dp_power_clk_status(dp->power, DP_CTRL_PM))
- dp_ctrl_off_link_stream(dp->ctrl);
-
- dp_display_host_phy_exit(dp);
-
- /* host_init will be called at pm_resume */
- dp_display_host_deinit(dp);
-
- dp->hpd_state = ST_SUSPENDED;
-
- drm_dbg_dp(dp->drm_dev,
- "After, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
- dp->dp_display.connector_type, dp->core_initialized,
- dp->phy_initialized, dp_display->power_on);
+ struct dp_display_private *dp = dev_get_dp_display_private(dev);
- mutex_unlock(&dp->event_mutex);
+ /*
+ * For eDP, the host controller, HPD block and PHY are enabled here,
+ * but with the HPD irq disabled.
+ *
+ * For DP, only the host controller is enabled here.
+ * The HPD block is enabled at dp_bridge_hpd_enable() and
+ * the PHY will be enabled at the plugin handler later.
+ */
+ dp_display_host_init(dp);
+ if (dp->dp_display.is_edp) {
+ dp_catalog_ctrl_hpd_enable(dp->catalog);
+ dp_display_host_phy_init(dp);
+ }
+ enable_irq(dp->irq);
return 0;
}
static const struct dev_pm_ops dp_pm_ops = {
- .suspend = dp_pm_suspend,
- .resume = dp_pm_resume,
+ SET_RUNTIME_PM_OPS(dp_pm_runtime_suspend, dp_pm_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
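
With the legacy system suspend/resume handlers gone, power sequencing now hangs off runtime PM, and system sleep is routed through the force_suspend/force_resume helpers. A minimal sketch of this wiring for a hypothetical platform driver, including how code paths bracket hardware access:

#include <linux/pm_runtime.h>

static int my_runtime_suspend(struct device *dev)
{
	/* mask interrupts, power the controller down */
	return 0;
}

static int my_runtime_resume(struct device *dev)
{
	/* power the controller up, re-enable interrupts */
	return 0;
}

static const struct dev_pm_ops my_pm_ops = {
	SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
	/* system sleep simply forces the runtime-PM state transitions */
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

/* any path that touches the hardware brackets the access like this */
static int my_touch_hw(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret)
		return ret;

	/* ... register access ... */

	pm_runtime_put_sync(dev);
	return 0;
}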
static struct platform_driver dp_display_driver = {
platform_driver_unregister(&dp_display_driver);
}
-void msm_dp_irq_postinstall(struct msm_dp *dp_display)
-{
- struct dp_display_private *dp;
-
- if (!dp_display)
- return;
-
- dp = container_of(dp_display, struct dp_display_private, dp_display);
-
- if (!dp_display->is_edp)
- dp_add_event(dp, EV_HPD_INIT_SETUP, 0, 0);
-}
-
bool msm_dp_wide_bus_available(const struct msm_dp *dp_display)
{
struct dp_display_private *dp;
return dp->wide_bus_en;
}
-void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor)
+void dp_display_debugfs_init(struct msm_dp *dp_display, struct dentry *root, bool is_edp)
{
struct dp_display_private *dp;
struct device *dev;
dp->debug = dp_debug_get(dev, dp->panel,
dp->link, dp->dp_display.connector,
- minor);
+ root, is_edp);
if (IS_ERR(dp->debug)) {
rc = PTR_ERR(dp->debug);
DRM_ERROR("failed to initialize debug, rc = %d\n", rc);
{
int rc;
struct dp_display_private *dp_priv;
- struct device_node *aux_bus;
- struct device *dev;
dp_priv = container_of(dp, struct dp_display_private, dp_display);
- dev = &dp_priv->dp_display.pdev->dev;
- aux_bus = of_get_child_by_name(dev->of_node, "aux-bus");
-
- if (aux_bus && dp->is_edp) {
- dp_display_host_init(dp_priv);
- dp_catalog_ctrl_hpd_enable(dp_priv->catalog);
- dp_display_host_phy_init(dp_priv);
-
- /*
- * The code below assumes that the panel will finish probing
- * by the time devm_of_dp_aux_populate_ep_devices() returns.
- * This isn't a great assumption since it will fail if the
- * panel driver is probed asynchronously but is the best we
- * can do without a bigger driver reorganization.
- */
- rc = of_dp_aux_populate_bus(dp_priv->aux, NULL);
- of_node_put(aux_bus);
- if (rc)
- goto error;
- } else if (dp->is_edp) {
- DRM_ERROR("eDP aux_bus not found\n");
- return -ENODEV;
- }
/*
* External bridges are mandatory for eDP interfaces: one has to
if (!dp->is_edp && rc == -ENODEV)
return 0;
- if (!rc) {
+ if (!rc)
dp->next_bridge = dp_priv->parser->next_bridge;
- return 0;
- }
-error:
- if (dp->is_edp) {
- of_dp_aux_depopulate_bus(dp_priv->aux);
- dp_display_host_phy_exit(dp_priv);
- dp_display_host_deinit(dp_priv);
- }
return rc;
}
dp_priv = container_of(dp_display, struct dp_display_private, dp_display);
- ret = dp_display_request_irq(dp_display);
- if (ret) {
- DRM_ERROR("request_irq failed, ret=%d\n", ret);
- return ret;
- }
-
ret = dp_display_get_next_bridge(dp_display);
if (ret)
return ret;
dp_hpd_plug_handle(dp_display, 0);
mutex_lock(&dp_display->event_mutex);
+ if (pm_runtime_resume_and_get(&dp->pdev->dev)) {
+ DRM_ERROR("failed to pm_runtime_resume\n");
+ mutex_unlock(&dp_display->event_mutex);
+ return;
+ }
state = dp_display->hpd_state;
if (state != ST_DISPLAY_OFF && state != ST_MAINLINK_READY) {
mutex_lock(&dp_display->event_mutex);
state = dp_display->hpd_state;
- if (state != ST_DISCONNECT_PENDING && state != ST_CONNECTED) {
- mutex_unlock(&dp_display->event_mutex);
- return;
- }
+ if (state != ST_DISCONNECT_PENDING && state != ST_CONNECTED)
+ drm_dbg_dp(dp->drm_dev, "type=%d wrong hpd_state=%d\n",
+ dp->connector_type, state);
dp_display_disable(dp_display);
}
drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type);
+
+ pm_runtime_put_sync(&dp->pdev->dev);
mutex_unlock(&dp_display->event_mutex);
}
struct msm_dp *dp_display = dp_bridge->dp_display;
struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display);
+	/*
+	 * This path is for the external DP case with HPD irq enabled:
+	 * step-1: dp_pm_runtime_resume() enables the DP host only
+	 * step-2: the HPD block and HPD interrupts are enabled here
+	 * step-3: wait for the plugin irq while the PHY is not yet initialized
+	 * step-4: the DP PHY is initialized in the plugin handler, before link training
+	 */
mutex_lock(&dp->event_mutex);
+ if (pm_runtime_resume_and_get(&dp_display->pdev->dev)) {
+ DRM_ERROR("failed to resume power\n");
+ mutex_unlock(&dp->event_mutex);
+ return;
+ }
+
dp_catalog_ctrl_hpd_enable(dp->catalog);
/* enable HDP interrupts */
dp_catalog_ctrl_hpd_disable(dp->catalog);
dp_display->internal_hpd = false;
+
+ pm_runtime_put_sync(&dp_display->pdev->dev);
mutex_unlock(&dp->event_mutex);
}
if (dp_display->internal_hpd)
return;
- if (!dp->core_initialized) {
- drm_dbg_dp(dp->drm_dev, "not initialized\n");
- return;
- }
-
- if (!dp_display->is_connected && status == connector_status_connected)
+ if (!dp_display->link_ready && status == connector_status_connected)
dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
- else if (dp_display->is_connected && status == connector_status_disconnected)
+ else if (dp_display->link_ready && status == connector_status_disconnected)
dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
}
struct drm_bridge *bridge;
struct drm_connector *connector;
struct drm_bridge *next_bridge;
- bool is_connected;
+ bool link_ready;
bool audio_enabled;
bool power_on;
unsigned int connector_type;
int dp_display_set_plugged_cb(struct msm_dp *dp_display,
hdmi_codec_plugged_cb fn, struct device *codec_dev);
int dp_display_get_modes(struct msm_dp *dp_display);
-int dp_display_request_irq(struct msm_dp *dp_display);
bool dp_display_check_video_test(struct msm_dp *dp_display);
int dp_display_get_test_bpp(struct msm_dp *dp_display);
void dp_display_signal_audio_start(struct msm_dp *dp_display);
void dp_display_signal_audio_complete(struct msm_dp *dp_display);
void dp_display_set_psr(struct msm_dp *dp, bool enter);
+void dp_display_debugfs_init(struct msm_dp *dp_display, struct dentry *dentry, bool is_edp);
#endif /* _DP_DISPLAY_H_ */
dp = to_dp_bridge(bridge)->dp_display;
- drm_dbg_dp(dp->drm_dev, "is_connected = %s\n",
- (dp->is_connected) ? "true" : "false");
+ drm_dbg_dp(dp->drm_dev, "link_ready = %s\n",
+ (dp->link_ready) ? "true" : "false");
- return (dp->is_connected) ? connector_status_connected :
+ return (dp->link_ready) ? connector_status_connected :
connector_status_disconnected;
}
dp = to_dp_bridge(bridge)->dp_display;
- drm_dbg_dp(dp->drm_dev, "is_connected = %s\n",
- (dp->is_connected) ? "true" : "false");
+ drm_dbg_dp(dp->drm_dev, "link_ready = %s\n",
+ (dp->link_ready) ? "true" : "false");
/*
* There is no protection in the DRM framework to check if the display
* After that this piece of code can be removed.
*/
if (bridge->ops & DRM_BRIDGE_OP_HPD)
- return (dp->is_connected) ? 0 : -ENOTCONN;
+ return (dp->link_ready) ? 0 : -ENOTCONN;
return 0;
}
dp = to_dp_bridge(bridge)->dp_display;
/* pluggable case assumes EDID is read when HPD */
- if (dp->is_connected) {
+ if (dp->link_ready) {
rc = dp_display_get_modes(dp);
if (rc <= 0) {
DRM_ERROR("failed to get DP sink modes, rc=%d\n", rc);
return rc;
}
+static void dp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
+{
+ struct msm_dp *dp = to_dp_bridge(bridge)->dp_display;
+
+ dp_display_debugfs_init(dp, root, false);
+}
+
static const struct drm_bridge_funcs dp_bridge_ops = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.hpd_enable = dp_bridge_hpd_enable,
.hpd_disable = dp_bridge_hpd_disable,
.hpd_notify = dp_bridge_hpd_notify,
+ .debugfs_init = dp_bridge_debugfs_init,
};
static int edp_bridge_atomic_check(struct drm_bridge *drm_bridge,
return MODE_OK;
}
+static void edp_bridge_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
+{
+ struct msm_dp *dp = to_dp_bridge(bridge)->dp_display;
+
+ dp_display_debugfs_init(dp, root, true);
+}
+
static const struct drm_bridge_funcs edp_bridge_ops = {
.atomic_enable = edp_bridge_atomic_enable,
.atomic_disable = edp_bridge_atomic_disable,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_check = edp_bridge_atomic_check,
+ .debugfs_init = edp_bridge_debugfs_init,
};
int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
power = container_of(dp_power, struct dp_power_private, dp_power);
- pm_runtime_enable(power->dev);
-
return dp_power_clk_init(power);
}
-void dp_power_client_deinit(struct dp_power *dp_power)
-{
- struct dp_power_private *power;
-
- power = container_of(dp_power, struct dp_power_private, dp_power);
-
- pm_runtime_disable(power->dev);
-}
-
int dp_power_init(struct dp_power *dp_power)
{
- int rc = 0;
- struct dp_power_private *power = NULL;
-
- power = container_of(dp_power, struct dp_power_private, dp_power);
-
- pm_runtime_get_sync(power->dev);
-
- rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true);
- if (rc)
- pm_runtime_put_sync(power->dev);
-
- return rc;
+ return dp_power_clk_enable(dp_power, DP_CORE_PM, true);
}
int dp_power_deinit(struct dp_power *dp_power)
{
- struct dp_power_private *power;
-
- power = container_of(dp_power, struct dp_power_private, dp_power);
-
- dp_power_clk_enable(dp_power, DP_CORE_PM, false);
- pm_runtime_put_sync(power->dev);
- return 0;
+ return dp_power_clk_enable(dp_power, DP_CORE_PM, false);
}
struct dp_power *dp_power_get(struct device *dev, struct dp_parser *parser)
*/
int dp_power_client_init(struct dp_power *power);
-/**
- * dp_power_clinet_deinit() - de-initialize clock and regulator modules
- *
- * @power: instance of power module
- * return: 0 for success, error for failure.
- *
- * This API will de-initialize the DisplayPort's clocks and regulator
- * modules.
- */
-void dp_power_client_deinit(struct dp_power *power);
-
/**
* dp_power_get() - configure and get the DisplayPort power module data
*
},
};
+static const struct regulator_bulk_data sm8650_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 16600 }, /* 1.2 V */
+};
+
+static const struct msm_dsi_config sm8650_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .regulator_data = sm8650_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(sm8650_dsi_regulators),
+ .bus_clk_names = dsi_v2_4_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_v2_4_clk_names),
+ .io_start = {
+ { 0xae94000, 0xae96000 },
+ },
+};
+
static const struct regulator_bulk_data sc7280_dsi_regulators[] = {
{ .supply = "vdda", .init_load_uA = 8350 }, /* 1.2 V */
{ .supply = "refgen" },
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_7_0,
&sm8550_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_8_0,
+ &sm8650_dsi_cfg, &msm_dsi_6g_v2_host_ops},
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
#define MSM_DSI_6G_VER_MINOR_V2_5_0 0x20050000
#define MSM_DSI_6G_VER_MINOR_V2_6_0 0x20060000
#define MSM_DSI_6G_VER_MINOR_V2_7_0 0x20070000
+#define MSM_DSI_6G_VER_MINOR_V2_8_0 0x20080000
#define MSM_DSI_V2_VER_MINOR_8064 0x0
struct device *dev = &phy->pdev->dev;
int ret;
- pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
ret = clk_prepare_enable(phy->ahb_clk);
if (ret) {
.data = &dsi_phy_5nm_8450_cfgs },
{ .compatible = "qcom,sm8550-dsi-phy-4nm",
.data = &dsi_phy_4nm_8550_cfgs },
+ { .compatible = "qcom,sm8650-dsi-phy-4nm",
+ .data = &dsi_phy_4nm_8650_cfgs },
#endif
{}
};
return dev_err_probe(dev, PTR_ERR(phy->ahb_clk),
"Unable to get ahb clk\n");
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
/* PLL init will call into clk_register which requires
* register access, so we need to enable power and ahb clock.
*/
extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8350_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8450_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8650_cfgs;
struct msm_dsi_dphy_timing {
u32 clk_zero;
{ .supply = "vdds", .init_load_uA = 37550 },
};
+static const struct regulator_bulk_data dsi_phy_7nm_98000uA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 98000 },
+};
+
static const struct regulator_bulk_data dsi_phy_7nm_97800uA_regulators[] = {
{ .supply = "vdds", .init_load_uA = 97800 },
};
.num_dsi_phy = 2,
.quirks = DSI_PHY_7NM_QUIRK_V5_2,
};
+
+const struct msm_dsi_phy_cfg dsi_phy_4nm_8650_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_98000uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_98000uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae95000, 0xae97000 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V5_2,
+};
return ret;
}
-void msm_debugfs_init(struct drm_minor *minor)
+static void msm_debugfs_gpu_init(struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
struct msm_drm_private *priv = dev->dev_private;
struct dentry *gpu_devfreq;
- drm_debugfs_create_files(msm_debugfs_list,
- ARRAY_SIZE(msm_debugfs_list),
- minor->debugfs_root, minor);
-
debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
dev, &msm_gpu_fops);
- if (priv->kms) {
- drm_debugfs_create_files(msm_kms_debugfs_list,
- ARRAY_SIZE(msm_kms_debugfs_list),
- minor->debugfs_root, minor);
- debugfs_create_file("kms", S_IRUSR, minor->debugfs_root,
- dev, &msm_kms_fops);
- }
-
debugfs_create_u32("hangcheck_period_ms", 0600, minor->debugfs_root,
&priv->hangcheck_period);
debugfs_create_bool("disable_err_irq", 0600, minor->debugfs_root,
&priv->disable_err_irq);
- debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root,
- dev, &shrink_fops);
-
gpu_devfreq = debugfs_create_dir("devfreq", minor->debugfs_root);
debugfs_create_bool("idle_clamp",0600, gpu_devfreq,
debugfs_create_u32("downdifferential",0600, gpu_devfreq,
&priv->gpu_devfreq_config.downdifferential);
+}
+
+void msm_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ drm_debugfs_create_files(msm_debugfs_list,
+ ARRAY_SIZE(msm_debugfs_list),
+ minor->debugfs_root, minor);
+
+ if (priv->gpu_pdev)
+ msm_debugfs_gpu_init(minor);
+
+ if (priv->kms) {
+ drm_debugfs_create_files(msm_kms_debugfs_list,
+ ARRAY_SIZE(msm_kms_debugfs_list),
+ minor->debugfs_root, minor);
+ debugfs_create_file("kms", S_IRUSR, minor->debugfs_root,
+ dev, &msm_kms_fops);
+ }
+
+ debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root,
+ dev, &shrink_fops);
if (priv->kms && priv->kms->funcs->debugfs_init)
priv->kms->funcs->debugfs_init(priv->kms, minor);
* - 1.9.0 - Add MSM_SUBMIT_FENCE_SN_IN
* - 1.10.0 - Add MSM_SUBMIT_BO_NO_IMPLICIT
* - 1.11.0 - Add wait boost (MSM_WAIT_FENCE_BOOST, MSM_PREP_BOOST)
+ * - 1.12.0 - Add MSM_INFO_SET_METADATA and MSM_INFO_GET_METADATA
*/
#define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 10
+#define MSM_VERSION_MINOR 12
#define MSM_VERSION_PATCHLEVEL 0
static void msm_deinit_vram(struct drm_device *ddev);
return msm_gem_set_iova(obj, ctx->aspace, iova);
}
+static int msm_ioctl_gem_info_set_metadata(struct drm_gem_object *obj,
+ __user void *metadata,
+ u32 metadata_size)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ void *buf;
+ int ret;
+
+ /* Impose a moderate upper bound on metadata size: */
+ if (metadata_size > 128) {
+ return -EOVERFLOW;
+ }
+
+ /* Use a temporary buf to keep copy_from_user() outside of gem obj lock: */
+ buf = memdup_user(metadata, metadata_size);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ret = msm_gem_lock_interruptible(obj);
+ if (ret)
+ goto out;
+
+	msm_obj->metadata =
+		krealloc(msm_obj->metadata, metadata_size, GFP_KERNEL);
+	if (!msm_obj->metadata) {
+		msm_obj->metadata_size = 0;
+		ret = -ENOMEM;
+	} else {
+		msm_obj->metadata_size = metadata_size;
+		memcpy(msm_obj->metadata, buf, metadata_size);
+	}
+
+ msm_gem_unlock(obj);
+
+out:
+ kfree(buf);
+
+ return ret;
+}
+
+static int msm_ioctl_gem_info_get_metadata(struct drm_gem_object *obj,
+ __user void *metadata,
+ u32 *metadata_size)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ void *buf;
+ int ret, len;
+
+ if (!metadata) {
+ /*
+	 * Querying the size is inherently racy, but
+ * EXT_external_objects expects the app to confirm
+ * via device and driver UUIDs that the exporter and
+ * importer versions match. All we can do from the
+ * kernel side is check the length under obj lock
+ * when userspace tries to retrieve the metadata
+ */
+ *metadata_size = msm_obj->metadata_size;
+ return 0;
+ }
+
+ ret = msm_gem_lock_interruptible(obj);
+ if (ret)
+ return ret;
+
+ /* Avoid copy_to_user() under gem obj lock: */
+ len = msm_obj->metadata_size;
+ buf = kmemdup(msm_obj->metadata, len, GFP_KERNEL);
+
+ msm_gem_unlock(obj);
+
+ if (*metadata_size < len) {
+ ret = -ETOOSMALL;
+ } else if (copy_to_user(metadata, buf, len)) {
+ ret = -EFAULT;
+ } else {
+ *metadata_size = len;
+ }
+
+ kfree(buf);
+
+	return ret;
+}
+
static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
struct drm_file *file)
{
break;
case MSM_INFO_SET_NAME:
case MSM_INFO_GET_NAME:
+ case MSM_INFO_SET_METADATA:
+ case MSM_INFO_GET_METADATA:
break;
default:
return -EINVAL;
break;
case MSM_INFO_GET_NAME:
if (args->value && (args->len < strlen(msm_obj->name))) {
- ret = -EINVAL;
+ ret = -ETOOSMALL;
break;
}
args->len = strlen(msm_obj->name);
ret = -EFAULT;
}
break;
+ case MSM_INFO_SET_METADATA:
+ ret = msm_ioctl_gem_info_set_metadata(
+ obj, u64_to_user_ptr(args->value), args->len);
+ break;
+ case MSM_INFO_GET_METADATA:
+ ret = msm_ioctl_gem_info_get_metadata(
+ obj, u64_to_user_ptr(args->value), &args->len);
+ break;
}
drm_gem_object_put(obj);
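
For reference, a minimal userspace sketch of the new metadata interface (not part of the patch itself): it assumes an open DRM fd, an existing GEM handle, and libdrm's drmCommandWriteRead(); the helper names are made up for illustration and error handling is omitted. The GET path follows the query-size-then-fetch protocol that the comment above describes.

#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>
#include "msm_drm.h"	/* uapi header providing DRM_MSM_GEM_INFO and MSM_INFO_*_METADATA */

/* Hypothetical helper: attach an opaque metadata blob to a GEM buffer object. */
static void set_bo_metadata(int fd, uint32_t handle, const void *data, uint32_t size)
{
	struct drm_msm_gem_info req = {
		.handle = handle,
		.info = MSM_INFO_SET_METADATA,
		.value = (uintptr_t)data,
		.len = size,		/* anything above 128 bytes is rejected with -EOVERFLOW */
	};

	drmCommandWriteRead(fd, DRM_MSM_GEM_INFO, &req, sizeof(req));
}

/* Hypothetical helper: read the blob back, querying the size first. */
static void *get_bo_metadata(int fd, uint32_t handle, uint32_t *size)
{
	struct drm_msm_gem_info req = {
		.handle = handle,
		.info = MSM_INFO_GET_METADATA,
		.value = 0,		/* NULL pointer: only ask for the size */
	};
	void *buf;

	drmCommandWriteRead(fd, DRM_MSM_GEM_INFO, &req, sizeof(req));

	buf = malloc(req.len);
	req.value = (uintptr_t)buf;	/* second call copies the payload out */
	drmCommandWriteRead(fd, DRM_MSM_GEM_INFO, &req, sizeof(req));

	*size = req.len;
	return buf;
}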
* enum msm_event_wait - type of HW events to wait for
* @MSM_ENC_COMMIT_DONE - wait for the driver to flush the registers to HW
* @MSM_ENC_TX_COMPLETE - wait for the HW to transfer the frame to panel
- * @MSM_ENC_VBLANK - wait for the HW VBLANK event (for driver-internal waiters)
*/
enum msm_event_wait {
MSM_ENC_COMMIT_DONE = 0,
MSM_ENC_TX_COMPLETE,
- MSM_ENC_VBLANK,
};
/**
void __exit msm_dp_unregister(void);
int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
struct drm_encoder *encoder);
-void msm_dp_irq_postinstall(struct msm_dp *dp_display);
void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display);
-void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor);
bool msm_dp_wide_bus_available(const struct msm_dp *dp_display);
#else
return -EINVAL;
}
-static inline void msm_dp_irq_postinstall(struct msm_dp *dp_display)
-{
-}
-
static inline void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display)
{
}
-static inline void msm_dp_debugfs_init(struct msm_dp *dp_display,
- struct drm_minor *minor)
-{
-}
-
static inline bool msm_dp_wide_bus_available(const struct msm_dp *dp_display)
{
return false;
msm_gem_assert_locked(obj);
- if (GEM_WARN_ON(msm_obj->madv > madv)) {
- DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
- msm_obj->madv, madv);
+ if (msm_obj->madv > madv) {
+ DRM_DEV_DEBUG_DRIVER(obj->dev->dev, "Invalid madv state: %u vs %u\n",
+ msm_obj->madv, madv);
return ERR_PTR(-EBUSY);
}
drm_gem_object_release(obj);
+ kfree(msm_obj->metadata);
kfree(msm_obj);
}
char name[32]; /* Identifier to print for the debugfs files */
+ /* userspace metadata backchannel */
+ void *metadata;
+ u32 metadata_size;
+
/**
* pin_count: Number of times the pages are pinned
*
wait_for_idle(struct drm_gem_object *obj)
{
enum dma_resv_usage usage = dma_resv_usage_rw(true);
- return dma_resv_wait_timeout(obj->resv, usage, false, 1000) > 0;
+ return dma_resv_wait_timeout(obj->resv, usage, false, 10) > 0;
}
static bool
#include "msm_gem.h"
#include "msm_gpu_trace.h"
+/* For userspace errors, use DRM_UT_DRIVER so that userspace can enable the
+ * error messages for debugging, without us spamming dmesg by default.
+ */
+#define SUBMIT_ERROR(submit, fmt, ...) \
+ DRM_DEV_DEBUG_DRIVER((submit)->dev->dev, fmt, ##__VA_ARGS__)
+
/*
* Cmdstream submission:
*/
if (sz > SIZE_MAX)
return ERR_PTR(-ENOMEM);
- submit = kzalloc(sz, GFP_KERNEL);
+ submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
if (!submit)
return ERR_PTR(-ENOMEM);
if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
!(submit_bo.flags & MANDATORY_FLAGS)) {
- DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
+ SUBMIT_ERROR(submit, "invalid flags: %x\n", submit_bo.flags);
ret = -EINVAL;
i = 0;
goto out;
*/
obj = idr_find(&file->object_idr, submit->bos[i].handle);
if (!obj) {
- DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i);
+ SUBMIT_ERROR(submit, "invalid handle %u at index %u\n", submit->bos[i].handle, i);
ret = -EINVAL;
goto out_unlock;
}
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
break;
default:
- DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
+ SUBMIT_ERROR(submit, "invalid type: %08x\n", submit_cmd.type);
return -EINVAL;
}
if (submit_cmd.size % 4) {
- DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
- submit_cmd.size);
+ SUBMIT_ERROR(submit, "non-aligned cmdstream buffer size: %u\n",
+ submit_cmd.size);
ret = -EINVAL;
goto out;
}
ret = -ENOMEM;
goto out;
}
- submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL);
+ submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN);
if (!submit->cmd[i].relocs) {
ret = -ENOMEM;
goto out;
fail:
if (ret == -EALREADY) {
- DRM_ERROR("handle %u at index %u already on submit list\n",
- submit->bos[i].handle, i);
+ SUBMIT_ERROR(submit, "handle %u at index %u already on submit list\n",
+ submit->bos[i].handle, i);
ret = -EINVAL;
}
struct drm_gem_object **obj, uint64_t *iova, bool *valid)
{
if (idx >= submit->nr_bos) {
- DRM_ERROR("invalid buffer index: %u (out of %u)\n",
- idx, submit->nr_bos);
+ SUBMIT_ERROR(submit, "invalid buffer index: %u (out of %u)\n",
+ idx, submit->nr_bos);
return -EINVAL;
}
return 0;
if (offset % 4) {
- DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
+ SUBMIT_ERROR(submit, "non-aligned cmdstream buffer: %u\n", offset);
return -EINVAL;
}
bool valid;
if (submit_reloc.submit_offset % 4) {
- DRM_ERROR("non-aligned reloc offset: %u\n",
- submit_reloc.submit_offset);
+ SUBMIT_ERROR(submit, "non-aligned reloc offset: %u\n",
+ submit_reloc.submit_offset);
ret = -EINVAL;
goto out;
}
if ((off >= (obj->size / 4)) ||
(off < last_offset)) {
- DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
+ SUBMIT_ERROR(submit, "invalid offset %u at reloc %u\n", off, i);
ret = -EINVAL;
goto out;
}
if (!submit->cmd[i].size ||
((submit->cmd[i].size + submit->cmd[i].offset) >
obj->size / 4)) {
- DRM_ERROR("invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
+ SUBMIT_ERROR(submit, "invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
ret = -EINVAL;
goto out;
}
if (!gpu->allow_relocs) {
if (submit->cmd[i].nr_relocs) {
- DRM_ERROR("relocs not allowed\n");
+ SUBMIT_ERROR(submit, "relocs not allowed\n");
ret = -EINVAL;
goto out;
}
/* Set the active crash state to be dumped on failure */
gpu->crashstate = state;
- /* FIXME: Release the crashstate if this errors out? */
- dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
+ dev_coredumpm(&gpu->pdev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
- if (submit) {
- /* Increment the fault counts */
- submit->queue->faults++;
- if (submit->aspace)
- submit->aspace->faults++;
- get_comm_cmdline(submit, &comm, &cmd);
+ /*
+ * If the submit retired while we were waiting for the worker to run,
+ * or waiting to acquire the gpu lock, then nothing more to do.
+ */
+ if (!submit)
+ goto out_unlock;
- if (comm && cmd) {
- DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
- gpu->name, comm, cmd);
+ /* Increment the fault counts */
+ submit->queue->faults++;
+ if (submit->aspace)
+ submit->aspace->faults++;
- msm_rd_dump_submit(priv->hangrd, submit,
- "offending task: %s (%s)", comm, cmd);
- } else {
- msm_rd_dump_submit(priv->hangrd, submit, NULL);
- }
+ get_comm_cmdline(submit, &comm, &cmd);
+
+ if (comm && cmd) {
+ DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
+ gpu->name, comm, cmd);
+
+ msm_rd_dump_submit(priv->hangrd, submit,
+ "offending task: %s (%s)", comm, cmd);
} else {
- /*
- * We couldn't attribute this fault to any particular context,
- * so increment the global fault count instead.
- */
- gpu->global_faults++;
+ DRM_DEV_ERROR(dev->dev, "%s: offending task: unknown\n", gpu->name);
+
+ msm_rd_dump_submit(priv->hangrd, submit, NULL);
}
/* Record the crash state */
pm_runtime_put(&gpu->pdev->dev);
+out_unlock:
mutex_unlock(&gpu->lock);
msm_gpu_retire(gpu);
#define MIN_IB_BW 400000000UL /* Min ib vote 400MB */
+#define DEFAULT_REG_BW 153600 /* Used in mdss fbdev driver */
+
struct msm_mdss {
struct device *dev;
struct irq_domain *domain;
} irq_controller;
const struct msm_mdss_data *mdss_data;
- struct icc_path *path[2];
- u32 num_paths;
+ struct icc_path *mdp_path[2];
+ u32 num_mdp_paths;
+ struct icc_path *reg_bus_path;
};
static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
{
struct icc_path *path0;
struct icc_path *path1;
+ struct icc_path *reg_bus_path;
- path0 = of_icc_get(dev, "mdp0-mem");
+ path0 = devm_of_icc_get(dev, "mdp0-mem");
if (IS_ERR_OR_NULL(path0))
return PTR_ERR_OR_ZERO(path0);
- msm_mdss->path[0] = path0;
- msm_mdss->num_paths = 1;
+ msm_mdss->mdp_path[0] = path0;
+ msm_mdss->num_mdp_paths = 1;
- path1 = of_icc_get(dev, "mdp1-mem");
+ path1 = devm_of_icc_get(dev, "mdp1-mem");
if (!IS_ERR_OR_NULL(path1)) {
- msm_mdss->path[1] = path1;
- msm_mdss->num_paths++;
+ msm_mdss->mdp_path[1] = path1;
+ msm_mdss->num_mdp_paths++;
}
- return 0;
-}
-
-static void msm_mdss_put_icc_path(void *data)
-{
- struct msm_mdss *msm_mdss = data;
- int i;
-
- for (i = 0; i < msm_mdss->num_paths; i++)
- icc_put(msm_mdss->path[i]);
-}
-
-static void msm_mdss_icc_request_bw(struct msm_mdss *msm_mdss, unsigned long bw)
-{
- int i;
+	reg_bus_path = devm_of_icc_get(dev, "cpu-cfg");
+ if (!IS_ERR_OR_NULL(reg_bus_path))
+ msm_mdss->reg_bus_path = reg_bus_path;
- for (i = 0; i < msm_mdss->num_paths; i++)
- icc_set_bw(msm_mdss->path[i], 0, Bps_to_icc(bw));
+ return 0;
}
static void msm_mdss_irq(struct irq_desc *desc)
static int msm_mdss_enable(struct msm_mdss *msm_mdss)
{
- int ret;
+ int ret, i;
/*
* Several components have AXI clocks that can only be turned on if
* the interconnect is enabled (non-zero bandwidth). Let's make sure
* that the interconnects are at least at a minimum amount.
*/
- msm_mdss_icc_request_bw(msm_mdss, MIN_IB_BW);
+ for (i = 0; i < msm_mdss->num_mdp_paths; i++)
+ icc_set_bw(msm_mdss->mdp_path[i], 0, Bps_to_icc(MIN_IB_BW));
+
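+	/*
+	 * icc_set_bw() is a no-op for a NULL path, so boards without a
+	 * "cpu-cfg" interconnect simply skip the reg bus vote here.
+	 */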
+ if (msm_mdss->mdss_data && msm_mdss->mdss_data->reg_bus_bw)
+ icc_set_bw(msm_mdss->reg_bus_path, 0,
+ msm_mdss->mdss_data->reg_bus_bw);
+ else
+ icc_set_bw(msm_mdss->reg_bus_path, 0,
+ DEFAULT_REG_BW);
ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
if (ret) {
static int msm_mdss_disable(struct msm_mdss *msm_mdss)
{
+ int i;
+
clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks);
- msm_mdss_icc_request_bw(msm_mdss, 0);
+
+ for (i = 0; i < msm_mdss->num_mdp_paths; i++)
+ icc_set_bw(msm_mdss->mdp_path[i], 0, 0);
+
+ if (msm_mdss->reg_bus_path)
+ icc_set_bw(msm_mdss->reg_bus_path, 0, 0);
return 0;
}
if (!msm_mdss)
return ERR_PTR(-ENOMEM);
+ msm_mdss->mdss_data = of_device_get_match_data(&pdev->dev);
+
msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? "mdss_phys" : "mdss");
if (IS_ERR(msm_mdss->mmio))
return ERR_CAST(msm_mdss->mmio);
dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio);
ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
- if (ret)
- return ERR_PTR(ret);
- ret = devm_add_action_or_reset(&pdev->dev, msm_mdss_put_icc_path, msm_mdss);
if (ret)
return ERR_PTR(ret);
if (IS_ERR(mdss))
return PTR_ERR(mdss);
- mdss->mdss_data = of_device_get_match_data(&pdev->dev);
-
platform_set_drvdata(pdev, mdss);
/*
.ubwc_enc_version = UBWC_1_0,
.ubwc_dec_version = UBWC_1_0,
.highest_bank_bit = 2,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data qcm2290_data = {
/* no UBWC */
.highest_bank_bit = 0x2,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sc7180_data = {
.ubwc_dec_version = UBWC_2_0,
.ubwc_static = 0x1e,
.highest_bank_bit = 0x3,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sc7280_data = {
.ubwc_static = 1,
.highest_bank_bit = 1,
.macrotile_mode = 1,
+ .reg_bus_bw = 74000,
};
static const struct msm_mdss_data sc8180x_data = {
.ubwc_dec_version = UBWC_3_0,
.highest_bank_bit = 3,
.macrotile_mode = 1,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sc8280xp_data = {
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 6,
.ubwc_static = 1,
- .highest_bank_bit = 2,
+ .highest_bank_bit = 3,
.macrotile_mode = 1,
+ .reg_bus_bw = 76800,
+};
+
+static const struct msm_mdss_data sdm670_data = {
+ .ubwc_enc_version = UBWC_2_0,
+ .ubwc_dec_version = UBWC_2_0,
+ .highest_bank_bit = 1,
};
static const struct msm_mdss_data sdm845_data = {
.ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
.highest_bank_bit = 2,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sm6350_data = {
.ubwc_swizzle = 6,
.ubwc_static = 0x1e,
.highest_bank_bit = 1,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sm8150_data = {
.ubwc_enc_version = UBWC_3_0,
.ubwc_dec_version = UBWC_3_0,
.highest_bank_bit = 2,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sm6115_data = {
.ubwc_swizzle = 7,
.ubwc_static = 0x11f,
.highest_bank_bit = 0x1,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sm6125_data = {
/* TODO: highest_bank_bit = 2 for LP_DDR4 */
.highest_bank_bit = 3,
.macrotile_mode = 1,
+ .reg_bus_bw = 76800,
+};
+
+static const struct msm_mdss_data sm8350_data = {
+ .ubwc_enc_version = UBWC_4_0,
+ .ubwc_dec_version = UBWC_4_0,
+ .ubwc_swizzle = 6,
+ .ubwc_static = 1,
+ /* TODO: highest_bank_bit = 2 for LP_DDR4 */
+ .highest_bank_bit = 3,
+ .macrotile_mode = 1,
+ .reg_bus_bw = 74000,
};
static const struct msm_mdss_data sm8550_data = {
/* TODO: highest_bank_bit = 2 for LP_DDR4 */
.highest_bank_bit = 3,
.macrotile_mode = 1,
+ .reg_bus_bw = 57000,
};
static const struct of_device_id mdss_dt_match[] = {
{ .compatible = "qcom,mdss" },
{ .compatible = "qcom,msm8998-mdss", .data = &msm8998_data },
{ .compatible = "qcom,qcm2290-mdss", .data = &qcm2290_data },
+ { .compatible = "qcom,sdm670-mdss", .data = &sdm670_data },
{ .compatible = "qcom,sdm845-mdss", .data = &sdm845_data },
{ .compatible = "qcom,sc7180-mdss", .data = &sc7180_data },
{ .compatible = "qcom,sc7280-mdss", .data = &sc7280_data },
{ .compatible = "qcom,sm6375-mdss", .data = &sm6350_data },
{ .compatible = "qcom,sm8150-mdss", .data = &sm8150_data },
{ .compatible = "qcom,sm8250-mdss", .data = &sm8250_data },
- { .compatible = "qcom,sm8350-mdss", .data = &sm8250_data },
- { .compatible = "qcom,sm8450-mdss", .data = &sm8250_data },
+ { .compatible = "qcom,sm8350-mdss", .data = &sm8350_data },
+ { .compatible = "qcom,sm8450-mdss", .data = &sm8350_data },
{ .compatible = "qcom,sm8550-mdss", .data = &sm8550_data },
+ { .compatible = "qcom,sm8650-mdss", .data = &sm8550_data},
{}
};
MODULE_DEVICE_TABLE(of, mdss_dt_match);
u32 ubwc_static;
u32 highest_bank_bit;
u32 macrotile_mode;
+ u32 reg_bus_bw;
};
#define UBWC_1_0 0x10000000
struct msm_rd_state *rd;
int ret;
+ if (!priv->gpu_pdev)
+ return 0;
+
/* only create on first minor: */
if (priv->rd)
return 0;
#define MSM_INFO_GET_NAME 0x03 /* get debug name, returned by pointer */
#define MSM_INFO_SET_IOVA 0x04 /* set the iova, passed by value */
#define MSM_INFO_GET_FLAGS 0x05 /* get the MSM_BO_x flags */
+#define MSM_INFO_SET_METADATA 0x06 /* set userspace metadata */
+#define MSM_INFO_GET_METADATA 0x07 /* get userspace metadata */
struct drm_msm_gem_info {
__u32 handle; /* in */