author     André Fabian Silva Delgado <emulatorman@parabola.nu>    2016-09-11 04:34:46 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>    2016-09-11 04:34:46 -0300
commit     863981e96738983919de841ec669e157e6bdaeb0 (patch)
tree       d6d89a12e7eb8017837c057935a2271290907f76 /drivers/gpu/drm/msm
parent     8dec7c70575785729a6a9e6719a955e9c545bcab (diff)
Linux-libre 4.7.1-gnupck-4.7.1-gnu
Diffstat (limited to 'drivers/gpu/drm/msm')
39 files changed, 934 insertions, 731 deletions
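Note on the largest change below: the patch replaces the driver's private fence counter (priv->completed_fence, msm_fence_cb) with struct fence objects, reservation objects, and a per-context msm_fence_context. The pre-patch helper in msm_drv.h tested completion with a plain "completed_fence >= fence" comparison; the new fence_completed() in msm_fence.c uses a signed difference of 32-bit seqnos so the test stays correct across counter wraparound. A minimal stand-alone sketch of that comparison follows; the names seqno_completed and the example values are illustrative only, not part of the patch.

/*
 * Stand-alone illustration (not kernel code): msm_fence.c added by this
 * commit checks completion as (int32_t)(completed_fence - fence) >= 0,
 * which keeps working after the 32-bit seqno counter wraps.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool seqno_completed(uint32_t completed, uint32_t fence)
{
	return (int32_t)(completed - fence) >= 0;
}

int main(void)
{
	assert(seqno_completed(7, 5));              /* fence 5 retired once we reach 7 */
	assert(!seqno_completed(7, 9));             /* fence 9 still pending */
	assert(seqno_completed(3, UINT32_MAX - 2)); /* still correct across wraparound */
	/* a naive "completed >= fence" would wrongly report the last case as pending */
	return 0;
}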
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 215495c27..167a4971f 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -23,6 +23,13 @@ config DRM_MSM_REGISTER_LOGGING that can be parsed by envytools demsm tool. If enabled, register logging can be switched on via msm.reglog=y module param. +config DRM_MSM_HDMI_HDCP + bool "Enable HDMI HDCP support in MSM DRM driver" + depends on DRM_MSM && QCOM_SCM + default y + help + Choose this option to enable HDCP state machine + config DRM_MSM_DSI bool "Enable DSI support in MSM DRM driver" depends on DRM_MSM diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index ddb4c9d09..60cb02624 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -10,7 +10,6 @@ msm-y := \ hdmi/hdmi_audio.o \ hdmi/hdmi_bridge.o \ hdmi/hdmi_connector.o \ - hdmi/hdmi_hdcp.o \ hdmi/hdmi_i2c.o \ hdmi/hdmi_phy.o \ hdmi/hdmi_phy_8960.o \ @@ -40,8 +39,10 @@ msm-y := \ mdp/mdp5/mdp5_plane.o \ mdp/mdp5/mdp5_smp.o \ msm_atomic.o \ + msm_debugfs.o \ msm_drv.o \ msm_fb.o \ + msm_fence.o \ msm_gem.o \ msm_gem_prime.o \ msm_gem_submit.o \ @@ -56,6 +57,8 @@ msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o +msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o + msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \ mdp/mdp4/mdp4_dsi_encoder.o \ dsi/dsi_cfg.o \ diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 8880f3eae..7e25f94af 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -120,8 +120,8 @@ void adreno_recover(struct msm_gpu *gpu) /* reset ringbuffer: */ gpu->rb->cur = gpu->rb->start; - /* reset completed fence seqno, just discard anything pending: */ - adreno_gpu->memptrs->fence = gpu->submitted_fence; + /* reset completed fence seqno: */ + adreno_gpu->memptrs->fence = gpu->fctx->completed_fence; adreno_gpu->memptrs->rptr = 0; adreno_gpu->memptrs->wptr = 0; @@ -133,7 +133,7 @@ void adreno_recover(struct msm_gpu *gpu) } } -int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, +void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_file_private *ctx) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); @@ -168,7 +168,7 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, OUT_PKT2(ring); OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1); - OUT_RING(ring, submit->fence); + OUT_RING(ring, submit->fence->seqno); if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) { /* Flush HLSQ lazy updates to make sure there is nothing @@ -185,7 +185,7 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, OUT_PKT3(ring, CP_EVENT_WRITE, 3); OUT_RING(ring, CACHE_FLUSH_TS); OUT_RING(ring, rbmemptr(adreno_gpu, fence)); - OUT_RING(ring, submit->fence); + OUT_RING(ring, submit->fence->seqno); /* we could maybe be clever and only CP_COND_EXEC the interrupt: */ OUT_PKT3(ring, CP_INTERRUPT, 1); @@ -212,8 +212,6 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, #endif gpu->funcs->flush(gpu); - - return 0; } void adreno_flush(struct msm_gpu *gpu) @@ -254,7 +252,7 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m) adreno_gpu->rev.patchid); seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence, - gpu->submitted_fence); + gpu->fctx->last_fence); seq_printf(m, "rptr: %d\n", get_rptr(adreno_gpu)); seq_printf(m, 
"wptr: %d\n", adreno_gpu->memptrs->wptr); seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb)); @@ -295,7 +293,7 @@ void adreno_dump_info(struct msm_gpu *gpu) adreno_gpu->rev.patchid); printk("fence: %d/%d\n", adreno_gpu->memptrs->fence, - gpu->submitted_fence); + gpu->fctx->last_fence); printk("rptr: %d\n", get_rptr(adreno_gpu)); printk("wptr: %d\n", adreno_gpu->memptrs->wptr); printk("rb wptr: %d\n", get_wptr(gpu->rb)); @@ -410,7 +408,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, } adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo); - if (!adreno_gpu->memptrs) { + if (IS_ERR(adreno_gpu->memptrs)) { dev_err(drm->dev, "could not vmap memptrs\n"); return -ENOMEM; } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 1d07511f4..a54f6e036 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -238,7 +238,7 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value); int adreno_hw_init(struct msm_gpu *gpu); uint32_t adreno_last_fence(struct msm_gpu *gpu); void adreno_recover(struct msm_gpu *gpu); -int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, +void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_file_private *ctx); void adreno_flush(struct msm_gpu *gpu); void adreno_idle(struct msm_gpu *gpu); diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index 749fbb28e..03f115f53 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -41,8 +41,6 @@ enum msm_dsi_phy_type { /* Regulators for DSI devices */ struct dsi_reg_entry { char name[32]; - int min_voltage; - int max_voltage; int enable_load; int disable_load; }; diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c index e58e9b91b..93c1ee094 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c @@ -22,9 +22,9 @@ static const struct msm_dsi_config apq8064_dsi_cfg = { .reg_cfg = { .num = 3, .regs = { - {"vdda", 1200000, 1200000, 100000, 100}, - {"avdd", 3000000, 3000000, 110000, 100}, - {"vddio", 1800000, 1800000, 100000, 100}, + {"vdda", 100000, 100}, /* 1.2 V */ + {"avdd", 10000, 100}, /* 3.0 V */ + {"vddio", 100000, 100}, /* 1.8 V */ }, }, .bus_clk_names = dsi_v2_bus_clk_names, @@ -40,10 +40,10 @@ static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = { .reg_cfg = { .num = 4, .regs = { - {"gdsc", -1, -1, -1, -1}, - {"vdd", 3000000, 3000000, 150000, 100}, - {"vdda", 1200000, 1200000, 100000, 100}, - {"vddio", 1800000, 1800000, 100000, 100}, + {"gdsc", -1, -1}, + {"vdd", 150000, 100}, /* 3.0 V */ + {"vdda", 100000, 100}, /* 1.2 V */ + {"vddio", 100000, 100}, /* 1.8 V */ }, }, .bus_clk_names = dsi_6g_bus_clk_names, @@ -59,9 +59,9 @@ static const struct msm_dsi_config msm8916_dsi_cfg = { .reg_cfg = { .num = 3, .regs = { - {"gdsc", -1, -1, -1, -1}, - {"vdda", 1200000, 1200000, 100000, 100}, - {"vddio", 1800000, 1800000, 100000, 100}, + {"gdsc", -1, -1}, + {"vdda", 100000, 100}, /* 1.2 V */ + {"vddio", 100000, 100}, /* 1.8 V */ }, }, .bus_clk_names = dsi_8916_bus_clk_names, @@ -73,13 +73,13 @@ static const struct msm_dsi_config msm8994_dsi_cfg = { .reg_cfg = { .num = 7, .regs = { - {"gdsc", -1, -1, -1, -1}, - {"vdda", 1250000, 1250000, 100000, 100}, - {"vddio", 1800000, 1800000, 100000, 100}, - {"vcca", 1000000, 1000000, 10000, 100}, - {"vdd", 1800000, 1800000, 100000, 100}, - {"lab_reg", -1, -1, -1, -1}, - {"ibb_reg", -1, -1, -1, -1}, 
+ {"gdsc", -1, -1}, + {"vdda", 100000, 100}, /* 1.25 V */ + {"vddio", 100000, 100}, /* 1.8 V */ + {"vcca", 10000, 100}, /* 1.0 V */ + {"vdd", 100000, 100}, /* 1.8 V */ + {"lab_reg", -1, -1}, + {"ibb_reg", -1, -1}, }, }, .bus_clk_names = dsi_6g_bus_clk_names, diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 4282ec6bb..a3e47ad83 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -325,18 +325,6 @@ static int dsi_regulator_init(struct msm_dsi_host *msm_host) return ret; } - for (i = 0; i < num; i++) { - if (regulator_can_change_voltage(s[i].consumer)) { - ret = regulator_set_voltage(s[i].consumer, - regs[i].min_voltage, regs[i].max_voltage); - if (ret < 0) { - pr_err("regulator %d set voltage failed, %d\n", - i, ret); - return ret; - } - } - } - return 0; } diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 58ba7ec17..c8d1f19c9 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -198,9 +198,13 @@ static enum drm_connector_status dsi_mgr_connector_detect( static void dsi_mgr_connector_destroy(struct drm_connector *connector) { + struct dsi_connector *dsi_connector = to_dsi_connector(connector); + DBG(""); - drm_connector_unregister(connector); + drm_connector_cleanup(connector); + + kfree(dsi_connector); } static void dsi_dual_connector_fix_modes(struct drm_connector *connector) @@ -538,12 +542,9 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id) struct dsi_connector *dsi_connector; int ret, i; - dsi_connector = devm_kzalloc(msm_dsi->dev->dev, - sizeof(*dsi_connector), GFP_KERNEL); - if (!dsi_connector) { - ret = -ENOMEM; - goto fail; - } + dsi_connector = kzalloc(sizeof(*dsi_connector), GFP_KERNEL); + if (!dsi_connector) + return ERR_PTR(-ENOMEM); dsi_connector->id = id; @@ -552,7 +553,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id) ret = drm_connector_init(msm_dsi->dev, connector, &dsi_mgr_connector_funcs, DRM_MODE_CONNECTOR_DSI); if (ret) - goto fail; + return ERR_PTR(ret); drm_connector_helper_add(connector, &dsi_mgr_conn_helper_funcs); @@ -565,21 +566,11 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id) connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - ret = drm_connector_register(connector); - if (ret) - goto fail; - for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) drm_mode_connector_attach_encoder(connector, msm_dsi->encoders[i]); return connector; - -fail: - if (connector) - dsi_mgr_connector_destroy(connector); - - return ERR_PTR(ret); } /* initialize bridge */ diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index 91a95fb04..e2f42d8ea 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -177,19 +177,6 @@ static int dsi_phy_regulator_init(struct msm_dsi_phy *phy) return ret; } - for (i = 0; i < num; i++) { - if (regulator_can_change_voltage(s[i].consumer)) { - ret = regulator_set_voltage(s[i].consumer, - regs[i].min_voltage, regs[i].max_voltage); - if (ret < 0) { - dev_err(dev, - "regulator %d set voltage failed, %d\n", - i, ret); - return ret; - } - } - } - return 0; } diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c index 2e9ba118d..f4bc11af8 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c @@ -138,8 +138,8 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = { 
.reg_cfg = { .num = 2, .regs = { - {"vddio", 1800000, 1800000, 100000, 100}, - {"vcca", 1000000, 1000000, 10000, 100}, + {"vddio", 100000, 100}, /* 1.8 V */ + {"vcca", 10000, 100}, /* 1.0 V */ }, }, .ops = { diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c index edf74110c..96d1852af 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c @@ -138,7 +138,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = { .reg_cfg = { .num = 1, .regs = { - {"vddio", 1800000, 1800000, 100000, 100}, + {"vddio", 100000, 100}, }, }, .ops = { @@ -153,7 +153,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = { .reg_cfg = { .num = 1, .regs = { - {"vddio", 1800000, 1800000, 100000, 100}, + {"vddio", 100000, 100}, /* 1.8 V */ }, }, .ops = { diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c index 197b039ca..213355a3e 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c @@ -185,7 +185,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = { .reg_cfg = { .num = 1, .regs = { - {"vddio", 1800000, 1800000, 100000, 100}, + {"vddio", 100000, 100}, /* 1.8 V */ }, }, .ops = { diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c index b4d1b4698..72360cd03 100644 --- a/drivers/gpu/drm/msm/edp/edp_connector.c +++ b/drivers/gpu/drm/msm/edp/edp_connector.c @@ -37,7 +37,7 @@ static void edp_connector_destroy(struct drm_connector *connector) struct edp_connector *edp_connector = to_edp_connector(connector); DBG(""); - drm_connector_unregister(connector); + drm_connector_cleanup(connector); kfree(edp_connector); @@ -124,10 +124,8 @@ struct drm_connector *msm_edp_connector_init(struct msm_edp *edp) int ret; edp_connector = kzalloc(sizeof(*edp_connector), GFP_KERNEL); - if (!edp_connector) { - ret = -ENOMEM; - goto fail; - } + if (!edp_connector) + return ERR_PTR(-ENOMEM); edp_connector->edp = edp; @@ -136,7 +134,7 @@ struct drm_connector *msm_edp_connector_init(struct msm_edp *edp) ret = drm_connector_init(edp->dev, connector, &edp_connector_funcs, DRM_MODE_CONNECTOR_eDP); if (ret) - goto fail; + return ERR_PTR(ret); drm_connector_helper_add(connector, &edp_connector_helper_funcs); @@ -147,17 +145,7 @@ struct drm_connector *msm_edp_connector_init(struct msm_edp *edp) connector->interlace_allowed = false; connector->doublescan_allowed = false; - ret = drm_connector_register(connector); - if (ret) - goto fail; - drm_mode_connector_attach_encoder(connector, edp->encoder); return connector; - -fail: - if (connector) - edp_connector_destroy(connector); - - return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c index 81200e9be..149bfe7dd 100644 --- a/drivers/gpu/drm/msm/edp/edp_ctrl.c +++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c @@ -21,8 +21,6 @@ #include "edp.h" #include "edp.xml.h" -#define VDDA_MIN_UV 1800000 /* uV units */ -#define VDDA_MAX_UV 1800000 /* uV units */ #define VDDA_UA_ON_LOAD 100000 /* uA units */ #define VDDA_UA_OFF_LOAD 100 /* uA units */ @@ -67,7 +65,7 @@ struct edp_ctrl { void __iomem *base; /* regulators */ - struct regulator *vdda_vreg; + struct regulator *vdda_vreg; /* 1.8 V */ struct regulator *lvl_vreg; /* clocks */ @@ -302,21 +300,24 @@ static void edp_clk_disable(struct edp_ctrl *ctrl, u32 clk_mask) static int edp_regulator_init(struct edp_ctrl *ctrl) { struct device *dev = 
&ctrl->pdev->dev; + int ret; DBG(""); ctrl->vdda_vreg = devm_regulator_get(dev, "vdda"); - if (IS_ERR(ctrl->vdda_vreg)) { - pr_err("%s: Could not get vdda reg, ret = %ld\n", __func__, - PTR_ERR(ctrl->vdda_vreg)); + ret = PTR_ERR_OR_ZERO(ctrl->vdda_vreg); + if (ret) { + pr_err("%s: Could not get vdda reg, ret = %d\n", __func__, + ret); ctrl->vdda_vreg = NULL; - return PTR_ERR(ctrl->vdda_vreg); + return ret; } ctrl->lvl_vreg = devm_regulator_get(dev, "lvl-vdd"); - if (IS_ERR(ctrl->lvl_vreg)) { - pr_err("Could not get lvl-vdd reg, %ld", - PTR_ERR(ctrl->lvl_vreg)); + ret = PTR_ERR_OR_ZERO(ctrl->lvl_vreg); + if (ret) { + pr_err("%s: Could not get lvl-vdd reg, ret = %d\n", __func__, + ret); ctrl->lvl_vreg = NULL; - return PTR_ERR(ctrl->lvl_vreg); + return ret; } return 0; @@ -326,12 +327,6 @@ static int edp_regulator_enable(struct edp_ctrl *ctrl) { int ret; - ret = regulator_set_voltage(ctrl->vdda_vreg, VDDA_MIN_UV, VDDA_MAX_UV); - if (ret) { - pr_err("%s:vdda_vreg set_voltage failed, %d\n", __func__, ret); - goto vdda_set_fail; - } - ret = regulator_set_load(ctrl->vdda_vreg, VDDA_UA_ON_LOAD); if (ret < 0) { pr_err("%s: vdda_vreg set regulator mode failed.\n", __func__); diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index 65428cf23..bc7ba0bde 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -243,10 +243,21 @@ struct i2c_adapter *msm_hdmi_i2c_init(struct hdmi *hdmi); /* * hdcp */ +#ifdef CONFIG_DRM_MSM_HDMI_HDCP struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi); void msm_hdmi_hdcp_destroy(struct hdmi *hdmi); void msm_hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl); void msm_hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl); void msm_hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl); +#else +static inline struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi) +{ + return ERR_PTR(-ENXIO); +} +static inline void msm_hdmi_hdcp_destroy(struct hdmi *hdmi) {} +static inline void msm_hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl) {} +static inline void msm_hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl) {} +static inline void msm_hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl) {} +#endif #endif /* __HDMI_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index 26129bff2..b15d72683 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c @@ -112,6 +112,9 @@ static int gpio_config(struct hdmi *hdmi, bool on) for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) { struct hdmi_gpio_data gpio = config->gpios[i]; + if (gpio.num == -1) + continue; + if (gpio.output) { int value = gpio.value ? 
0 : 1; @@ -126,8 +129,10 @@ static int gpio_config(struct hdmi *hdmi, bool on) return 0; err: - while (i--) - gpio_free(config->gpios[i].num); + while (i--) { + if (config->gpios[i].num != -1) + gpio_free(config->gpios[i].num); + } return ret; } @@ -341,7 +346,6 @@ static void hdmi_connector_destroy(struct drm_connector *connector) hdp_disable(hdmi_connector); - drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(hdmi_connector); @@ -433,10 +437,8 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi) int ret; hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL); - if (!hdmi_connector) { - ret = -ENOMEM; - goto fail; - } + if (!hdmi_connector) + return ERR_PTR(-ENOMEM); hdmi_connector->hdmi = hdmi; INIT_WORK(&hdmi_connector->hpd_work, msm_hdmi_hotplug_work); @@ -453,21 +455,13 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi) connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - drm_connector_register(connector); - ret = hpd_enable(hdmi_connector); if (ret) { dev_err(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret); - goto fail; + return ERR_PTR(ret); } drm_mode_connector_attach_encoder(connector, hdmi->encoder); return connector; - -fail: - if (connector) - hdmi_connector_destroy(connector); - - return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index e233acf52..9527dafc3 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c @@ -121,7 +121,7 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) if (!file || (event->base.file_priv == file)) { mdp4_crtc->event = NULL; DBG("%s: send event: %p", mdp4_crtc->name, event); - drm_send_vblank_event(dev, mdp4_crtc->id, event); + drm_crtc_send_vblank_event(crtc, event); } } spin_unlock_irqrestore(&dev->event_lock, flags); @@ -427,7 +427,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc, } if (handle) { - cursor_bo = drm_gem_object_lookup(dev, file_priv, handle); + cursor_bo = drm_gem_object_lookup(file_priv, handle); if (!cursor_bo) return -ENOENT; } else { diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index 76e1dfb5d..67442d50a 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c @@ -50,30 +50,6 @@ static int mdp4_hw_init(struct msm_kms *kms) mdp4_kms->rev = minor; - if (mdp4_kms->dsi_pll_vdda) { - if ((mdp4_kms->rev == 2) || (mdp4_kms->rev == 4)) { - ret = regulator_set_voltage(mdp4_kms->dsi_pll_vdda, - 1200000, 1200000); - if (ret) { - dev_err(dev->dev, - "failed to set dsi_pll_vdda voltage: %d\n", ret); - goto out; - } - } - } - - if (mdp4_kms->dsi_pll_vddio) { - if (mdp4_kms->rev == 2) { - ret = regulator_set_voltage(mdp4_kms->dsi_pll_vddio, - 1800000, 1800000); - if (ret) { - dev_err(dev->dev, - "failed to set dsi_pll_vddio voltage: %d\n", ret); - goto out; - } - } - } - if (mdp4_kms->rev > 1) { mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff); mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f); @@ -485,16 +461,6 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) goto fail; } - mdp4_kms->dsi_pll_vdda = - devm_regulator_get_optional(&pdev->dev, "dsi_pll_vdda"); - if (IS_ERR(mdp4_kms->dsi_pll_vdda)) - mdp4_kms->dsi_pll_vdda = NULL; - - mdp4_kms->dsi_pll_vddio = - devm_regulator_get_optional(&pdev->dev, "dsi_pll_vddio"); - if (IS_ERR(mdp4_kms->dsi_pll_vddio)) - mdp4_kms->dsi_pll_vddio = NULL; - /* NOTE: 
driver for this regulator still missing upstream.. use * _get_exclusive() and ignore the error if it does not exist * (and hope that the bootloader left it on for us) diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h index b2828717b..c5d045d56 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h @@ -37,8 +37,6 @@ struct mdp4_kms { void __iomem *mmio; - struct regulator *dsi_pll_vdda; - struct regulator *dsi_pll_vddio; struct regulator *vdd; struct clk *clk; diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c index e73e1742b..2648cd763 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c @@ -48,7 +48,6 @@ static void mdp4_lvds_connector_destroy(struct drm_connector *connector) struct mdp4_lvds_connector *mdp4_lvds_connector = to_mdp4_lvds_connector(connector); - drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(mdp4_lvds_connector); @@ -121,13 +120,10 @@ struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev, { struct drm_connector *connector = NULL; struct mdp4_lvds_connector *mdp4_lvds_connector; - int ret; mdp4_lvds_connector = kzalloc(sizeof(*mdp4_lvds_connector), GFP_KERNEL); - if (!mdp4_lvds_connector) { - ret = -ENOMEM; - goto fail; - } + if (!mdp4_lvds_connector) + return ERR_PTR(-ENOMEM); mdp4_lvds_connector->encoder = encoder; mdp4_lvds_connector->panel_node = panel_node; @@ -143,15 +139,7 @@ struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev, connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - drm_connector_register(connector); - drm_mode_connector_attach_encoder(connector, encoder); return connector; - -fail: - if (connector) - mdp4_lvds_connector_destroy(connector); - - return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 9673b9520..88fe256c1 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -149,7 +149,7 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) if (!file || (event->base.file_priv == file)) { mdp5_crtc->event = NULL; DBG("%s: send event: %p", mdp5_crtc->name, event); - drm_send_vblank_event(dev, mdp5_crtc->id, event); + drm_crtc_send_vblank_event(crtc, event); } } spin_unlock_irqrestore(&dev->event_lock, flags); @@ -518,7 +518,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, goto set_cursor; } - cursor_bo = drm_gem_object_lookup(dev, file, handle); + cursor_bo = drm_gem_object_lookup(file, handle); if (!cursor_bo) return -ENOENT; diff --git a/drivers/gpu/drm/msm/mdp/mdp_format.c b/drivers/gpu/drm/msm/mdp/mdp_format.c index 1c2caffc9..b4a8aa449 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_format.c +++ b/drivers/gpu/drm/msm/mdp/mdp_format.c @@ -105,6 +105,12 @@ static const struct mdp_format formats[] = { MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4, MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(XBGR8888, 8, 8, 8, 8, 2, 0, 1, 3, false, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(RGBX8888, 8, 8, 8, 8, 3, 1, 0, 2, false, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), + FMT(BGRX8888, 8, 8, 8, 8, 3, 2, 0, 1, false, true, 4, 4, + MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3, 
MDP_PLANE_INTERLEAVED, CHROMA_FULL, false), FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3, diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 7eb253bc2..e3892c263 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -18,16 +18,16 @@ #include "msm_drv.h" #include "msm_kms.h" #include "msm_gem.h" +#include "msm_fence.h" struct msm_commit { struct drm_device *dev; struct drm_atomic_state *state; - uint32_t fence; - struct msm_fence_cb fence_cb; + struct work_struct work; uint32_t crtc_mask; }; -static void fence_cb(struct msm_fence_cb *cb); +static void commit_worker(struct work_struct *work); /* block until specified crtcs are no longer pending update, and * atomically mark them as pending update @@ -69,11 +69,7 @@ static struct msm_commit *commit_init(struct drm_atomic_state *state) c->dev = state->dev; c->state = state; - /* TODO we might need a way to indicate to run the cb on a - * different wq so wait_for_vblanks() doesn't block retiring - * bo's.. - */ - INIT_FENCE_CB(&c->fence_cb, fence_cb); + INIT_WORK(&c->work, commit_worker); return c; } @@ -114,13 +110,15 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev, /* The (potentially) asynchronous part of the commit. At this point * nothing can fail short of armageddon. */ -static void complete_commit(struct msm_commit *c) +static void complete_commit(struct msm_commit *c, bool async) { struct drm_atomic_state *state = c->state; struct drm_device *dev = state->dev; struct msm_drm_private *priv = dev->dev_private; struct msm_kms *kms = priv->kms; + drm_atomic_helper_wait_for_fences(dev, state); + kms->funcs->prepare_commit(kms, state); drm_atomic_helper_commit_modeset_disables(dev, state); @@ -153,17 +151,9 @@ static void complete_commit(struct msm_commit *c) commit_destroy(c); } -static void fence_cb(struct msm_fence_cb *cb) -{ - struct msm_commit *c = - container_of(cb, struct msm_commit, fence_cb); - complete_commit(c); -} - -static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb) +static void commit_worker(struct work_struct *work) { - struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0); - c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ)); + complete_commit(container_of(work, struct msm_commit, work), true); } int msm_atomic_check(struct drm_device *dev, @@ -190,21 +180,20 @@ int msm_atomic_check(struct drm_device *dev, * drm_atomic_helper_commit - commit validated state object * @dev: DRM device * @state: the driver state object - * @async: asynchronous commit + * @nonblock: nonblocking commit * * This function commits a with drm_atomic_helper_check() pre-validated state - * object. This can still fail when e.g. the framebuffer reservation fails. For - * now this doesn't implement asynchronous commits. + * object. This can still fail when e.g. the framebuffer reservation fails. * * RETURNS * Zero for success or -errno. 
*/ int msm_atomic_commit(struct drm_device *dev, - struct drm_atomic_state *state, bool async) + struct drm_atomic_state *state, bool nonblock) { + struct msm_drm_private *priv = dev->dev_private; int nplanes = dev->mode_config.num_total_plane; int ncrtcs = dev->mode_config.num_crtc; - ktime_t timeout; struct msm_commit *c; int i, ret; @@ -238,8 +227,12 @@ int msm_atomic_commit(struct drm_device *dev, if (!plane) continue; - if ((plane->state->fb != new_state->fb) && new_state->fb) - add_fb(c, new_state->fb); + if ((plane->state->fb != new_state->fb) && new_state->fb) { + struct drm_gem_object *obj = msm_framebuffer_bo(new_state->fb, 0); + struct msm_gem_object *msm_obj = to_msm_bo(obj); + + new_state->fence = reservation_object_get_excl_rcu(msm_obj->resv); + } } /* @@ -276,17 +269,12 @@ int msm_atomic_commit(struct drm_device *dev, * current layout. */ - if (async) { - msm_queue_fence_cb(dev, &c->fence_cb, c->fence); + if (nonblock) { + queue_work(priv->atomic_wq, &c->work); return 0; } - timeout = ktime_add_ms(ktime_get(), 1000); - - /* uninterruptible wait */ - msm_wait_fence(dev, c->fence, &timeout, false); - - complete_commit(c); + complete_commit(c, false); return 0; diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c new file mode 100644 index 000000000..663f2b6ef --- /dev/null +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -0,0 +1,168 @@ +/* + * Copyright (C) 2013-2016 Red Hat + * Author: Rob Clark <robdclark@gmail.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifdef CONFIG_DEBUG_FS +#include "msm_drv.h" +#include "msm_gpu.h" + +static int msm_gpu_show(struct drm_device *dev, struct seq_file *m) +{ + struct msm_drm_private *priv = dev->dev_private; + struct msm_gpu *gpu = priv->gpu; + + if (gpu) { + seq_printf(m, "%s Status:\n", gpu->name); + gpu->funcs->show(gpu, m); + } + + return 0; +} + +static int msm_gem_show(struct drm_device *dev, struct seq_file *m) +{ + struct msm_drm_private *priv = dev->dev_private; + struct msm_gpu *gpu = priv->gpu; + + if (gpu) { + seq_printf(m, "Active Objects (%s):\n", gpu->name); + msm_gem_describe_objects(&gpu->active_list, m); + } + + seq_printf(m, "Inactive Objects:\n"); + msm_gem_describe_objects(&priv->inactive_list, m); + + return 0; +} + +static int msm_mm_show(struct drm_device *dev, struct seq_file *m) +{ + return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm); +} + +static int msm_fb_show(struct drm_device *dev, struct seq_file *m) +{ + struct msm_drm_private *priv = dev->dev_private; + struct drm_framebuffer *fb, *fbdev_fb = NULL; + + if (priv->fbdev) { + seq_printf(m, "fbcon "); + fbdev_fb = priv->fbdev->fb; + msm_framebuffer_describe(fbdev_fb, m); + } + + mutex_lock(&dev->mode_config.fb_lock); + list_for_each_entry(fb, &dev->mode_config.fb_list, head) { + if (fb == fbdev_fb) + continue; + + seq_printf(m, "user "); + msm_framebuffer_describe(fb, m); + } + mutex_unlock(&dev->mode_config.fb_lock); + + return 0; +} + +static int show_locked(struct seq_file *m, void *arg) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + int (*show)(struct drm_device *dev, struct seq_file *m) = + node->info_ent->data; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + ret = show(dev, m); + + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +static struct drm_info_list msm_debugfs_list[] = { + {"gpu", show_locked, 0, msm_gpu_show}, + {"gem", show_locked, 0, msm_gem_show}, + { "mm", show_locked, 0, msm_mm_show }, + { "fb", show_locked, 0, msm_fb_show }, +}; + +static int late_init_minor(struct drm_minor *minor) +{ + int ret; + + if (!minor) + return 0; + + ret = msm_rd_debugfs_init(minor); + if (ret) { + dev_err(minor->dev->dev, "could not install rd debugfs\n"); + return ret; + } + + ret = msm_perf_debugfs_init(minor); + if (ret) { + dev_err(minor->dev->dev, "could not install perf debugfs\n"); + return ret; + } + + return 0; +} + +int msm_debugfs_late_init(struct drm_device *dev) +{ + int ret; + ret = late_init_minor(dev->primary); + if (ret) + return ret; + ret = late_init_minor(dev->render); + if (ret) + return ret; + ret = late_init_minor(dev->control); + return ret; +} + +int msm_debugfs_init(struct drm_minor *minor) +{ + struct drm_device *dev = minor->dev; + int ret; + + ret = drm_debugfs_create_files(msm_debugfs_list, + ARRAY_SIZE(msm_debugfs_list), + minor->debugfs_root, minor); + + if (ret) { + dev_err(dev->dev, "could not install msm_debugfs_list\n"); + return ret; + } + + return 0; +} + +void msm_debugfs_cleanup(struct drm_minor *minor) +{ + drm_debugfs_remove_files(msm_debugfs_list, + ARRAY_SIZE(msm_debugfs_list), minor); + if (!minor->dev->dev_private) + return; + msm_rd_debugfs_cleanup(minor); + msm_perf_debugfs_cleanup(minor); +} +#endif + diff --git a/drivers/gpu/drm/msm/msm_debugfs.h b/drivers/gpu/drm/msm/msm_debugfs.h new file mode 100644 index 000000000..6110c972f --- /dev/null +++ b/drivers/gpu/drm/msm/msm_debugfs.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2016 
Red Hat + * Author: Rob Clark <robdclark@gmail.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __MSM_DEBUGFS_H__ +#define __MSM_DEBUGFS_H__ + +#ifdef CONFIG_DEBUG_FS +int msm_debugfs_init(struct drm_minor *minor); +void msm_debugfs_cleanup(struct drm_minor *minor); +#endif + +#endif /* __MSM_DEBUGFS_H__ */ diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 701c51ed3..9c654092e 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -16,6 +16,8 @@ */ #include "msm_drv.h" +#include "msm_debugfs.h" +#include "msm_fence.h" #include "msm_gpu.h" #include "msm_kms.h" @@ -173,13 +175,11 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv, return 0; } -/* - * DRM operations: - */ - -static int msm_unload(struct drm_device *dev) +static int msm_drm_uninit(struct device *dev) { - struct msm_drm_private *priv = dev->dev_private; + struct platform_device *pdev = to_platform_device(dev); + struct drm_device *ddev = platform_get_drvdata(pdev); + struct msm_drm_private *priv = ddev->dev_private; struct msm_kms *kms = priv->kms; struct msm_gpu *gpu = priv->gpu; struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl; @@ -195,31 +195,37 @@ static int msm_unload(struct drm_device *dev) kfree(vbl_ev); } - drm_kms_helper_poll_fini(dev); + drm_kms_helper_poll_fini(ddev); + + drm_connector_unregister_all(ddev); + + drm_dev_unregister(ddev); #ifdef CONFIG_DRM_FBDEV_EMULATION if (fbdev && priv->fbdev) - msm_fbdev_free(dev); + msm_fbdev_free(ddev); #endif - drm_mode_config_cleanup(dev); - drm_vblank_cleanup(dev); + drm_mode_config_cleanup(ddev); - pm_runtime_get_sync(dev->dev); - drm_irq_uninstall(dev); - pm_runtime_put_sync(dev->dev); + pm_runtime_get_sync(dev); + drm_irq_uninstall(ddev); + pm_runtime_put_sync(dev); flush_workqueue(priv->wq); destroy_workqueue(priv->wq); + flush_workqueue(priv->atomic_wq); + destroy_workqueue(priv->atomic_wq); + if (kms) { - pm_runtime_disable(dev->dev); + pm_runtime_disable(dev); kms->funcs->destroy(kms); } if (gpu) { - mutex_lock(&dev->struct_mutex); + mutex_lock(&ddev->struct_mutex); gpu->funcs->pm_suspend(gpu); - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&ddev->struct_mutex); gpu->funcs->destroy(gpu); } @@ -227,13 +233,14 @@ static int msm_unload(struct drm_device *dev) DEFINE_DMA_ATTRS(attrs); dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); drm_mm_takedown(&priv->vram.mm); - dma_free_attrs(dev->dev, priv->vram.size, NULL, - priv->vram.paddr, &attrs); + dma_free_attrs(dev, priv->vram.size, NULL, + priv->vram.paddr, &attrs); } - component_unbind_all(dev->dev, dev); + component_unbind_all(dev, ddev); - dev->dev_private = NULL; + ddev->dev_private = NULL; + drm_dev_unref(ddev); kfree(priv); @@ -321,50 +328,60 @@ static int msm_init_vram(struct drm_device *dev) return ret; } -static int msm_load(struct drm_device *dev, unsigned long flags) +static int msm_drm_init(struct device *dev, struct drm_driver *drv) { - struct platform_device *pdev = 
dev->platformdev; + struct platform_device *pdev = to_platform_device(dev); + struct drm_device *ddev; struct msm_drm_private *priv; struct msm_kms *kms; int ret; + ddev = drm_dev_alloc(drv, dev); + if (!ddev) { + dev_err(dev, "failed to allocate drm_device\n"); + return -ENOMEM; + } + + platform_set_drvdata(pdev, ddev); + ddev->platformdev = pdev; + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { - dev_err(dev->dev, "failed to allocate private data\n"); + drm_dev_unref(ddev); return -ENOMEM; } - dev->dev_private = priv; + ddev->dev_private = priv; priv->wq = alloc_ordered_workqueue("msm", 0); - init_waitqueue_head(&priv->fence_event); + priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0); init_waitqueue_head(&priv->pending_crtcs_event); INIT_LIST_HEAD(&priv->inactive_list); - INIT_LIST_HEAD(&priv->fence_cbs); INIT_LIST_HEAD(&priv->vblank_ctrl.event_list); INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker); spin_lock_init(&priv->vblank_ctrl.lock); - drm_mode_config_init(dev); - - platform_set_drvdata(pdev, dev); + drm_mode_config_init(ddev); /* Bind all our sub-components: */ - ret = component_bind_all(dev->dev, dev); - if (ret) + ret = component_bind_all(dev, ddev); + if (ret) { + kfree(priv); + drm_dev_unref(ddev); return ret; + } - ret = msm_init_vram(dev); + ret = msm_init_vram(ddev); if (ret) goto fail; switch (get_mdp_ver(pdev)) { case 4: - kms = mdp4_kms_init(dev); + kms = mdp4_kms_init(ddev); break; case 5: - kms = mdp5_kms_init(dev); + kms = mdp5_kms_init(ddev); break; default: kms = ERR_PTR(-ENODEV); @@ -378,7 +395,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags) * and (for example) use dmabuf/prime to share buffers with * imx drm driver on iMX5 */ - dev_err(dev->dev, "failed to load kms\n"); + dev_err(dev, "failed to load kms\n"); ret = PTR_ERR(kms); goto fail; } @@ -386,50 +403,64 @@ static int msm_load(struct drm_device *dev, unsigned long flags) priv->kms = kms; if (kms) { - pm_runtime_enable(dev->dev); + pm_runtime_enable(dev); ret = kms->funcs->hw_init(kms); if (ret) { - dev_err(dev->dev, "kms hw init failed: %d\n", ret); + dev_err(dev, "kms hw init failed: %d\n", ret); goto fail; } } - dev->mode_config.funcs = &mode_config_funcs; + ddev->mode_config.funcs = &mode_config_funcs; - ret = drm_vblank_init(dev, priv->num_crtcs); + ret = drm_vblank_init(ddev, priv->num_crtcs); if (ret < 0) { - dev_err(dev->dev, "failed to initialize vblank\n"); + dev_err(dev, "failed to initialize vblank\n"); goto fail; } - pm_runtime_get_sync(dev->dev); - ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0)); - pm_runtime_put_sync(dev->dev); + pm_runtime_get_sync(dev); + ret = drm_irq_install(ddev, platform_get_irq(pdev, 0)); + pm_runtime_put_sync(dev); if (ret < 0) { - dev_err(dev->dev, "failed to install IRQ handler\n"); + dev_err(dev, "failed to install IRQ handler\n"); + goto fail; + } + + ret = drm_dev_register(ddev, 0); + if (ret) + goto fail; + + ret = drm_connector_register_all(ddev); + if (ret) { + dev_err(dev, "failed to register connectors\n"); goto fail; } - drm_mode_config_reset(dev); + drm_mode_config_reset(ddev); #ifdef CONFIG_DRM_FBDEV_EMULATION if (fbdev) - priv->fbdev = msm_fbdev_init(dev); + priv->fbdev = msm_fbdev_init(ddev); #endif - ret = msm_debugfs_late_init(dev); + ret = msm_debugfs_late_init(ddev); if (ret) goto fail; - drm_kms_helper_poll_init(dev); + drm_kms_helper_poll_init(ddev); return 0; fail: - msm_unload(dev); + msm_drm_uninit(dev); return ret; } +/* + * DRM operations: + */ + static void load_gpu(struct 
drm_device *dev) { static DEFINE_MUTEX(init_lock); @@ -535,265 +566,6 @@ static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe) } /* - * DRM debugfs: - */ - -#ifdef CONFIG_DEBUG_FS -static int msm_gpu_show(struct drm_device *dev, struct seq_file *m) -{ - struct msm_drm_private *priv = dev->dev_private; - struct msm_gpu *gpu = priv->gpu; - - if (gpu) { - seq_printf(m, "%s Status:\n", gpu->name); - gpu->funcs->show(gpu, m); - } - - return 0; -} - -static int msm_gem_show(struct drm_device *dev, struct seq_file *m) -{ - struct msm_drm_private *priv = dev->dev_private; - struct msm_gpu *gpu = priv->gpu; - - if (gpu) { - seq_printf(m, "Active Objects (%s):\n", gpu->name); - msm_gem_describe_objects(&gpu->active_list, m); - } - - seq_printf(m, "Inactive Objects:\n"); - msm_gem_describe_objects(&priv->inactive_list, m); - - return 0; -} - -static int msm_mm_show(struct drm_device *dev, struct seq_file *m) -{ - return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm); -} - -static int msm_fb_show(struct drm_device *dev, struct seq_file *m) -{ - struct msm_drm_private *priv = dev->dev_private; - struct drm_framebuffer *fb, *fbdev_fb = NULL; - - if (priv->fbdev) { - seq_printf(m, "fbcon "); - fbdev_fb = priv->fbdev->fb; - msm_framebuffer_describe(fbdev_fb, m); - } - - mutex_lock(&dev->mode_config.fb_lock); - list_for_each_entry(fb, &dev->mode_config.fb_list, head) { - if (fb == fbdev_fb) - continue; - - seq_printf(m, "user "); - msm_framebuffer_describe(fb, m); - } - mutex_unlock(&dev->mode_config.fb_lock); - - return 0; -} - -static int show_locked(struct seq_file *m, void *arg) -{ - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - int (*show)(struct drm_device *dev, struct seq_file *m) = - node->info_ent->data; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - - ret = show(dev, m); - - mutex_unlock(&dev->struct_mutex); - - return ret; -} - -static struct drm_info_list msm_debugfs_list[] = { - {"gpu", show_locked, 0, msm_gpu_show}, - {"gem", show_locked, 0, msm_gem_show}, - { "mm", show_locked, 0, msm_mm_show }, - { "fb", show_locked, 0, msm_fb_show }, -}; - -static int late_init_minor(struct drm_minor *minor) -{ - int ret; - - if (!minor) - return 0; - - ret = msm_rd_debugfs_init(minor); - if (ret) { - dev_err(minor->dev->dev, "could not install rd debugfs\n"); - return ret; - } - - ret = msm_perf_debugfs_init(minor); - if (ret) { - dev_err(minor->dev->dev, "could not install perf debugfs\n"); - return ret; - } - - return 0; -} - -int msm_debugfs_late_init(struct drm_device *dev) -{ - int ret; - ret = late_init_minor(dev->primary); - if (ret) - return ret; - ret = late_init_minor(dev->render); - if (ret) - return ret; - ret = late_init_minor(dev->control); - return ret; -} - -static int msm_debugfs_init(struct drm_minor *minor) -{ - struct drm_device *dev = minor->dev; - int ret; - - ret = drm_debugfs_create_files(msm_debugfs_list, - ARRAY_SIZE(msm_debugfs_list), - minor->debugfs_root, minor); - - if (ret) { - dev_err(dev->dev, "could not install msm_debugfs_list\n"); - return ret; - } - - return 0; -} - -static void msm_debugfs_cleanup(struct drm_minor *minor) -{ - drm_debugfs_remove_files(msm_debugfs_list, - ARRAY_SIZE(msm_debugfs_list), minor); - if (!minor->dev->dev_private) - return; - msm_rd_debugfs_cleanup(minor); - msm_perf_debugfs_cleanup(minor); -} -#endif - -/* - * Fences: - */ - -int msm_wait_fence(struct drm_device *dev, uint32_t fence, - 
ktime_t *timeout , bool interruptible) -{ - struct msm_drm_private *priv = dev->dev_private; - int ret; - - if (!priv->gpu) - return 0; - - if (fence > priv->gpu->submitted_fence) { - DRM_ERROR("waiting on invalid fence: %u (of %u)\n", - fence, priv->gpu->submitted_fence); - return -EINVAL; - } - - if (!timeout) { - /* no-wait: */ - ret = fence_completed(dev, fence) ? 0 : -EBUSY; - } else { - ktime_t now = ktime_get(); - unsigned long remaining_jiffies; - - if (ktime_compare(*timeout, now) < 0) { - remaining_jiffies = 0; - } else { - ktime_t rem = ktime_sub(*timeout, now); - struct timespec ts = ktime_to_timespec(rem); - remaining_jiffies = timespec_to_jiffies(&ts); - } - - if (interruptible) - ret = wait_event_interruptible_timeout(priv->fence_event, - fence_completed(dev, fence), - remaining_jiffies); - else - ret = wait_event_timeout(priv->fence_event, - fence_completed(dev, fence), - remaining_jiffies); - - if (ret == 0) { - DBG("timeout waiting for fence: %u (completed: %u)", - fence, priv->completed_fence); - ret = -ETIMEDOUT; - } else if (ret != -ERESTARTSYS) { - ret = 0; - } - } - - return ret; -} - -int msm_queue_fence_cb(struct drm_device *dev, - struct msm_fence_cb *cb, uint32_t fence) -{ - struct msm_drm_private *priv = dev->dev_private; - int ret = 0; - - mutex_lock(&dev->struct_mutex); - if (!list_empty(&cb->work.entry)) { - ret = -EINVAL; - } else if (fence > priv->completed_fence) { - cb->fence = fence; - list_add_tail(&cb->work.entry, &priv->fence_cbs); - } else { - queue_work(priv->wq, &cb->work); - } - mutex_unlock(&dev->struct_mutex); - - return ret; -} - -/* called from workqueue */ -void msm_update_fence(struct drm_device *dev, uint32_t fence) -{ - struct msm_drm_private *priv = dev->dev_private; - - mutex_lock(&dev->struct_mutex); - priv->completed_fence = max(fence, priv->completed_fence); - - while (!list_empty(&priv->fence_cbs)) { - struct msm_fence_cb *cb; - - cb = list_first_entry(&priv->fence_cbs, - struct msm_fence_cb, work.entry); - - if (cb->fence > priv->completed_fence) - break; - - list_del_init(&cb->work.entry); - queue_work(priv->wq, &cb->work); - } - - mutex_unlock(&dev->struct_mutex); - - wake_up_all(&priv->fence_event); -} - -void __msm_fence_worker(struct work_struct *work) -{ - struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work); - cb->func(cb); -} - -/* * DRM ioctls: */ @@ -850,7 +622,7 @@ static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data, return -EINVAL; } - obj = drm_gem_object_lookup(dev, file, args->handle); + obj = drm_gem_object_lookup(file, args->handle); if (!obj) return -ENOENT; @@ -868,7 +640,7 @@ static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data, struct drm_gem_object *obj; int ret; - obj = drm_gem_object_lookup(dev, file, args->handle); + obj = drm_gem_object_lookup(file, args->handle); if (!obj) return -ENOENT; @@ -889,7 +661,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data, if (args->pad) return -EINVAL; - obj = drm_gem_object_lookup(dev, file, args->handle); + obj = drm_gem_object_lookup(file, args->handle); if (!obj) return -ENOENT; @@ -903,6 +675,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data, static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, struct drm_file *file) { + struct msm_drm_private *priv = dev->dev_private; struct drm_msm_wait_fence *args = data; ktime_t timeout = to_ktime(args->timeout); @@ -911,7 +684,10 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, return -EINVAL; } - return 
msm_wait_fence(dev, args->fence, &timeout, true); + if (!priv->gpu) + return 0; + + return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true); } static const struct drm_ioctl_desc msm_ioctls[] = { @@ -951,8 +727,6 @@ static struct drm_driver msm_driver = { DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_MODESET, - .load = msm_load, - .unload = msm_unload, .open = msm_open, .preclose = msm_preclose, .lastclose = msm_lastclose, @@ -1052,12 +826,12 @@ static int add_components(struct device *dev, struct component_match **matchptr, static int msm_drm_bind(struct device *dev) { - return drm_platform_init(&msm_driver, to_platform_device(dev)); + return msm_drm_init(dev, &msm_driver); } static void msm_drm_unbind(struct device *dev) { - drm_put_dev(platform_get_drvdata(to_platform_device(dev))); + msm_drm_uninit(dev); } static const struct component_master_ops msm_drm_ops = { diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 870dbe58c..5b2963f32 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -49,6 +49,8 @@ struct msm_mmu; struct msm_rd_state; struct msm_perf_state; struct msm_gem_submit; +struct msm_fence_context; +struct msm_fence_cb; #define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */ @@ -100,9 +102,6 @@ struct msm_drm_private { struct drm_fb_helper *fbdev; - uint32_t next_fence, completed_fence; - wait_queue_head_t fence_event; - struct msm_rd_state *rd; struct msm_perf_state *perf; @@ -110,9 +109,7 @@ struct msm_drm_private { struct list_head inactive_list; struct workqueue_struct *wq; - - /* callbacks deferred until bo is inactive: */ - struct list_head fence_cbs; + struct workqueue_struct *atomic_wq; /* crtcs pending async atomic updates: */ uint32_t pending_crtcs; @@ -157,33 +154,14 @@ struct msm_format { uint32_t pixel_format; }; -/* callback from wq once fence has passed: */ -struct msm_fence_cb { - struct work_struct work; - uint32_t fence; - void (*func)(struct msm_fence_cb *cb); -}; - -void __msm_fence_worker(struct work_struct *work); - -#define INIT_FENCE_CB(_cb, _func) do { \ - INIT_WORK(&(_cb)->work, __msm_fence_worker); \ - (_cb)->func = _func; \ - } while (0) - int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state); int msm_atomic_commit(struct drm_device *dev, - struct drm_atomic_state *state, bool async); + struct drm_atomic_state *state, bool nonblock); int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu); -int msm_wait_fence(struct drm_device *dev, uint32_t fence, - ktime_t *timeout, bool interruptible); -int msm_queue_fence_cb(struct drm_device *dev, - struct msm_fence_cb *cb, uint32_t fence); -void msm_update_fence(struct drm_device *dev, uint32_t fence); - +void msm_gem_submit_free(struct msm_gem_submit *submit); int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file); @@ -213,13 +191,12 @@ int msm_gem_prime_pin(struct drm_gem_object *obj); void msm_gem_prime_unpin(struct drm_gem_object *obj); void *msm_gem_vaddr_locked(struct drm_gem_object *obj); void *msm_gem_vaddr(struct drm_gem_object *obj); -int msm_gem_queue_inactive_cb(struct drm_gem_object *obj, - struct msm_fence_cb *cb); +int msm_gem_sync_object(struct drm_gem_object *obj, + struct msm_fence_context *fctx, bool exclusive); void msm_gem_move_to_active(struct drm_gem_object *obj, - struct msm_gpu *gpu, bool write, uint32_t fence); + struct msm_gpu *gpu, bool exclusive, struct fence *fence); void msm_gem_move_to_inactive(struct drm_gem_object *obj); -int 
msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, - ktime_t *timeout); +int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout); int msm_gem_cpu_fini(struct drm_gem_object *obj); void msm_gem_free_object(struct drm_gem_object *obj); int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, @@ -227,7 +204,7 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags); struct drm_gem_object *msm_gem_import(struct drm_device *dev, - uint32_t size, struct sg_table *sgt); + struct dma_buf *dmabuf, struct sg_table *sgt); int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id); void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id); @@ -303,12 +280,6 @@ u32 msm_readl(const void __iomem *addr); #define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) #define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) -static inline bool fence_completed(struct drm_device *dev, uint32_t fence) -{ - struct msm_drm_private *priv = dev->dev_private; - return priv->completed_fence >= fence; -} - static inline int align_pitch(int width, int bpp) { int bytespp = (bpp + 7) / 8; @@ -327,5 +298,20 @@ static inline int align_pitch(int width, int bpp) /* for conditionally setting boolean flag(s): */ #define COND(bool, val) ((bool) ? (val) : 0) +static inline unsigned long timeout_to_jiffies(const ktime_t *timeout) +{ + ktime_t now = ktime_get(); + unsigned long remaining_jiffies; + + if (ktime_compare(*timeout, now) < 0) { + remaining_jiffies = 0; + } else { + ktime_t rem = ktime_sub(*timeout, now); + struct timespec ts = ktime_to_timespec(rem); + remaining_jiffies = timespec_to_jiffies(&ts); + } + + return remaining_jiffies; +} #endif /* __MSM_DRV_H__ */ diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index a474d6cf5..461dc8b87 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -77,7 +77,7 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m) seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n", fb->width, fb->height, (char *)&fb->pixel_format, - fb->refcount.refcount.counter, fb->base.id); + drm_framebuffer_read_refcount(fb), fb->base.id); for (i = 0; i < n; i++) { seq_printf(m, " %d: offset=%d pitch=%d, obj: ", @@ -145,8 +145,7 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev, int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format); for (i = 0; i < n; i++) { - bos[i] = drm_gem_object_lookup(dev, file, - mode_cmd->handles[i]); + bos[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]); if (!bos[i]) { ret = -ENXIO; goto out_unref; diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index d9759bf34..c6cf837c5 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -159,6 +159,10 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, dev->mode_config.fb_base = paddr; fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo); + if (IS_ERR(fbi->screen_base)) { + ret = PTR_ERR(fbi->screen_base); + goto fail_unlock; + } fbi->screen_size = fbdev->bo->size; fbi->fix.smem_start = paddr; fbi->fix.smem_len = fbdev->bo->size; diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c new file mode 100644 index 000000000..a9b9b1c95 --- /dev/null +++ b/drivers/gpu/drm/msm/msm_fence.c @@ -0,0 +1,163 @@ +/* + * Copyright (C) 2013-2016 Red Hat + * Author: Rob Clark <robdclark@gmail.com> + * 
+ * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/fence.h> + +#include "msm_drv.h" +#include "msm_fence.h" + + +struct msm_fence_context * +msm_fence_context_alloc(struct drm_device *dev, const char *name) +{ + struct msm_fence_context *fctx; + + fctx = kzalloc(sizeof(*fctx), GFP_KERNEL); + if (!fctx) + return ERR_PTR(-ENOMEM); + + fctx->dev = dev; + fctx->name = name; + fctx->context = fence_context_alloc(1); + init_waitqueue_head(&fctx->event); + spin_lock_init(&fctx->spinlock); + + return fctx; +} + +void msm_fence_context_free(struct msm_fence_context *fctx) +{ + kfree(fctx); +} + +static inline bool fence_completed(struct msm_fence_context *fctx, uint32_t fence) +{ + return (int32_t)(fctx->completed_fence - fence) >= 0; +} + +/* legacy path for WAIT_FENCE ioctl: */ +int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence, + ktime_t *timeout, bool interruptible) +{ + int ret; + + if (fence > fctx->last_fence) { + DRM_ERROR("%s: waiting on invalid fence: %u (of %u)\n", + fctx->name, fence, fctx->last_fence); + return -EINVAL; + } + + if (!timeout) { + /* no-wait: */ + ret = fence_completed(fctx, fence) ? 0 : -EBUSY; + } else { + unsigned long remaining_jiffies = timeout_to_jiffies(timeout); + + if (interruptible) + ret = wait_event_interruptible_timeout(fctx->event, + fence_completed(fctx, fence), + remaining_jiffies); + else + ret = wait_event_timeout(fctx->event, + fence_completed(fctx, fence), + remaining_jiffies); + + if (ret == 0) { + DBG("timeout waiting for fence: %u (completed: %u)", + fence, fctx->completed_fence); + ret = -ETIMEDOUT; + } else if (ret != -ERESTARTSYS) { + ret = 0; + } + } + + return ret; +} + +/* called from workqueue */ +void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence) +{ + spin_lock(&fctx->spinlock); + fctx->completed_fence = max(fence, fctx->completed_fence); + spin_unlock(&fctx->spinlock); + + wake_up_all(&fctx->event); +} + +struct msm_fence { + struct msm_fence_context *fctx; + struct fence base; +}; + +static inline struct msm_fence *to_msm_fence(struct fence *fence) +{ + return container_of(fence, struct msm_fence, base); +} + +static const char *msm_fence_get_driver_name(struct fence *fence) +{ + return "msm"; +} + +static const char *msm_fence_get_timeline_name(struct fence *fence) +{ + struct msm_fence *f = to_msm_fence(fence); + return f->fctx->name; +} + +static bool msm_fence_enable_signaling(struct fence *fence) +{ + return true; +} + +static bool msm_fence_signaled(struct fence *fence) +{ + struct msm_fence *f = to_msm_fence(fence); + return fence_completed(f->fctx, f->base.seqno); +} + +static void msm_fence_release(struct fence *fence) +{ + struct msm_fence *f = to_msm_fence(fence); + kfree_rcu(f, base.rcu); +} + +static const struct fence_ops msm_fence_ops = { + .get_driver_name = msm_fence_get_driver_name, + .get_timeline_name = msm_fence_get_timeline_name, + .enable_signaling = msm_fence_enable_signaling, + .signaled = msm_fence_signaled, + .wait = 
fence_default_wait, + .release = msm_fence_release, +}; + +struct fence * +msm_fence_alloc(struct msm_fence_context *fctx) +{ + struct msm_fence *f; + + f = kzalloc(sizeof(*f), GFP_KERNEL); + if (!f) + return ERR_PTR(-ENOMEM); + + f->fctx = fctx; + + fence_init(&f->base, &msm_fence_ops, &fctx->spinlock, + fctx->context, ++fctx->last_fence); + + return &f->base; +} diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h new file mode 100644 index 000000000..ceb5b3d31 --- /dev/null +++ b/drivers/gpu/drm/msm/msm_fence.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2013-2016 Red Hat + * Author: Rob Clark <robdclark@gmail.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __MSM_FENCE_H__ +#define __MSM_FENCE_H__ + +#include "msm_drv.h" + +struct msm_fence_context { + struct drm_device *dev; + const char *name; + unsigned context; + /* last_fence == completed_fence --> no pending work */ + uint32_t last_fence; /* last assigned fence */ + uint32_t completed_fence; /* last completed fence */ + wait_queue_head_t event; + spinlock_t spinlock; +}; + +struct msm_fence_context * msm_fence_context_alloc(struct drm_device *dev, + const char *name); +void msm_fence_context_free(struct msm_fence_context *fctx); + +int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence, + ktime_t *timeout, bool interruptible); +int msm_queue_fence_cb(struct msm_fence_context *fctx, + struct msm_fence_cb *cb, uint32_t fence); +void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence); + +struct fence * msm_fence_alloc(struct msm_fence_context *fctx); + +#endif diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 3cedb8d5c..69836f568 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -21,6 +21,7 @@ #include <linux/pfn_t.h> #include "msm_drv.h" +#include "msm_fence.h" #include "msm_gem.h" #include "msm_gpu.h" #include "msm_mmu.h" @@ -373,7 +374,7 @@ int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, int ret = 0; /* GEM does all our handle to object mapping */ - obj = drm_gem_object_lookup(dev, file, handle); + obj = drm_gem_object_lookup(file, handle); if (obj == NULL) { ret = -ENOENT; goto fail; @@ -397,6 +398,8 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj) return ERR_CAST(pages); msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, VM_MAP, pgprot_writecombine(PAGE_KERNEL)); + if (msm_obj->vaddr == NULL) + return ERR_PTR(-ENOMEM); } return msm_obj->vaddr; } @@ -410,27 +413,62 @@ void *msm_gem_vaddr(struct drm_gem_object *obj) return ret; } -/* setup callback for when bo is no longer busy.. - * TODO probably want to differentiate read vs write.. - */ -int msm_gem_queue_inactive_cb(struct drm_gem_object *obj, - struct msm_fence_cb *cb) +/* must be called before _move_to_active().. 
*/ +int msm_gem_sync_object(struct drm_gem_object *obj, + struct msm_fence_context *fctx, bool exclusive) { struct msm_gem_object *msm_obj = to_msm_bo(obj); - uint32_t fence = msm_gem_fence(msm_obj, - MSM_PREP_READ | MSM_PREP_WRITE); - return msm_queue_fence_cb(obj->dev, cb, fence); + struct reservation_object_list *fobj; + struct fence *fence; + int i, ret; + + if (!exclusive) { + /* NOTE: _reserve_shared() must happen before _add_shared_fence(), + * which makes this a slightly strange place to call it. OTOH this + * is a convenient can-fail point to hook it in. (And similar to + * how etnaviv and nouveau handle this.) + */ + ret = reservation_object_reserve_shared(msm_obj->resv); + if (ret) + return ret; + } + + fobj = reservation_object_get_list(msm_obj->resv); + if (!fobj || (fobj->shared_count == 0)) { + fence = reservation_object_get_excl(msm_obj->resv); + /* don't need to wait on our own fences, since ring is fifo */ + if (fence && (fence->context != fctx->context)) { + ret = fence_wait(fence, true); + if (ret) + return ret; + } + } + + if (!exclusive || !fobj) + return 0; + + for (i = 0; i < fobj->shared_count; i++) { + fence = rcu_dereference_protected(fobj->shared[i], + reservation_object_held(msm_obj->resv)); + if (fence->context != fctx->context) { + ret = fence_wait(fence, true); + if (ret) + return ret; + } + } + + return 0; } void msm_gem_move_to_active(struct drm_gem_object *obj, - struct msm_gpu *gpu, bool write, uint32_t fence) + struct msm_gpu *gpu, bool exclusive, struct fence *fence) { struct msm_gem_object *msm_obj = to_msm_bo(obj); msm_obj->gpu = gpu; - if (write) - msm_obj->write_fence = fence; + if (exclusive) + reservation_object_add_excl_fence(msm_obj->resv, fence); else - msm_obj->read_fence = fence; + reservation_object_add_shared_fence(msm_obj->resv, fence); list_del_init(&msm_obj->mm_list); list_add_tail(&msm_obj->mm_list, &gpu->active_list); } @@ -444,30 +482,30 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj) WARN_ON(!mutex_is_locked(&dev->struct_mutex)); msm_obj->gpu = NULL; - msm_obj->read_fence = 0; - msm_obj->write_fence = 0; list_del_init(&msm_obj->mm_list); list_add_tail(&msm_obj->mm_list, &priv->inactive_list); } int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout) { - struct drm_device *dev = obj->dev; struct msm_gem_object *msm_obj = to_msm_bo(obj); - int ret = 0; + bool write = !!(op & MSM_PREP_WRITE); - if (is_active(msm_obj)) { - uint32_t fence = msm_gem_fence(msm_obj, op); - - if (op & MSM_PREP_NOSYNC) - timeout = NULL; + if (op & MSM_PREP_NOSYNC) { + if (!reservation_object_test_signaled_rcu(msm_obj->resv, write)) + return -EBUSY; + } else { + int ret; - ret = msm_wait_fence(dev, fence, timeout, true); + ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write, + true, timeout_to_jiffies(timeout)); + if (ret <= 0) + return ret == 0 ? 
-ETIMEDOUT : ret; } /* TODO cache maintenance */ - return ret; + return 0; } int msm_gem_cpu_fini(struct drm_gem_object *obj) @@ -477,18 +515,46 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj) } #ifdef CONFIG_DEBUG_FS +static void describe_fence(struct fence *fence, const char *type, + struct seq_file *m) +{ + if (!fence_is_signaled(fence)) + seq_printf(m, "\t%9s: %s %s seq %u\n", type, + fence->ops->get_driver_name(fence), + fence->ops->get_timeline_name(fence), + fence->seqno); +} + void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) { - struct drm_device *dev = obj->dev; struct msm_gem_object *msm_obj = to_msm_bo(obj); + struct reservation_object *robj = msm_obj->resv; + struct reservation_object_list *fobj; + struct fence *fence; uint64_t off = drm_vma_node_start(&obj->vma_node); - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %zu\n", + WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); + + seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu\n", msm_obj->flags, is_active(msm_obj) ? 'A' : 'I', - msm_obj->read_fence, msm_obj->write_fence, obj->name, obj->refcount.refcount.counter, off, msm_obj->vaddr, obj->size); + + rcu_read_lock(); + fobj = rcu_dereference(robj->fence); + if (fobj) { + unsigned int i, shared_count = fobj->shared_count; + + for (i = 0; i < shared_count; i++) { + fence = rcu_dereference(fobj->shared[i]); + describe_fence(fence, "Shared", m); + } + } + + fence = rcu_dereference(robj->fence_excl); + if (fence) + describe_fence(fence, "Exclusive", m); + rcu_read_unlock(); } void msm_gem_describe_objects(struct list_head *list, struct seq_file *m) @@ -583,6 +649,7 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, static int msm_gem_new_impl(struct drm_device *dev, uint32_t size, uint32_t flags, + struct reservation_object *resv, struct drm_gem_object **obj) { struct msm_drm_private *priv = dev->dev_private; @@ -622,8 +689,12 @@ static int msm_gem_new_impl(struct drm_device *dev, msm_obj->flags = flags; - msm_obj->resv = &msm_obj->_resv; - reservation_object_init(msm_obj->resv); + if (resv) { + msm_obj->resv = resv; + } else { + msm_obj->resv = &msm_obj->_resv; + reservation_object_init(msm_obj->resv); + } INIT_LIST_HEAD(&msm_obj->submit_entry); list_add_tail(&msm_obj->mm_list, &priv->inactive_list); @@ -643,7 +714,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, size = PAGE_ALIGN(size); - ret = msm_gem_new_impl(dev, size, flags, &obj); + ret = msm_gem_new_impl(dev, size, flags, NULL, &obj); if (ret) goto fail; @@ -665,10 +736,11 @@ fail: } struct drm_gem_object *msm_gem_import(struct drm_device *dev, - uint32_t size, struct sg_table *sgt) + struct dma_buf *dmabuf, struct sg_table *sgt) { struct msm_gem_object *msm_obj; struct drm_gem_object *obj; + uint32_t size; int ret, npages; /* if we don't have IOMMU, don't bother pretending we can import: */ @@ -677,9 +749,9 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, return ERR_PTR(-EINVAL); } - size = PAGE_ALIGN(size); + size = PAGE_ALIGN(dmabuf->size); - ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj); + ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj); if (ret) goto fail; diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 6fc59bfee..9facd4b6f 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -39,7 +39,6 @@ struct msm_gem_object { */ struct list_head mm_list; struct msm_gpu *gpu; /* non-null if active */ - uint32_t 
read_fence, write_fence; /* Transiently in the process of submit ioctl, objects associated * with the submit are on submit->bo_list.. this only lasts for @@ -73,19 +72,6 @@ static inline bool is_active(struct msm_gem_object *msm_obj) return msm_obj->gpu != NULL; } -static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj, - uint32_t op) -{ - uint32_t fence = 0; - - if (op & MSM_PREP_READ) - fence = msm_obj->write_fence; - if (op & MSM_PREP_WRITE) - fence = max(fence, msm_obj->read_fence); - - return fence; -} - #define MAX_CMDS 4 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc, @@ -99,8 +85,9 @@ struct msm_gem_submit { struct list_head node; /* node in gpu submit_list */ struct list_head bo_list; struct ww_acquire_ctx ticket; - uint32_t fence; - bool valid; + struct fence *fence; + struct pid *pid; /* submitting process */ + bool valid; /* true if no cmdstream patching needed */ unsigned int nr_cmds; unsigned int nr_bos; struct { diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index 121975b07..6b90890fa 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -55,7 +55,7 @@ int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg) { - return msm_gem_import(dev, attach->dmabuf->size, sg); + return msm_gem_import(dev, attach->dmabuf, sg); } int msm_gem_prime_pin(struct drm_gem_object *obj) diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 43d218123..eb4bb8b2f 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -24,15 +24,10 @@ */ /* make sure these don't conflict w/ MSM_SUBMIT_BO_x */ -#define BO_VALID 0x8000 +#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? 
*/ #define BO_LOCKED 0x4000 #define BO_PINNED 0x2000 -static inline void __user *to_user_ptr(u64 address) -{ - return (void __user *)(uintptr_t)address; -} - static struct msm_gem_submit *submit_create(struct drm_device *dev, struct msm_gpu *gpu, int nr) { @@ -40,21 +35,33 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0])); submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); - if (submit) { - submit->dev = dev; - submit->gpu = gpu; + if (!submit) + return NULL; - /* initially, until copy_from_user() and bo lookup succeeds: */ - submit->nr_bos = 0; - submit->nr_cmds = 0; + submit->dev = dev; + submit->gpu = gpu; + submit->fence = NULL; + submit->pid = get_pid(task_pid(current)); - INIT_LIST_HEAD(&submit->bo_list); - ww_acquire_init(&submit->ticket, &reservation_ww_class); - } + /* initially, until copy_from_user() and bo lookup succeeds: */ + submit->nr_bos = 0; + submit->nr_cmds = 0; + + INIT_LIST_HEAD(&submit->node); + INIT_LIST_HEAD(&submit->bo_list); + ww_acquire_init(&submit->ticket, &reservation_ww_class); return submit; } +void msm_gem_submit_free(struct msm_gem_submit *submit) +{ + fence_put(submit->fence); + list_del(&submit->node); + put_pid(submit->pid); + kfree(submit); +} + static int submit_lookup_objects(struct msm_gem_submit *submit, struct drm_msm_gem_submit *args, struct drm_file *file) { @@ -68,7 +75,12 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, struct drm_gem_object *obj; struct msm_gem_object *msm_obj; void __user *userptr = - to_user_ptr(args->bos + (i * sizeof(submit_bo))); + u64_to_user_ptr(args->bos + (i * sizeof(submit_bo))); + + /* make sure we don't have garbage flags, in case we hit + * error path before flags is initialized: + */ + submit->bos[i].flags = 0; ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); if (ret) { @@ -136,16 +148,13 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i) } /* This is where we make sure all the bo's are reserved and pin'd: */ -static int submit_validate_objects(struct msm_gem_submit *submit) +static int submit_lock_objects(struct msm_gem_submit *submit) { int contended, slow_locked = -1, i, ret = 0; retry: - submit->valid = true; - for (i = 0; i < submit->nr_bos; i++) { struct msm_gem_object *msm_obj = submit->bos[i].obj; - uint32_t iova; if (slow_locked == i) slow_locked = -1; @@ -159,30 +168,6 @@ retry: goto fail; submit->bos[i].flags |= BO_LOCKED; } - - - /* if locking succeeded, pin bo: */ - ret = msm_gem_get_iova_locked(&msm_obj->base, - submit->gpu->id, &iova); - - /* this would break the logic in the fail path.. 
there is no - * reason for this to happen, but just to be on the safe side - * let's notice if this starts happening in the future: - */ - WARN_ON(ret == -EDEADLK); - - if (ret) - goto fail; - - submit->bos[i].flags |= BO_PINNED; - - if (iova == submit->bos[i].iova) { - submit->bos[i].flags |= BO_VALID; - } else { - submit->bos[i].iova = iova; - submit->bos[i].flags &= ~BO_VALID; - submit->valid = false; - } } ww_acquire_done(&submit->ticket); @@ -211,6 +196,54 @@ fail: return ret; } +static int submit_fence_sync(struct msm_gem_submit *submit) +{ + int i, ret = 0; + + for (i = 0; i < submit->nr_bos; i++) { + struct msm_gem_object *msm_obj = submit->bos[i].obj; + bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE; + + ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write); + if (ret) + break; + } + + return ret; +} + +static int submit_pin_objects(struct msm_gem_submit *submit) +{ + int i, ret = 0; + + submit->valid = true; + + for (i = 0; i < submit->nr_bos; i++) { + struct msm_gem_object *msm_obj = submit->bos[i].obj; + uint32_t iova; + + /* if locking succeeded, pin bo: */ + ret = msm_gem_get_iova_locked(&msm_obj->base, + submit->gpu->id, &iova); + + if (ret) + break; + + submit->bos[i].flags |= BO_PINNED; + + if (iova == submit->bos[i].iova) { + submit->bos[i].flags |= BO_VALID; + } else { + submit->bos[i].iova = iova; + /* iova changed, so address in cmdstream is not valid: */ + submit->bos[i].flags &= ~BO_VALID; + submit->valid = false; + } + } + + return ret; +} + static int submit_bo(struct msm_gem_submit *submit, uint32_t idx, struct msm_gem_object **obj, uint32_t *iova, bool *valid) { @@ -257,7 +290,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob for (i = 0; i < nr_relocs; i++) { struct drm_msm_gem_submit_reloc submit_reloc; void __user *userptr = - to_user_ptr(relocs + (i * sizeof(submit_reloc))); + u64_to_user_ptr(relocs + (i * sizeof(submit_reloc))); uint32_t iova, off; bool valid; @@ -302,7 +335,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob return 0; } -static void submit_cleanup(struct msm_gem_submit *submit, bool fail) +static void submit_cleanup(struct msm_gem_submit *submit) { unsigned i; @@ -349,14 +382,22 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (ret) goto out; - ret = submit_validate_objects(submit); + ret = submit_lock_objects(submit); + if (ret) + goto out; + + ret = submit_fence_sync(submit); + if (ret) + goto out; + + ret = submit_pin_objects(submit); if (ret) goto out; for (i = 0; i < args->nr_cmds; i++) { struct drm_msm_gem_submit_cmd submit_cmd; void __user *userptr = - to_user_ptr(args->cmds + (i * sizeof(submit_cmd))); + u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd))); struct msm_gem_object *msm_obj; uint32_t iova; @@ -415,10 +456,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, ret = msm_gpu_submit(gpu, submit, ctx); - args->fence = submit->fence; + args->fence = submit->fence->seqno; out: - submit_cleanup(submit, !!ret); + submit_cleanup(submit); + if (ret) + msm_gem_submit_free(submit); mutex_unlock(&dev->struct_mutex); return ret; } diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 6b02ada65..36ed53e66 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -18,6 +18,7 @@ #include "msm_gpu.h" #include "msm_gem.h" #include "msm_mmu.h" +#include "msm_fence.h" /* @@ -265,22 +266,38 @@ static void inactive_start(struct msm_gpu *gpu) * Hangcheck detection for 
locked gpu: */ -static void retire_submits(struct msm_gpu *gpu, uint32_t fence); +static void retire_submits(struct msm_gpu *gpu); static void recover_worker(struct work_struct *work) { struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work); struct drm_device *dev = gpu->dev; + struct msm_gem_submit *submit; + uint32_t fence = gpu->funcs->last_fence(gpu); - dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name); + msm_update_fence(gpu->fctx, fence + 1); mutex_lock(&dev->struct_mutex); - if (msm_gpu_active(gpu)) { - struct msm_gem_submit *submit; - uint32_t fence = gpu->funcs->last_fence(gpu); + dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name); + list_for_each_entry(submit, &gpu->submit_list, node) { + if (submit->fence->seqno == (fence + 1)) { + struct task_struct *task; + + rcu_read_lock(); + task = pid_task(submit->pid, PIDTYPE_PID); + if (task) { + dev_err(dev->dev, "%s: offending task: %s\n", + gpu->name, task->comm); + } + rcu_read_unlock(); + break; + } + } + + if (msm_gpu_active(gpu)) { /* retire completed submits, plus the one that hung: */ - retire_submits(gpu, fence + 1); + retire_submits(gpu); inactive_cancel(gpu); gpu->funcs->recover(gpu); @@ -290,6 +307,7 @@ static void recover_worker(struct work_struct *work) gpu->funcs->submit(gpu, submit, NULL); } } + mutex_unlock(&dev->struct_mutex); msm_gpu_retire(gpu); @@ -312,7 +330,7 @@ static void hangcheck_handler(unsigned long data) if (fence != gpu->hangcheck_fence) { /* some progress has been made.. ya! */ gpu->hangcheck_fence = fence; - } else if (fence < gpu->submitted_fence) { + } else if (fence < gpu->fctx->last_fence) { /* no progress and not done.. hung! */ gpu->hangcheck_fence = fence; dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n", @@ -320,12 +338,12 @@ static void hangcheck_handler(unsigned long data) dev_err(dev->dev, "%s: completed fence: %u\n", gpu->name, fence); dev_err(dev->dev, "%s: submitted fence: %u\n", - gpu->name, gpu->submitted_fence); + gpu->name, gpu->fctx->last_fence); queue_work(priv->wq, &gpu->recover_work); } /* if still more pending work, reset the hangcheck timer: */ - if (gpu->submitted_fence > gpu->hangcheck_fence) + if (gpu->fctx->last_fence > gpu->hangcheck_fence) hangcheck_timer_reset(gpu); /* workaround for missing irq: */ @@ -431,7 +449,22 @@ out: * Cmdstream submission/retirement: */ -static void retire_submits(struct msm_gpu *gpu, uint32_t fence) +static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) +{ + int i; + + for (i = 0; i < submit->nr_bos; i++) { + struct msm_gem_object *msm_obj = submit->bos[i].obj; + /* move to inactive: */ + msm_gem_move_to_inactive(&msm_obj->base); + msm_gem_put_iova(&msm_obj->base, gpu->id); + drm_gem_object_unreference(&msm_obj->base); + } + + msm_gem_submit_free(submit); +} + +static void retire_submits(struct msm_gpu *gpu) { struct drm_device *dev = gpu->dev; @@ -443,9 +476,8 @@ static void retire_submits(struct msm_gpu *gpu, uint32_t fence) submit = list_first_entry(&gpu->submit_list, struct msm_gem_submit, node); - if (submit->fence <= fence) { - list_del(&submit->node); - kfree(submit); + if (fence_is_signaled(submit->fence)) { + retire_submit(gpu, submit); } else { break; } @@ -458,29 +490,10 @@ static void retire_worker(struct work_struct *work) struct drm_device *dev = gpu->dev; uint32_t fence = gpu->funcs->last_fence(gpu); - msm_update_fence(gpu->dev, fence); + msm_update_fence(gpu->fctx, fence); mutex_lock(&dev->struct_mutex); - - retire_submits(gpu, fence); - - while 
(!list_empty(&gpu->active_list)) { - struct msm_gem_object *obj; - - obj = list_first_entry(&gpu->active_list, - struct msm_gem_object, mm_list); - - if ((obj->read_fence <= fence) && - (obj->write_fence <= fence)) { - /* move to inactive: */ - msm_gem_move_to_inactive(&obj->base); - msm_gem_put_iova(&obj->base, gpu->id); - drm_gem_object_unreference(&obj->base); - } else { - break; - } - } - + retire_submits(gpu); mutex_unlock(&dev->struct_mutex); if (!msm_gpu_active(gpu)) @@ -505,9 +518,12 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - submit->fence = ++priv->next_fence; - - gpu->submitted_fence = submit->fence; + submit->fence = msm_fence_alloc(gpu->fctx); + if (IS_ERR(submit->fence)) { + ret = PTR_ERR(submit->fence); + submit->fence = NULL; + return ret; + } inactive_cancel(gpu); @@ -515,40 +531,34 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, msm_rd_dump_submit(submit); - gpu->submitted_fence = submit->fence; - update_sw_cntrs(gpu); for (i = 0; i < submit->nr_bos; i++) { struct msm_gem_object *msm_obj = submit->bos[i].obj; + uint32_t iova; /* can't happen yet.. but when we add 2d support we'll have * to deal w/ cross-ring synchronization: */ WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu)); - if (!is_active(msm_obj)) { - uint32_t iova; - - /* ring takes a reference to the bo and iova: */ - drm_gem_object_reference(&msm_obj->base); - msm_gem_get_iova_locked(&msm_obj->base, - submit->gpu->id, &iova); - } - - if (submit->bos[i].flags & MSM_SUBMIT_BO_READ) - msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence); + /* submit takes a reference to the bo and iova until retired: */ + drm_gem_object_reference(&msm_obj->base); + msm_gem_get_iova_locked(&msm_obj->base, + submit->gpu->id, &iova); if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence); + else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ) + msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence); } - ret = gpu->funcs->submit(gpu, submit, ctx); + gpu->funcs->submit(gpu, submit, ctx); priv->lastctx = ctx; hangcheck_timer_reset(gpu); - return ret; + return 0; } /* @@ -580,6 +590,12 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, gpu->funcs = funcs; gpu->name = name; gpu->inactive = true; + gpu->fctx = msm_fence_context_alloc(drm, name); + if (IS_ERR(gpu->fctx)) { + ret = PTR_ERR(gpu->fctx); + gpu->fctx = NULL; + goto fail; + } INIT_LIST_HEAD(&gpu->active_list); INIT_WORK(&gpu->retire_work, retire_worker); @@ -700,4 +716,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) if (gpu->mmu) gpu->mmu->funcs->destroy(gpu->mmu); + + if (gpu->fctx) + msm_fence_context_free(gpu->fctx); } diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 2bbe85a3d..c9022837a 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -22,6 +22,7 @@ #include <linux/regulator/consumer.h> #include "msm_drv.h" +#include "msm_fence.h" #include "msm_ringbuffer.h" struct msm_gem_submit; @@ -46,7 +47,7 @@ struct msm_gpu_funcs { int (*hw_init)(struct msm_gpu *gpu); int (*pm_suspend)(struct msm_gpu *gpu); int (*pm_resume)(struct msm_gpu *gpu); - int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit, + void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_file_private *ctx); void (*flush)(struct msm_gpu *gpu); void (*idle)(struct msm_gpu *gpu); @@ -77,13 +78,15 @@ struct msm_gpu { 
const struct msm_gpu_perfcntr *perfcntrs; uint32_t num_perfcntrs; + /* ringbuffer: */ struct msm_ringbuffer *rb; uint32_t rb_iova; /* list of GEM active objects: */ struct list_head active_list; - uint32_t submitted_fence; + /* fencing: */ + struct msm_fence_context *fctx; /* is gpu powered/active? */ int active_cnt; @@ -125,7 +128,7 @@ struct msm_gpu { static inline bool msm_gpu_active(struct msm_gpu *gpu) { - return gpu->submitted_fence > gpu->funcs->last_fence(gpu); + return gpu->fctx->last_fence > gpu->funcs->last_fence(gpu); } /* Perf-Counters: diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index 9a78c4881..0857710c2 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -296,7 +296,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit) n = snprintf(msg, sizeof(msg), "%.*s/%d: fence=%u", TASK_COMM_LEN, current->comm, task_pid_nr(current), - submit->fence); + submit->fence->seqno); rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4)); @@ -312,6 +312,9 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit) struct msm_gem_object *obj = submit->bos[idx].obj; const char *buf = msm_gem_vaddr_locked(&obj->base); + if (IS_ERR(buf)) + continue; + buf += iova - submit->bos[idx].iova; rd_write_section(rd, RD_GPUADDR, diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 1f14b908b..42f5359cf 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -40,6 +40,10 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) } ring->start = msm_gem_vaddr_locked(ring->bo); + if (IS_ERR(ring->start)) { + ret = PTR_ERR(ring->start); + goto fail; + } ring->end = ring->start + (size / 4); ring->cur = ring->start; |
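
The overall flow this patch introduces can be summarized outside the diff. Below is a minimal sketch, assuming only the interfaces declared in the new msm_fence.h and the msm_gpu.h changes above; example_submit(), example_retire() and example_wait_ioctl() are hypothetical illustration names, not functions added by the patch:

static int example_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	/* take a seqno on the GPU's timeline (fctx->last_fence is bumped): */
	submit->fence = msm_fence_alloc(gpu->fctx);
	if (IS_ERR(submit->fence)) {
		int ret = PTR_ERR(submit->fence);
		submit->fence = NULL;
		return ret;
	}
	/* ... write the cmdstream; retire_worker cleans up once it signals ... */
	return 0;
}

static void example_retire(struct msm_gpu *gpu)
{
	/* read back the last seqno the GPU wrote and signal up to it: */
	msm_update_fence(gpu->fctx, gpu->funcs->last_fence(gpu));
}

static int example_wait_ioctl(struct msm_gpu *gpu, uint32_t seqno, ktime_t *timeout)
{
	/* legacy WAIT_FENCE path: block until the seqno completes or times out */
	return msm_wait_fence(gpu->fctx, seqno, timeout, true);
}

After msm_update_fence() advances completed_fence, all msm_wait_fence() sleepers on fctx->event are woken and fence_is_signaled() on the per-submit struct fence returns true, which is what retire_submits() now keys off instead of comparing raw per-GPU seqnos.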
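
Per-buffer synchronization likewise moves from the removed read_fence/write_fence pair to each object's reservation object. A sketch of the per-bo pattern the submit path follows, again with a hypothetical wrapper name and assuming the bo's reservation is already locked (as submit_lock_objects() arranges):

static int example_attach_bo(struct msm_gpu *gpu, struct drm_gem_object *obj,
		struct fence *fence, bool write)
{
	int ret;

	/* wait on fences from other contexts (and reserve a shared slot): */
	ret = msm_gem_sync_object(obj, gpu->fctx, write);
	if (ret)
		return ret;

	/* publish our fence: exclusive for writes, shared for reads: */
	msm_gem_move_to_active(obj, gpu, write, fence);
	return 0;
}

The CPU side (msm_gem_cpu_prep) then asks the same reservation object whether the relevant fences have signaled, so no driver-private per-bo fence bookkeeping remains.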