Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug | 41
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 211
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 458
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 524
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 208
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 521
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 781
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.h | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 340
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 53
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 179
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 576
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 125
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 118
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 84
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 210
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_reg.h | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_submission.c | 165
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 859
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 288
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 52
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 397
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 861
-rw-r--r--  drivers/gpu/drm/i915/intel_color.c | 553
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 40
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 173
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 1385
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 3413
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 589
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_link_training.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 47
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 1786
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.h | 164
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 200
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 455
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 177
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_pll.c | 282
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_fifo_underrun.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.h | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_loader.c | 71
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 42
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 1353
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 67
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.c | 169
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 69
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 80
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 65
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 616
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 1900
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 134
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 519
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 61
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 58
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 672
-rw-r--r--  drivers/gpu/drm/i915/intel_vbt_defs.h | 845
76 files changed, 13653 insertions, 9754 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 20a5d0455..29a32b119 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -56,3 +56,9 @@ config DRM_I915_USERPTR
selected to enabled full userptr support.
If in doubt, say "Y".
+
+menu "drm/i915 Debugging"
+depends on DRM_I915
+depends on EXPERT
+source drivers/gpu/drm/i915/Kconfig.debug
+endmenu
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
new file mode 100644
index 000000000..8f4041033
--- /dev/null
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -0,0 +1,41 @@
+config DRM_I915_WERROR
+ bool "Force GCC to throw an error instead of a warning when compiling"
+ # As this may inadvertently break the build, only allow the user
+ # to shoot oneself in the foot iff they aim really hard
+ depends on EXPERT
+ # We use the dependency on !COMPILE_TEST to not be enabled in
+ # allmodconfig or allyesconfig configurations
+ depends on !COMPILE_TEST
+ default n
+ help
+ Add -Werror to the build flags for (and only for) i915.ko.
+ Do not enable this unless you are writing code for the i915.ko module.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_I915_DEBUG
+ bool "Enable additional driver debugging"
+ depends on DRM_I915
+ default n
+ help
+ Choose this option to turn on extra driver debugging that may affect
+ performance but will catch some internal issues.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_I915_DEBUG_GEM
+ bool "Insert extra checks into the GEM internals"
+ default n
+ depends on DRM_I915_WERROR
+ help
+ Enable extra sanity checks (including BUGs) along the GEM driver
+ paths that may slow the system down and if hit hang the machine.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0851de07b..0b88ba0f3 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -2,6 +2,8 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror
+
# Please keep these build lists sorted!
# core driver code
@@ -55,7 +57,9 @@ i915-y += intel_audio.o \
intel_atomic.o \
intel_atomic_plane.o \
intel_bios.o \
+ intel_color.o \
intel_display.o \
+ intel_dpll_mgr.o \
intel_fbc.o \
intel_fifo_underrun.o \
intel_frontbuffer.o \
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 814d894ed..a337f33be 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -444,6 +444,7 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
REG64(CL_PRIMITIVES_COUNT),
REG64(PS_INVOCATION_COUNT),
REG64(PS_DEPTH_COUNT),
+ REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
REG32(OACONTROL), /* Only allowed for LRI and SRM. See below. */
REG64(MI_PREDICATE_SRC0),
REG64(MI_PREDICATE_SRC1),
@@ -471,6 +472,25 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
REG32(GEN7_L3SQCREG1),
REG32(GEN7_L3CNTLREG2),
REG32(GEN7_L3CNTLREG3),
+};
+
+static const struct drm_i915_reg_descriptor hsw_render_regs[] = {
+ REG64_IDX(HSW_CS_GPR, 0),
+ REG64_IDX(HSW_CS_GPR, 1),
+ REG64_IDX(HSW_CS_GPR, 2),
+ REG64_IDX(HSW_CS_GPR, 3),
+ REG64_IDX(HSW_CS_GPR, 4),
+ REG64_IDX(HSW_CS_GPR, 5),
+ REG64_IDX(HSW_CS_GPR, 6),
+ REG64_IDX(HSW_CS_GPR, 7),
+ REG64_IDX(HSW_CS_GPR, 8),
+ REG64_IDX(HSW_CS_GPR, 9),
+ REG64_IDX(HSW_CS_GPR, 10),
+ REG64_IDX(HSW_CS_GPR, 11),
+ REG64_IDX(HSW_CS_GPR, 12),
+ REG64_IDX(HSW_CS_GPR, 13),
+ REG64_IDX(HSW_CS_GPR, 14),
+ REG64_IDX(HSW_CS_GPR, 15),
REG32(HSW_SCRATCH1,
.mask = ~HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE,
.value = 0),
@@ -500,6 +520,33 @@ static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
#undef REG64
#undef REG32
+struct drm_i915_reg_table {
+ const struct drm_i915_reg_descriptor *regs;
+ int num_regs;
+ bool master;
+};
+
+static const struct drm_i915_reg_table ivb_render_reg_tables[] = {
+ { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
+ { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
+};
+
+static const struct drm_i915_reg_table ivb_blt_reg_tables[] = {
+ { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
+ { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
+};
+
+static const struct drm_i915_reg_table hsw_render_reg_tables[] = {
+ { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
+ { hsw_render_regs, ARRAY_SIZE(hsw_render_regs), false },
+ { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
+};
+
+static const struct drm_i915_reg_table hsw_blt_reg_tables[] = {
+ { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
+ { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
+};
+
static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
{
u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
@@ -555,7 +602,7 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
return 0;
}
-static bool validate_cmds_sorted(struct intel_engine_cs *ring,
+static bool validate_cmds_sorted(struct intel_engine_cs *engine,
const struct drm_i915_cmd_table *cmd_tables,
int cmd_table_count)
{
@@ -577,7 +624,7 @@ static bool validate_cmds_sorted(struct intel_engine_cs *ring,
if (curr < previous) {
DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
- ring->id, i, j, curr, previous);
+ engine->id, i, j, curr, previous);
ret = false;
}
@@ -611,11 +658,18 @@ static bool check_sorted(int ring_id,
return ret;
}
-static bool validate_regs_sorted(struct intel_engine_cs *ring)
+static bool validate_regs_sorted(struct intel_engine_cs *engine)
{
- return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
- check_sorted(ring->id, ring->master_reg_table,
- ring->master_reg_count);
+ int i;
+ const struct drm_i915_reg_table *table;
+
+ for (i = 0; i < engine->reg_table_count; i++) {
+ table = &engine->reg_tables[i];
+ if (!check_sorted(engine->id, table->regs, table->num_regs))
+ return false;
+ }
+
+ return true;
}
struct cmd_node {
@@ -639,13 +693,13 @@ struct cmd_node {
*/
#define CMD_HASH_MASK STD_MI_OPCODE_MASK
-static int init_hash_table(struct intel_engine_cs *ring,
+static int init_hash_table(struct intel_engine_cs *engine,
const struct drm_i915_cmd_table *cmd_tables,
int cmd_table_count)
{
int i, j;
- hash_init(ring->cmd_hash);
+ hash_init(engine->cmd_hash);
for (i = 0; i < cmd_table_count; i++) {
const struct drm_i915_cmd_table *table = &cmd_tables[i];
@@ -660,7 +714,7 @@ static int init_hash_table(struct intel_engine_cs *ring,
return -ENOMEM;
desc_node->desc = desc;
- hash_add(ring->cmd_hash, &desc_node->node,
+ hash_add(engine->cmd_hash, &desc_node->node,
desc->cmd.value & CMD_HASH_MASK);
}
}
@@ -668,13 +722,13 @@ static int init_hash_table(struct intel_engine_cs *ring,
return 0;
}
-static void fini_hash_table(struct intel_engine_cs *ring)
+static void fini_hash_table(struct intel_engine_cs *engine)
{
struct hlist_node *tmp;
struct cmd_node *desc_node;
int i;
- hash_for_each_safe(ring->cmd_hash, i, tmp, desc_node, node) {
+ hash_for_each_safe(engine->cmd_hash, i, tmp, desc_node, node) {
hash_del(&desc_node->node);
kfree(desc_node);
}
@@ -690,18 +744,18 @@ static void fini_hash_table(struct intel_engine_cs *ring)
*
* Return: non-zero if initialization fails
*/
-int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
+int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
{
const struct drm_i915_cmd_table *cmd_tables;
int cmd_table_count;
int ret;
- if (!IS_GEN7(ring->dev))
+ if (!IS_GEN7(engine->dev))
return 0;
- switch (ring->id) {
+ switch (engine->id) {
case RCS:
- if (IS_HASWELL(ring->dev)) {
+ if (IS_HASWELL(engine->dev)) {
cmd_tables = hsw_render_ring_cmds;
cmd_table_count =
ARRAY_SIZE(hsw_render_ring_cmds);
@@ -710,26 +764,23 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
}
- ring->reg_table = gen7_render_regs;
- ring->reg_count = ARRAY_SIZE(gen7_render_regs);
-
- if (IS_HASWELL(ring->dev)) {
- ring->master_reg_table = hsw_master_regs;
- ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+ if (IS_HASWELL(engine->dev)) {
+ engine->reg_tables = hsw_render_reg_tables;
+ engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables);
} else {
- ring->master_reg_table = ivb_master_regs;
- ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+ engine->reg_tables = ivb_render_reg_tables;
+ engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables);
}
- ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
+ engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
break;
case VCS:
cmd_tables = gen7_video_cmds;
cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
- ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+ engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
case BCS:
- if (IS_HASWELL(ring->dev)) {
+ if (IS_HASWELL(engine->dev)) {
cmd_tables = hsw_blt_ring_cmds;
cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
} else {
@@ -737,44 +788,41 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
}
- ring->reg_table = gen7_blt_regs;
- ring->reg_count = ARRAY_SIZE(gen7_blt_regs);
-
- if (IS_HASWELL(ring->dev)) {
- ring->master_reg_table = hsw_master_regs;
- ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+ if (IS_HASWELL(engine->dev)) {
+ engine->reg_tables = hsw_blt_reg_tables;
+ engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
} else {
- ring->master_reg_table = ivb_master_regs;
- ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+ engine->reg_tables = ivb_blt_reg_tables;
+ engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables);
}
- ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+ engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
break;
case VECS:
cmd_tables = hsw_vebox_cmds;
cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
/* VECS can use the same length_mask function as VCS */
- ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+ engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
default:
DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
- ring->id);
+ engine->id);
BUG();
}
- BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
- BUG_ON(!validate_regs_sorted(ring));
+ BUG_ON(!validate_cmds_sorted(engine, cmd_tables, cmd_table_count));
+ BUG_ON(!validate_regs_sorted(engine));
- WARN_ON(!hash_empty(ring->cmd_hash));
+ WARN_ON(!hash_empty(engine->cmd_hash));
- ret = init_hash_table(ring, cmd_tables, cmd_table_count);
+ ret = init_hash_table(engine, cmd_tables, cmd_table_count);
if (ret) {
DRM_ERROR("CMD: cmd_parser_init failed!\n");
- fini_hash_table(ring);
+ fini_hash_table(engine);
return ret;
}
- ring->needs_cmd_parser = true;
+ engine->needs_cmd_parser = true;
return 0;
}
@@ -786,21 +834,21 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
* Releases any resources related to command parsing that may have been
* initialized for the specified ring.
*/
-void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
+void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine)
{
- if (!ring->needs_cmd_parser)
+ if (!engine->needs_cmd_parser)
return;
- fini_hash_table(ring);
+ fini_hash_table(engine);
}
static const struct drm_i915_cmd_descriptor*
-find_cmd_in_table(struct intel_engine_cs *ring,
+find_cmd_in_table(struct intel_engine_cs *engine,
u32 cmd_header)
{
struct cmd_node *desc_node;
- hash_for_each_possible(ring->cmd_hash, desc_node, node,
+ hash_for_each_possible(engine->cmd_hash, desc_node, node,
cmd_header & CMD_HASH_MASK) {
const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
u32 masked_cmd = desc->cmd.mask & cmd_header;
@@ -822,18 +870,18 @@ find_cmd_in_table(struct intel_engine_cs *ring,
* ring's default length encoding and returns default_desc.
*/
static const struct drm_i915_cmd_descriptor*
-find_cmd(struct intel_engine_cs *ring,
+find_cmd(struct intel_engine_cs *engine,
u32 cmd_header,
struct drm_i915_cmd_descriptor *default_desc)
{
const struct drm_i915_cmd_descriptor *desc;
u32 mask;
- desc = find_cmd_in_table(ring, cmd_header);
+ desc = find_cmd_in_table(engine, cmd_header);
if (desc)
return desc;
- mask = ring->get_cmd_length_mask(cmd_header);
+ mask = engine->get_cmd_length_mask(cmd_header);
if (!mask)
return NULL;
@@ -848,12 +896,31 @@ static const struct drm_i915_reg_descriptor *
find_reg(const struct drm_i915_reg_descriptor *table,
int count, u32 addr)
{
- if (table) {
- int i;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (i915_mmio_reg_offset(table[i].addr) == addr)
+ return &table[i];
+ }
- for (i = 0; i < count; i++) {
- if (i915_mmio_reg_offset(table[i].addr) == addr)
- return &table[i];
+ return NULL;
+}
+
+static const struct drm_i915_reg_descriptor *
+find_reg_in_tables(const struct drm_i915_reg_table *tables,
+ int count, bool is_master, u32 addr)
+{
+ int i;
+ const struct drm_i915_reg_table *table;
+ const struct drm_i915_reg_descriptor *reg;
+
+ for (i = 0; i < count; i++) {
+ table = &tables[i];
+ if (!table->master || is_master) {
+ reg = find_reg(table->regs, table->num_regs,
+ addr);
+ if (reg != NULL)
+ return reg;
}
}
@@ -963,18 +1030,18 @@ unpin_src:
*
* Return: true if the ring requires software command parsing
*/
-bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
+bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
{
- if (!ring->needs_cmd_parser)
+ if (!engine->needs_cmd_parser)
return false;
- if (!USES_PPGTT(ring->dev))
+ if (!USES_PPGTT(engine->dev))
return false;
return (i915.enable_cmd_parser == 1);
}
-static bool check_cmd(const struct intel_engine_cs *ring,
+static bool check_cmd(const struct intel_engine_cs *engine,
const struct drm_i915_cmd_descriptor *desc,
const u32 *cmd, u32 length,
const bool is_master,
@@ -1004,17 +1071,14 @@ static bool check_cmd(const struct intel_engine_cs *ring,
offset += step) {
const u32 reg_addr = cmd[offset] & desc->reg.mask;
const struct drm_i915_reg_descriptor *reg =
- find_reg(ring->reg_table, ring->reg_count,
- reg_addr);
-
- if (!reg && is_master)
- reg = find_reg(ring->master_reg_table,
- ring->master_reg_count,
- reg_addr);
+ find_reg_in_tables(engine->reg_tables,
+ engine->reg_table_count,
+ is_master,
+ reg_addr);
if (!reg) {
DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
- reg_addr, *cmd, ring->id);
+ reg_addr, *cmd, engine->id);
return false;
}
@@ -1087,7 +1151,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
*cmd,
desc->bits[i].mask,
desc->bits[i].expected,
- dword, ring->id);
+ dword, engine->id);
return false;
}
}
@@ -1113,7 +1177,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
* Return: non-zero if the parser finds violations or otherwise fails; -EACCES
* if the batch appears legal but should use hardware parsing
*/
-int i915_parse_cmds(struct intel_engine_cs *ring,
+int i915_parse_cmds(struct intel_engine_cs *engine,
struct drm_i915_gem_object *batch_obj,
struct drm_i915_gem_object *shadow_batch_obj,
u32 batch_start_offset,
@@ -1147,7 +1211,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
if (*cmd == MI_BATCH_BUFFER_END)
break;
- desc = find_cmd(ring, *cmd, &default_desc);
+ desc = find_cmd(engine, *cmd, &default_desc);
if (!desc) {
DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
*cmd);
@@ -1179,7 +1243,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
break;
}
- if (!check_cmd(ring, desc, cmd, length, is_master,
+ if (!check_cmd(engine, desc, cmd, length, is_master,
&oacontrol_set)) {
ret = -EINVAL;
break;
@@ -1223,6 +1287,7 @@ int i915_cmd_parser_get_version(void)
* 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
* 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
* 5. GPGPU dispatch compute indirect registers.
+ * 6. TIMESTAMP register and Haswell CS GPR registers
*/
- return 5;
+ return 6;
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e3f4c725a..103546834 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -89,27 +89,34 @@ static int i915_capabilities(struct seq_file *m, void *data)
return 0;
}
-static const char *get_pin_flag(struct drm_i915_gem_object *obj)
+static const char get_active_flag(struct drm_i915_gem_object *obj)
{
- if (obj->pin_display)
- return "p";
- else
- return " ";
+ return obj->active ? '*' : ' ';
}
-static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
+static const char get_pin_flag(struct drm_i915_gem_object *obj)
+{
+ return obj->pin_display ? 'p' : ' ';
+}
+
+static const char get_tiling_flag(struct drm_i915_gem_object *obj)
{
switch (obj->tiling_mode) {
default:
- case I915_TILING_NONE: return " ";
- case I915_TILING_X: return "X";
- case I915_TILING_Y: return "Y";
+ case I915_TILING_NONE: return ' ';
+ case I915_TILING_X: return 'X';
+ case I915_TILING_Y: return 'Y';
}
}
-static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
+static inline const char get_global_flag(struct drm_i915_gem_object *obj)
{
- return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
+ return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
+}
+
+static inline const char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
+{
+ return obj->mapping ? 'M' : ' ';
}
static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
@@ -129,23 +136,26 @@ static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct i915_vma *vma;
int pin_count = 0;
- int i;
+ enum intel_engine_id id;
+
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
- seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ",
+ seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x [ ",
&obj->base,
- obj->active ? "*" : " ",
+ get_active_flag(obj),
get_pin_flag(obj),
get_tiling_flag(obj),
get_global_flag(obj),
+ get_pin_mapped_flag(obj),
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain);
- for_each_ring(ring, dev_priv, i)
+ for_each_engine_id(engine, dev_priv, id)
seq_printf(m, "%x ",
- i915_gem_request_get_seqno(obj->last_read_req[i]));
+ i915_gem_request_get_seqno(obj->last_read_req[id]));
seq_printf(m, "] %x %x%s%s%s",
i915_gem_request_get_seqno(obj->last_write_req),
i915_gem_request_get_seqno(obj->last_fenced_req),
@@ -184,7 +194,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
}
if (obj->last_write_req != NULL)
seq_printf(m, " (%s)",
- i915_gem_request_get_ring(obj->last_write_req)->name);
+ i915_gem_request_get_engine(obj->last_write_req)->name);
if (obj->frontbuffer_bits)
seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
@@ -202,8 +212,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
uintptr_t list = (uintptr_t) node->info_ent->data;
struct list_head *head;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_address_space *vm = &dev_priv->gtt.base;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma;
u64 total_obj_size, total_gtt_size;
int count, ret;
@@ -216,11 +226,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
switch (list) {
case ACTIVE_LIST:
seq_puts(m, "Active:\n");
- head = &vm->active_list;
+ head = &ggtt->base.active_list;
break;
case INACTIVE_LIST:
seq_puts(m, "Inactive:\n");
- head = &vm->inactive_list;
+ head = &ggtt->base.inactive_list;
break;
default:
mutex_unlock(&dev->struct_mutex);
@@ -397,15 +407,15 @@ static void print_batch_pool_stats(struct seq_file *m,
{
struct drm_i915_gem_object *obj;
struct file_stats stats;
- struct intel_engine_cs *ring;
- int i, j;
+ struct intel_engine_cs *engine;
+ int j;
memset(&stats, 0, sizeof(stats));
- for_each_ring(ring, dev_priv, i) {
- for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
+ for_each_engine(engine, dev_priv) {
+ for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
list_for_each_entry(obj,
- &ring->batch_pool.cache_list[j],
+ &engine->batch_pool.cache_list[j],
batch_pool_link)
per_file_stats(0, obj, &stats);
}
@@ -429,11 +439,13 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
u32 count, mappable_count, purgeable_count;
u64 size, mappable_size, purgeable_size;
+ unsigned long pin_mapped_count = 0, pin_mapped_purgeable_count = 0;
+ u64 pin_mapped_size = 0, pin_mapped_purgeable_size = 0;
struct drm_i915_gem_object *obj;
- struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_file *file;
struct i915_vma *vma;
int ret;
@@ -452,12 +464,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_vmas(&vm->active_list, vm_link);
+ count_vmas(&ggtt->base.active_list, vm_link);
seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_vmas(&vm->inactive_list, vm_link);
+ count_vmas(&ggtt->base.inactive_list, vm_link);
seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n",
count, mappable_count, size, mappable_size);
@@ -466,6 +478,14 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
size += obj->base.size, ++count;
if (obj->madv == I915_MADV_DONTNEED)
purgeable_size += obj->base.size, ++purgeable_count;
+ if (obj->mapping) {
+ pin_mapped_count++;
+ pin_mapped_size += obj->base.size;
+ if (obj->pages_pin_count == 0) {
+ pin_mapped_purgeable_count++;
+ pin_mapped_purgeable_size += obj->base.size;
+ }
+ }
}
seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
@@ -483,6 +503,14 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
purgeable_size += obj->base.size;
++purgeable_count;
}
+ if (obj->mapping) {
+ pin_mapped_count++;
+ pin_mapped_size += obj->base.size;
+ if (obj->pages_pin_count == 0) {
+ pin_mapped_purgeable_count++;
+ pin_mapped_purgeable_size += obj->base.size;
+ }
+ }
}
seq_printf(m, "%u purgeable objects, %llu bytes\n",
purgeable_count, purgeable_size);
@@ -490,13 +518,20 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
mappable_count, mappable_size);
seq_printf(m, "%u fault mappable objects, %llu bytes\n",
count, size);
+ seq_printf(m,
+ "%lu [%lu] pin mapped objects, %llu [%llu] bytes [purgeable]\n",
+ pin_mapped_count, pin_mapped_purgeable_count,
+ pin_mapped_size, pin_mapped_purgeable_size);
seq_printf(m, "%llu [%llu] gtt total\n",
- dev_priv->gtt.base.total,
- (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
+ ggtt->base.total, ggtt->mappable_end - ggtt->base.start);
seq_putc(m, '\n');
print_batch_pool_stats(m, dev_priv);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ mutex_lock(&dev->filelist_mutex);
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct file_stats stats;
struct task_struct *task;
@@ -517,8 +552,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
print_file_stats(m, task ? task->comm : "<unknown>", stats);
rcu_read_unlock();
}
-
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->filelist_mutex);
return 0;
}
@@ -591,14 +625,13 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
pipe, plane);
}
if (work->flip_queued_req) {
- struct intel_engine_cs *ring =
- i915_gem_request_get_ring(work->flip_queued_req);
+ struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req);
seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
- ring->name,
+ engine->name,
i915_gem_request_get_seqno(work->flip_queued_req),
dev_priv->next_seqno,
- ring->get_seqno(ring, true),
+ engine->get_seqno(engine),
i915_gem_request_completed(work->flip_queued_req, true));
} else
seq_printf(m, "Flip not associated with any ring\n");
@@ -637,28 +670,28 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int total = 0;
- int ret, i, j;
+ int ret, j;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- for_each_ring(ring, dev_priv, i) {
- for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
+ for_each_engine(engine, dev_priv) {
+ for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
int count;
count = 0;
list_for_each_entry(obj,
- &ring->batch_pool.cache_list[j],
+ &engine->batch_pool.cache_list[j],
batch_pool_link)
count++;
seq_printf(m, "%s cache[%d]: %d objects\n",
- ring->name, j, count);
+ engine->name, j, count);
list_for_each_entry(obj,
- &ring->batch_pool.cache_list[j],
+ &engine->batch_pool.cache_list[j],
batch_pool_link) {
seq_puts(m, " ");
describe_obj(m, obj);
@@ -681,26 +714,26 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct drm_i915_gem_request *req;
- int ret, any, i;
+ int ret, any;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
any = 0;
- for_each_ring(ring, dev_priv, i) {
+ for_each_engine(engine, dev_priv) {
int count;
count = 0;
- list_for_each_entry(req, &ring->request_list, list)
+ list_for_each_entry(req, &engine->request_list, list)
count++;
if (count == 0)
continue;
- seq_printf(m, "%s requests: %d\n", ring->name, count);
- list_for_each_entry(req, &ring->request_list, list) {
+ seq_printf(m, "%s requests: %d\n", engine->name, count);
+ list_for_each_entry(req, &engine->request_list, list) {
struct task_struct *task;
rcu_read_lock();
@@ -726,12 +759,12 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
}
static void i915_ring_seqno_info(struct seq_file *m,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *engine)
{
- if (ring->get_seqno) {
- seq_printf(m, "Current sequence (%s): %x\n",
- ring->name, ring->get_seqno(ring, false));
- }
+ seq_printf(m, "Current sequence (%s): %x\n",
+ engine->name, engine->get_seqno(engine));
+ seq_printf(m, "Current user interrupts (%s): %x\n",
+ engine->name, READ_ONCE(engine->user_interrupts));
}
static int i915_gem_seqno_info(struct seq_file *m, void *data)
@@ -739,16 +772,16 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- int ret, i;
+ struct intel_engine_cs *engine;
+ int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
intel_runtime_pm_get(dev_priv);
- for_each_ring(ring, dev_priv, i)
- i915_ring_seqno_info(m, ring);
+ for_each_engine(engine, dev_priv)
+ i915_ring_seqno_info(m, engine);
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
@@ -762,7 +795,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int ret, i, pipe;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -934,13 +967,13 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Graphics Interrupt mask: %08x\n",
I915_READ(GTIMR));
}
- for_each_ring(ring, dev_priv, i) {
+ for_each_engine(engine, dev_priv) {
if (INTEL_INFO(dev)->gen >= 6) {
seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
- ring->name, I915_READ_IMR(ring));
+ engine->name, I915_READ_IMR(engine));
}
- i915_ring_seqno_info(m, ring);
+ i915_ring_seqno_info(m, engine);
}
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
@@ -981,12 +1014,12 @@ static int i915_hws_info(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
const u32 *hws;
int i;
- ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
- hws = ring->status_page.page_addr;
+ engine = &dev_priv->engine[(uintptr_t)node->info_ent->data];
+ hws = engine->status_page.page_addr;
if (hws == NULL)
return 0;
@@ -1216,12 +1249,12 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
rpstat = I915_READ(GEN6_RPSTAT1);
- rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
- rpcurup = I915_READ(GEN6_RP_CUR_UP);
- rpprevup = I915_READ(GEN6_RP_PREV_UP);
- rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
- rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
- rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
+ rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
+ rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
+ rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
+ rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
+ rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
+ rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
if (IS_GEN9(dev))
cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
@@ -1261,21 +1294,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
seq_printf(m, "CAGF: %dMHz\n", cagf);
- seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
- GEN6_CURICONT_MASK);
- seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
- GEN6_CURBSYTAVG_MASK);
- seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
- GEN6_CURBSYTAVG_MASK);
+ seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
+ rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
+ seq_printf(m, "RP CUR UP: %d (%dus)\n",
+ rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
+ seq_printf(m, "RP PREV UP: %d (%dus)\n",
+ rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
seq_printf(m, "Up threshold: %d%%\n",
dev_priv->rps.up_threshold);
- seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
- GEN6_CURIAVG_MASK);
- seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
- GEN6_CURBSYTAVG_MASK);
- seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
- GEN6_CURBSYTAVG_MASK);
+ seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
+ rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
+ seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
+ rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
+ seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
+ rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
seq_printf(m, "Down threshold: %d%%\n",
dev_priv->rps.down_threshold);
@@ -1331,11 +1364,12 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- u64 acthd[I915_NUM_RINGS];
- u32 seqno[I915_NUM_RINGS];
+ struct intel_engine_cs *engine;
+ u64 acthd[I915_NUM_ENGINES];
+ u32 seqno[I915_NUM_ENGINES];
u32 instdone[I915_NUM_INSTDONE_REG];
- int i, j;
+ enum intel_engine_id id;
+ int j;
if (!i915.enable_hangcheck) {
seq_printf(m, "Hangcheck disabled\n");
@@ -1344,9 +1378,9 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
- for_each_ring(ring, dev_priv, i) {
- seqno[i] = ring->get_seqno(ring, false);
- acthd[i] = intel_ring_get_active_head(ring);
+ for_each_engine_id(engine, dev_priv, id) {
+ acthd[id] = intel_ring_get_active_head(engine);
+ seqno[id] = engine->get_seqno(engine);
}
i915_get_extra_instdone(dev, instdone);
@@ -1360,19 +1394,22 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
} else
seq_printf(m, "Hangcheck inactive\n");
- for_each_ring(ring, dev_priv, i) {
- seq_printf(m, "%s:\n", ring->name);
- seq_printf(m, "\tseqno = %x [current %x]\n",
- ring->hangcheck.seqno, seqno[i]);
+ for_each_engine_id(engine, dev_priv, id) {
+ seq_printf(m, "%s:\n", engine->name);
+ seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
+ engine->hangcheck.seqno,
+ seqno[id],
+ engine->last_submitted_seqno);
+ seq_printf(m, "\tuser interrupts = %x [current %x]\n",
+ engine->hangcheck.user_interrupts,
+ READ_ONCE(engine->user_interrupts));
seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
- (long long)ring->hangcheck.acthd,
- (long long)acthd[i]);
- seq_printf(m, "\tmax ACTHD = 0x%08llx\n",
- (long long)ring->hangcheck.max_acthd);
- seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
- seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
-
- if (ring->id == RCS) {
+ (long long)engine->hangcheck.acthd,
+ (long long)acthd[id]);
+ seq_printf(m, "\tscore = %d\n", engine->hangcheck.score);
+ seq_printf(m, "\taction = %d\n", engine->hangcheck.action);
+
+ if (engine->id == RCS) {
seq_puts(m, "\tinstdone read =");
for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
@@ -1382,7 +1419,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
seq_printf(m, " 0x%08x",
- ring->hangcheck.instdone[j]);
+ engine->hangcheck.instdone[j]);
seq_puts(m, "\n");
}
@@ -1465,12 +1502,11 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_uncore_forcewake_domain *fw_domain;
- int i;
spin_lock_irq(&dev_priv->uncore.lock);
- for_each_fw_domain(fw_domain, dev_priv, i) {
+ for_each_fw_domain(fw_domain, dev_priv) {
seq_printf(m, "%s.wake_count = %u\n",
- intel_uncore_forcewake_domain_to_str(i),
+ intel_uncore_forcewake_domain_to_str(fw_domain->id),
fw_domain->wake_count);
}
spin_unlock_irq(&dev_priv->uncore.lock);
@@ -1897,6 +1933,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct intel_framebuffer *fbdev_fb = NULL;
struct drm_framebuffer *drm_fb;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
#ifdef CONFIG_DRM_FBDEV_EMULATION
if (to_i915(dev)->fbdev) {
@@ -1908,7 +1949,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fbdev_fb->base.depth,
fbdev_fb->base.bits_per_pixel,
fbdev_fb->base.modifier[0],
- atomic_read(&fbdev_fb->base.refcount.refcount));
+ drm_framebuffer_read_refcount(&fbdev_fb->base));
describe_obj(m, fbdev_fb->obj);
seq_putc(m, '\n');
}
@@ -1926,11 +1967,12 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.depth,
fb->base.bits_per_pixel,
fb->base.modifier[0],
- atomic_read(&fb->base.refcount.refcount));
+ drm_framebuffer_read_refcount(&fb->base));
describe_obj(m, fb->obj);
seq_putc(m, '\n');
}
mutex_unlock(&dev->mode_config.fb_lock);
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -1948,9 +1990,10 @@ static int i915_context_status(struct seq_file *m, void *unused)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct intel_context *ctx;
- int ret, i;
+ enum intel_engine_id id;
+ int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
@@ -1968,13 +2011,13 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (i915.enable_execlists) {
seq_putc(m, '\n');
- for_each_ring(ring, dev_priv, i) {
+ for_each_engine_id(engine, dev_priv, id) {
struct drm_i915_gem_object *ctx_obj =
- ctx->engine[i].state;
+ ctx->engine[id].state;
struct intel_ringbuffer *ringbuf =
- ctx->engine[i].ringbuf;
+ ctx->engine[id].ringbuf;
- seq_printf(m, "%s: ", ring->name);
+ seq_printf(m, "%s: ", engine->name);
if (ctx_obj)
describe_obj(m, ctx_obj);
if (ringbuf)
@@ -1995,22 +2038,22 @@ static int i915_context_status(struct seq_file *m, void *unused)
static void i915_dump_lrc_obj(struct seq_file *m,
struct intel_context *ctx,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *engine)
{
struct page *page;
uint32_t *reg_state;
int j;
- struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
unsigned long ggtt_offset = 0;
if (ctx_obj == NULL) {
seq_printf(m, "Context on %s with no gem object\n",
- ring->name);
+ engine->name);
return;
}
- seq_printf(m, "CONTEXT: %s %u\n", ring->name,
- intel_execlists_ctx_id(ctx, ring));
+ seq_printf(m, "CONTEXT: %s %u\n", engine->name,
+ intel_execlists_ctx_id(ctx, engine));
if (!i915_gem_obj_ggtt_bound(ctx_obj))
seq_puts(m, "\tNot bound in GGTT\n");
@@ -2043,9 +2086,9 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct intel_context *ctx;
- int ret, i;
+ int ret;
if (!i915.enable_execlists) {
seq_printf(m, "Logical Ring Contexts are disabled\n");
@@ -2058,8 +2101,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
list_for_each_entry(ctx, &dev_priv->context_list, link)
if (ctx != dev_priv->kernel_context)
- for_each_ring(ring, dev_priv, i)
- i915_dump_lrc_obj(m, ctx, ring);
+ for_each_engine(engine, dev_priv)
+ i915_dump_lrc_obj(m, ctx, engine);
mutex_unlock(&dev->struct_mutex);
@@ -2071,15 +2114,14 @@ static int i915_execlists(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
u32 status_pointer;
u8 read_pointer;
u8 write_pointer;
u32 status;
u32 ctx_id;
struct list_head *cursor;
- int ring_id, i;
- int ret;
+ int i, ret;
if (!i915.enable_execlists) {
seq_puts(m, "Logical Ring Contexts are disabled\n");
@@ -2092,22 +2134,21 @@ static int i915_execlists(struct seq_file *m, void *data)
intel_runtime_pm_get(dev_priv);
- for_each_ring(ring, dev_priv, ring_id) {
+ for_each_engine(engine, dev_priv) {
struct drm_i915_gem_request *head_req = NULL;
int count = 0;
- unsigned long flags;
- seq_printf(m, "%s\n", ring->name);
+ seq_printf(m, "%s\n", engine->name);
- status = I915_READ(RING_EXECLIST_STATUS_LO(ring));
- ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(ring));
+ status = I915_READ(RING_EXECLIST_STATUS_LO(engine));
+ ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(engine));
seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
status, ctx_id);
- status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
+ status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
- read_pointer = ring->next_context_status_buffer;
+ read_pointer = engine->next_context_status_buffer;
write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
if (read_pointer > write_pointer)
write_pointer += GEN8_CSB_ENTRIES;
@@ -2115,24 +2156,25 @@ static int i915_execlists(struct seq_file *m, void *data)
read_pointer, write_pointer);
for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
- status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
- ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));
+ status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, i));
+ ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, i));
seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
i, status, ctx_id);
}
- spin_lock_irqsave(&ring->execlist_lock, flags);
- list_for_each(cursor, &ring->execlist_queue)
+ spin_lock_bh(&engine->execlist_lock);
+ list_for_each(cursor, &engine->execlist_queue)
count++;
- head_req = list_first_entry_or_null(&ring->execlist_queue,
- struct drm_i915_gem_request, execlist_link);
- spin_unlock_irqrestore(&ring->execlist_lock, flags);
+ head_req = list_first_entry_or_null(&engine->execlist_queue,
+ struct drm_i915_gem_request,
+ execlist_link);
+ spin_unlock_bh(&engine->execlist_lock);
seq_printf(m, "\t%d requests in queue\n", count);
if (head_req) {
seq_printf(m, "\tHead request id: %u\n",
- intel_execlists_ctx_id(head_req->ctx, ring));
+ intel_execlists_ctx_id(head_req->ctx, engine));
seq_printf(m, "\tHead request tail: %u\n",
head_req->tail);
}
@@ -2248,19 +2290,19 @@ static int per_file_ctx(int id, void *ptr, void *data)
static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- int unused, i;
+ int i;
if (!ppgtt)
return;
- for_each_ring(ring, dev_priv, unused) {
- seq_printf(m, "%s\n", ring->name);
+ for_each_engine(engine, dev_priv) {
+ seq_printf(m, "%s\n", engine->name);
for (i = 0; i < 4; i++) {
- u64 pdp = I915_READ(GEN8_RING_PDP_UDW(ring, i));
+ u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
pdp <<= 32;
- pdp |= I915_READ(GEN8_RING_PDP_LDW(ring, i));
+ pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
}
}
@@ -2269,19 +2311,22 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- int i;
+ struct intel_engine_cs *engine;
if (INTEL_INFO(dev)->gen == 6)
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
- for_each_ring(ring, dev_priv, i) {
- seq_printf(m, "%s\n", ring->name);
+ for_each_engine(engine, dev_priv) {
+ seq_printf(m, "%s\n", engine->name);
if (INTEL_INFO(dev)->gen == 7)
- seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
- seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
- seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
- seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
+ seq_printf(m, "GFX_MODE: 0x%08x\n",
+ I915_READ(RING_MODE_GEN7(engine)));
+ seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
+ I915_READ(RING_PP_DIR_BASE(engine)));
+ seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
+ I915_READ(RING_PP_DIR_BASE_READ(engine)));
+ seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
+ I915_READ(RING_PP_DIR_DCLV(engine)));
}
if (dev_priv->mm.aliasing_ppgtt) {
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
@@ -2312,6 +2357,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
else if (INTEL_INFO(dev)->gen >= 6)
gen6_ppgtt_info(m, dev);
+ mutex_lock(&dev->filelist_mutex);
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct drm_i915_file_private *file_priv = file->driver_priv;
struct task_struct *task;
@@ -2319,15 +2365,16 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
task = get_pid_task(file->pid, PIDTYPE_PID);
if (!task) {
ret = -ESRCH;
- goto out_put;
+ goto out_unlock;
}
seq_printf(m, "\nproc: %s\n", task->comm);
put_task_struct(task);
idr_for_each(&file_priv->context_idr, per_file_ctx,
(void *)(unsigned long)m);
}
+out_unlock:
+ mutex_unlock(&dev->filelist_mutex);
-out_put:
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
@@ -2336,12 +2383,11 @@ out_put:
static int count_irq_waiters(struct drm_i915_private *i915)
{
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int count = 0;
- int i;
- for_each_ring(ring, i915, i)
- count += ring->irq_refcount;
+ for_each_engine(engine, i915)
+ count += engine->irq_refcount;
return count;
}
@@ -2362,6 +2408,8 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+
+ mutex_lock(&dev->filelist_mutex);
spin_lock(&dev_priv->rps.client_lock);
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -2384,6 +2432,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active");
seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
spin_unlock(&dev_priv->rps.client_lock);
+ mutex_unlock(&dev->filelist_mutex);
return 0;
}
@@ -2393,10 +2442,11 @@ static int i915_llc(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ const bool edram = INTEL_GEN(dev_priv) > 8;
- /* Size calculation for LLC is a bit of a pain. Ignore for now. */
seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
- seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
+ seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
+ intel_uncore_edram_size(dev_priv)/1024/1024);
return 0;
}
@@ -2408,7 +2458,7 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
u32 tmp, i;
- if (!HAS_GUC_UCODE(dev_priv->dev))
+ if (!HAS_GUC_UCODE(dev_priv))
return 0;
seq_printf(m, "GuC firmware status:\n");
@@ -2449,9 +2499,8 @@ static void i915_guc_client_info(struct seq_file *m,
struct drm_i915_private *dev_priv,
struct i915_guc_client *client)
{
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
uint64_t tot = 0;
- uint32_t i;
seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
client->priority, client->ctx_index, client->proc_desc_offset);
@@ -2464,11 +2513,11 @@ static void i915_guc_client_info(struct seq_file *m,
seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
seq_printf(m, "\tLast submission result: %d\n", client->retcode);
- for_each_ring(ring, dev_priv, i) {
+ for_each_engine(engine, dev_priv) {
seq_printf(m, "\tSubmissions: %llu %s\n",
- client->submissions[ring->guc_id],
- ring->name);
- tot += client->submissions[ring->guc_id];
+ client->submissions[engine->guc_id],
+ engine->name);
+ tot += client->submissions[engine->guc_id];
}
seq_printf(m, "\tTotal: %llu\n", tot);
}
@@ -2480,11 +2529,10 @@ static int i915_guc_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc guc;
struct i915_guc_client client = {};
- struct intel_engine_cs *ring;
- enum intel_ring_id i;
+ struct intel_engine_cs *engine;
u64 total = 0;
- if (!HAS_GUC_SCHED(dev_priv->dev))
+ if (!HAS_GUC_SCHED(dev_priv))
return 0;
if (mutex_lock_interruptible(&dev->struct_mutex))
@@ -2504,11 +2552,11 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
seq_printf(m, "\nGuC submissions:\n");
- for_each_ring(ring, dev_priv, i) {
+ for_each_engine(engine, dev_priv) {
seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
- ring->name, guc.submissions[ring->guc_id],
- guc.last_seqno[ring->guc_id]);
- total += guc.submissions[ring->guc_id];
+ engine->name, guc.submissions[engine->guc_id],
+ guc.last_seqno[engine->guc_id]);
+ total += guc.submissions[engine->guc_id];
}
seq_printf(m, "\t%s: %llu\n", "Total", total);
@@ -2688,10 +2736,8 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!HAS_RUNTIME_PM(dev)) {
- seq_puts(m, "not supported\n");
- return 0;
- }
+ if (!HAS_RUNTIME_PM(dev_priv))
+ seq_puts(m, "Runtime power management not supported\n");
seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
seq_printf(m, "IRQs disabled: %s\n",
@@ -2702,6 +2748,9 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
#else
seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
+ seq_printf(m, "PCI device power state: %s [%d]\n",
+ pci_power_name(dev_priv->dev->pdev->current_state),
+ dev_priv->dev->pdev->current_state);
return 0;
}
@@ -3114,9 +3163,10 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
- int i, j, ret;
+ enum intel_engine_id id;
+ int j, ret;
if (!i915_semaphore_is_enabled(dev)) {
seq_puts(m, "Semaphores are disabled\n");
@@ -3135,14 +3185,14 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
seqno = (uint64_t *)kmap_atomic(page);
- for_each_ring(ring, dev_priv, i) {
+ for_each_engine_id(engine, dev_priv, id) {
uint64_t offset;
- seq_printf(m, "%s\n", ring->name);
+ seq_printf(m, "%s\n", engine->name);
seq_puts(m, " Last signal:");
for (j = 0; j < num_rings; j++) {
- offset = i * I915_NUM_RINGS + j;
+ offset = id * I915_NUM_ENGINES + j;
seq_printf(m, "0x%08llx (0x%02llx) ",
seqno[offset], offset * 8);
}
@@ -3150,7 +3200,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
seq_puts(m, " Last wait: ");
for (j = 0; j < num_rings; j++) {
- offset = i + (j * I915_NUM_RINGS);
+ offset = id + (j * I915_NUM_ENGINES);
seq_printf(m, "0x%08llx (0x%02llx) ",
seqno[offset], offset * 8);
}
@@ -3160,18 +3210,18 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
kunmap_atomic(seqno);
} else {
seq_puts(m, " Last signal:");
- for_each_ring(ring, dev_priv, i)
+ for_each_engine(engine, dev_priv)
for (j = 0; j < num_rings; j++)
seq_printf(m, "0x%08x\n",
- I915_READ(ring->semaphore.mbox.signal[j]));
+ I915_READ(engine->semaphore.mbox.signal[j]));
seq_putc(m, '\n');
}
seq_puts(m, "\nSync seqno:\n");
- for_each_ring(ring, dev_priv, i) {
- for (j = 0; j < num_rings; j++) {
- seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]);
- }
+ for_each_engine(engine, dev_priv) {
+ for (j = 0; j < num_rings; j++)
+ seq_printf(m, " 0x%08x ",
+ engine->semaphore.sync_seqno[j]);
seq_putc(m, '\n');
}
seq_putc(m, '\n');
@@ -3193,8 +3243,8 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
- seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n",
- pll->config.crtc_mask, pll->active, yesno(pll->on));
+ seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
+ pll->config.crtc_mask, pll->active_mask, yesno(pll->on));
seq_printf(m, " tracked hardware state:\n");
seq_printf(m, " dpll: 0x%08x\n", pll->config.hw_state.dpll);
seq_printf(m, " dpll_md: 0x%08x\n",
@@ -3212,11 +3262,12 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
{
int i;
int ret;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *workarounds = &dev_priv->workarounds;
+ enum intel_engine_id id;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
@@ -3225,9 +3276,9 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
- for_each_ring(ring, dev_priv, i)
+ for_each_engine_id(engine, dev_priv, id)
seq_printf(m, "HW whitelist count for %s: %d\n",
- ring->name, workarounds->hw_whitelist_count[i]);
+ engine->name, workarounds->hw_whitelist_count[id]);
for (i = 0; i < workarounds->count; ++i) {
i915_reg_t addr;
u32 mask, value, read;
@@ -3417,7 +3468,8 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
intel_dig_port = enc_to_dig_port(encoder);
if (!intel_dig_port->dp.can_mst)
continue;
-
+ seq_printf(m, "MST Source Port %c\n",
+ port_name(intel_dig_port->port));
drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
}
drm_modeset_unlock_all(dev);
@@ -4693,7 +4745,7 @@ i915_wedged_get(void *data, u64 *val)
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
- *val = atomic_read(&dev_priv->gpu_error.reset_counter);
+ *val = i915_terminally_wedged(&dev_priv->gpu_error);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 1c6d227aa..b3198fcd0 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -50,6 +50,66 @@
#include <linux/pm_runtime.h>
#include <linux/oom.h>
+static unsigned int i915_load_fail_count;
+
+bool __i915_inject_load_failure(const char *func, int line)
+{
+ if (i915_load_fail_count >= i915.inject_load_failure)
+ return false;
+
+ if (++i915_load_fail_count == i915.inject_load_failure) {
+ DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
+ i915.inject_load_failure, func, line);
+ return true;
+ }
+
+ return false;
+}
+
+#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
+#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
+ "providing the dmesg log by booting with drm.debug=0xf"
+
+void
+__i915_printk(struct drm_i915_private *dev_priv, const char *level,
+ const char *fmt, ...)
+{
+ static bool shown_bug_once;
+ struct device *dev = dev_priv->dev->dev;
+ bool is_error = level[1] <= KERN_ERR[1];
+ bool is_debug = level[1] == KERN_DEBUG[1];
+ struct va_format vaf;
+ va_list args;
+
+ if (is_debug && !(drm_debug & DRM_UT_DRIVER))
+ return;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
+ __builtin_return_address(0), &vaf);
+
+ if (is_error && !shown_bug_once) {
+ dev_notice(dev, "%s", FDO_BUG_MSG);
+ shown_bug_once = true;
+ }
+
+ va_end(args);
+}
+
+static bool i915_error_injected(struct drm_i915_private *dev_priv)
+{
+ return i915.inject_load_failure &&
+ i915_load_fail_count == i915.inject_load_failure;
+}
+
+#define i915_load_error(dev_priv, fmt, ...) \
+ __i915_printk(dev_priv, \
+ i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
+ fmt, ##__VA_ARGS__)
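A usage note, not part of the patch: each i915_inject_load_failure() call acts as a numbered checkpoint, and booting with the module parameter i915.inject_load_failure=N makes the Nth checkpoint report failure, while i915_load_error() demotes the resulting message to debug level so injected failures are not mistaken for real ones. A minimal sketch of a checkpointed init step follows; the function name is invented for illustration.

static int i915_driver_init_example(struct drm_i915_private *dev_priv)
{
	/* Checkpoint: returns true only when this is the Nth call and
	 * i915.inject_load_failure == N, simulating a load failure here.
	 */
	if (i915_inject_load_failure())
		return -ENODEV;

	/* Real initialization work for this phase would go here. */
	return 0;
}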
static int i915_getparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -87,16 +147,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = 1;
break;
case I915_PARAM_HAS_BSD:
- value = intel_ring_initialized(&dev_priv->ring[VCS]);
+ value = intel_engine_initialized(&dev_priv->engine[VCS]);
break;
case I915_PARAM_HAS_BLT:
- value = intel_ring_initialized(&dev_priv->ring[BCS]);
+ value = intel_engine_initialized(&dev_priv->engine[BCS]);
break;
case I915_PARAM_HAS_VEBOX:
- value = intel_ring_initialized(&dev_priv->ring[VECS]);
+ value = intel_engine_initialized(&dev_priv->engine[VECS]);
break;
case I915_PARAM_HAS_BSD2:
- value = intel_ring_initialized(&dev_priv->ring[VCS2]);
+ value = intel_engine_initialized(&dev_priv->engine[VCS2]);
break;
case I915_PARAM_HAS_RELAXED_FENCING:
value = 1;
@@ -197,13 +257,6 @@ static int i915_get_bridge_dev(struct drm_device *dev)
return 0;
}
-#define MCHBAR_I915 0x44
-#define MCHBAR_I965 0x48
-#define MCHBAR_SIZE (4*4096)
-
-#define DEVEN_REG 0x54
-#define DEVEN_MCHBAR_EN (1 << 28)
-
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
@@ -265,7 +318,7 @@ intel_setup_mchbar(struct drm_device *dev)
dev_priv->mchbar_need_disable = false;
if (IS_I915G(dev) || IS_I915GM(dev)) {
- pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
+ pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
enabled = !!(temp & DEVEN_MCHBAR_EN);
} else {
pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
@@ -283,7 +336,7 @@ intel_setup_mchbar(struct drm_device *dev)
/* Space is allocated or reserved, so enable it. */
if (IS_I915G(dev) || IS_I915GM(dev)) {
- pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
+ pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
temp | DEVEN_MCHBAR_EN);
} else {
pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
@@ -296,17 +349,24 @@ intel_teardown_mchbar(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp;
if (dev_priv->mchbar_need_disable) {
if (IS_I915G(dev) || IS_I915GM(dev)) {
- pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
- temp &= ~DEVEN_MCHBAR_EN;
- pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
+ u32 deven_val;
+
+ pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
+ &deven_val);
+ deven_val &= ~DEVEN_MCHBAR_EN;
+ pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
+ deven_val);
} else {
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
- temp &= ~1;
- pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
+ u32 mchbar_val;
+
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
+ &mchbar_val);
+ mchbar_val &= ~1;
+ pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
+ mchbar_val);
}
}
@@ -370,6 +430,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ if (i915_inject_load_failure())
+ return -ENODEV;
+
ret = intel_bios_init(dev_priv);
if (ret)
DRM_INFO("failed to find VBIOS tables\n");
@@ -413,9 +476,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_modeset_gem_init(dev);
- /* Always safe in the mode setting case. */
- /* FIXME: do pre/post-mode set stuff in core KMS code */
- dev->vblank_disable_allowed = true;
if (INTEL_INFO(dev)->num_pipes == 0)
return 0;
@@ -444,7 +504,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
cleanup_gem:
mutex_lock(&dev->struct_mutex);
- i915_gem_cleanup_ringbuffer(dev);
+ i915_gem_cleanup_engines(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
cleanup_irq:
@@ -453,6 +513,7 @@ cleanup_irq:
intel_teardown_gmbus(dev);
cleanup_csr:
intel_csr_ucode_fini(dev_priv);
+ intel_power_domains_fini(dev_priv);
vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -465,6 +526,7 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
struct apertures_struct *ap;
struct pci_dev *pdev = dev_priv->dev->pdev;
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool primary;
int ret;
@@ -472,8 +534,8 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
if (!ap)
return -ENOMEM;
- ap->ranges[0].base = dev_priv->gtt.mappable_base;
- ap->ranges[0].size = dev_priv->gtt.mappable_end;
+ ap->ranges[0].base = ggtt->mappable_base;
+ ap->ranges[0].size = ggtt->mappable_end;
primary =
pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
@@ -853,6 +915,10 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
else if (INTEL_INFO(dev)->gen >= 9)
gen9_sseu_info_init(dev);
+ /* Snooping is broken on BXT A stepping. */
+ info->has_snoop = !info->has_llc;
+ info->has_snoop &= !IS_BXT_REVID(dev, 0, BXT_REVID_A1);
+
DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
@@ -929,6 +995,84 @@ static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
destroy_workqueue(dev_priv->wq);
}
+/**
+ * i915_driver_init_early - setup state not requiring device access
+ * @dev_priv: device private
+ *
+ * Initialize everything that is a "SW-only" state, that is state not
+ * requiring accessing the device or exposing the driver via kernel internal
+ * or userspace interfaces. Example steps belonging here: lock initialization,
+ * system memory allocation, setting up device specific attributes and
+ * function hooks not requiring accessing the device.
+ */
+static int i915_driver_init_early(struct drm_i915_private *dev_priv,
+ struct drm_device *dev,
+ struct intel_device_info *info)
+{
+ struct intel_device_info *device_info;
+ int ret = 0;
+
+ if (i915_inject_load_failure())
+ return -ENODEV;
+
+ /* Setup the write-once "constant" device info */
+ device_info = (struct intel_device_info *)&dev_priv->info;
+ memcpy(device_info, info, sizeof(dev_priv->info));
+ device_info->device_id = dev->pdev->device;
+
+ spin_lock_init(&dev_priv->irq_lock);
+ spin_lock_init(&dev_priv->gpu_error.lock);
+ mutex_init(&dev_priv->backlight_lock);
+ spin_lock_init(&dev_priv->uncore.lock);
+ spin_lock_init(&dev_priv->mm.object_stat_lock);
+ spin_lock_init(&dev_priv->mmio_flip_lock);
+ mutex_init(&dev_priv->sb_lock);
+ mutex_init(&dev_priv->modeset_restore_lock);
+ mutex_init(&dev_priv->av_mutex);
+ mutex_init(&dev_priv->wm.wm_mutex);
+ mutex_init(&dev_priv->pps_mutex);
+
+ ret = i915_workqueues_init(dev_priv);
+ if (ret < 0)
+ return ret;
+
+ /* This must be called before any calls to HAS_PCH_* */
+ intel_detect_pch(dev);
+
+ intel_pm_setup(dev);
+ intel_init_dpio(dev_priv);
+ intel_power_domains_init(dev_priv);
+ intel_irq_init(dev_priv);
+ intel_init_display_hooks(dev_priv);
+ intel_init_clock_gating_hooks(dev_priv);
+ intel_init_audio_hooks(dev_priv);
+ i915_gem_load_init(dev);
+
+ intel_display_crc_init(dev);
+
+ i915_dump_device_info(dev_priv);
+
+ /* Not all pre-production machines fall into this category, only the
+ * very first ones. Almost everything should work, except for maybe
+ * suspend/resume. And we don't implement workarounds that affect only
+ * pre-production machines. */
+ if (IS_HSW_EARLY_SDV(dev))
+ DRM_INFO("This is an early pre-production Haswell machine. "
+ "It may not be fully functional.\n");
+
+ return 0;
+}
+
+/**
+ * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
+ * @dev_priv: device private
+ */
+static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
+{
+ i915_gem_load_cleanup(dev_priv->dev);
+ i915_workqueues_cleanup(dev_priv);
+}
+
static int i915_mmio_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -970,97 +1114,93 @@ static void i915_mmio_cleanup(struct drm_device *dev)
}
/**
- * i915_driver_load - setup chip and create an initial config
- * @dev: DRM device
- * @flags: startup flags
+ * i915_driver_init_mmio - setup device MMIO
+ * @dev_priv: device private
*
- * The driver load routine has to do several things:
- * - drive output discovery via intel_modeset_init()
- * - initialize the memory manager
- * - allocate initial config memory
- * - setup the DRM framebuffer with the allocated memory
+ * Setup minimal device state necessary for MMIO accesses later in the
+ * initialization sequence. The setup here should avoid any other device-wide
+ * side effects or exposing the driver via kernel internal or user space
+ * interfaces.
*/
-int i915_driver_load(struct drm_device *dev, unsigned long flags)
+static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv;
- struct intel_device_info *info, *device_info;
- int ret = 0;
- uint32_t aperture_size;
-
- info = (struct intel_device_info *) flags;
-
- dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
- if (dev_priv == NULL)
- return -ENOMEM;
-
- dev->dev_private = dev_priv;
- dev_priv->dev = dev;
+ struct drm_device *dev = dev_priv->dev;
+ int ret;
- /* Setup the write-once "constant" device info */
- device_info = (struct intel_device_info *)&dev_priv->info;
- memcpy(device_info, info, sizeof(dev_priv->info));
- device_info->device_id = dev->pdev->device;
+ if (i915_inject_load_failure())
+ return -ENODEV;
- spin_lock_init(&dev_priv->irq_lock);
- spin_lock_init(&dev_priv->gpu_error.lock);
- mutex_init(&dev_priv->backlight_lock);
- spin_lock_init(&dev_priv->uncore.lock);
- spin_lock_init(&dev_priv->mm.object_stat_lock);
- spin_lock_init(&dev_priv->mmio_flip_lock);
- mutex_init(&dev_priv->sb_lock);
- mutex_init(&dev_priv->modeset_restore_lock);
- mutex_init(&dev_priv->av_mutex);
+ if (i915_get_bridge_dev(dev))
+ return -EIO;
- ret = i915_workqueues_init(dev_priv);
+ ret = i915_mmio_setup(dev);
if (ret < 0)
- goto out_free_priv;
+ goto put_bridge;
- intel_pm_setup(dev);
+ intel_uncore_init(dev);
- intel_runtime_pm_get(dev_priv);
+ return 0;
- intel_display_crc_init(dev);
+put_bridge:
+ pci_dev_put(dev_priv->bridge_dev);
- i915_dump_device_info(dev_priv);
+ return ret;
+}
- /* Not all pre-production machines fall into this category, only the
- * very first ones. Almost everything should work, except for maybe
- * suspend/resume. And we don't implement workarounds that affect only
- * pre-production machines. */
- if (IS_HSW_EARLY_SDV(dev))
- DRM_INFO("This is an early pre-production Haswell machine. "
- "It may not be fully functional.\n");
+/**
+ * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
+ * @dev_priv: device private
+ */
+static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
- if (i915_get_bridge_dev(dev)) {
- ret = -EIO;
- goto out_runtime_pm_put;
- }
+ intel_uncore_fini(dev);
+ i915_mmio_cleanup(dev);
+ pci_dev_put(dev_priv->bridge_dev);
+}
- ret = i915_mmio_setup(dev);
- if (ret < 0)
- goto put_bridge;
+/**
+ * i915_driver_init_hw - setup state requiring device access
+ * @dev_priv: device private
+ *
+ * Setup state that requires accessing the device, but doesn't require
+ * exposing the driver via kernel internal or userspace interfaces.
+ */
+static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ uint32_t aperture_size;
+ int ret;
- /* This must be called before any calls to HAS_PCH_* */
- intel_detect_pch(dev);
+ if (i915_inject_load_failure())
+ return -ENODEV;
- intel_uncore_init(dev);
+ intel_device_info_runtime_init(dev);
- ret = i915_gem_gtt_init(dev);
+ ret = i915_ggtt_init_hw(dev);
if (ret)
- goto out_uncore_fini;
+ return ret;
+
+ ret = i915_ggtt_enable_hw(dev);
+ if (ret) {
+ DRM_ERROR("failed to enable GGTT\n");
+ goto out_ggtt;
+ }
/* WARNING: Apparently we must kick fbdev drivers before vgacon,
* otherwise the vga fbdev driver falls over. */
ret = i915_kick_out_firmware_fb(dev_priv);
if (ret) {
DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
- goto out_gtt;
+ goto out_ggtt;
}
ret = i915_kick_out_vgacon(dev_priv);
if (ret) {
DRM_ERROR("failed to remove conflicting VGA console\n");
- goto out_gtt;
+ goto out_ggtt;
}
pci_set_master(dev->pdev);
@@ -1080,26 +1220,27 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
- aperture_size = dev_priv->gtt.mappable_end;
+ aperture_size = ggtt->mappable_end;
- dev_priv->gtt.mappable =
- io_mapping_create_wc(dev_priv->gtt.mappable_base,
+ ggtt->mappable =
+ io_mapping_create_wc(ggtt->mappable_base,
aperture_size);
- if (dev_priv->gtt.mappable == NULL) {
+ if (!ggtt->mappable) {
ret = -EIO;
- goto out_gtt;
+ goto out_ggtt;
}
- dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
+ ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
aperture_size);
- intel_irq_init(dev_priv);
+ pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
intel_uncore_sanitize(dev);
intel_opregion_setup(dev);
- i915_gem_load_init(dev);
- i915_gem_shrinker_init(dev_priv);
+ i915_gem_load_init_fences(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
@@ -1117,24 +1258,44 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
DRM_DEBUG_DRIVER("can't enable MSI");
}
- intel_device_info_runtime_init(dev);
+ return 0;
- intel_init_dpio(dev_priv);
+out_ggtt:
+ i915_ggtt_cleanup_hw(dev);
- if (INTEL_INFO(dev)->num_pipes) {
- ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
- if (ret)
- goto out_gem_unload;
- }
+ return ret;
+}
- intel_power_domains_init(dev_priv);
+/**
+ * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
+ * @dev_priv: device private
+ */
+static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
- ret = i915_load_modeset_init(dev);
- if (ret < 0) {
- DRM_ERROR("failed to init modeset\n");
- goto out_power_well;
- }
+ if (dev->pdev->msi_enabled)
+ pci_disable_msi(dev->pdev);
+ pm_qos_remove_request(&dev_priv->pm_qos);
+ arch_phys_wc_del(ggtt->mtrr);
+ io_mapping_free(ggtt->mappable);
+ i915_ggtt_cleanup_hw(dev);
+}
+
+/**
+ * i915_driver_register - register the driver with the rest of the system
+ * @dev_priv: device private
+ *
+ * Perform any steps necessary to make the driver available via kernel
+ * internal or userspace interfaces.
+ */
+static void i915_driver_register(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ i915_gem_shrinker_init(dev_priv);
/*
* Notify a valid surface after modesetting,
* when running inside a VM.
@@ -1144,48 +1305,107 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
i915_setup_sysfs(dev);
- if (INTEL_INFO(dev)->num_pipes) {
+ if (INTEL_INFO(dev_priv)->num_pipes) {
/* Must be done after probing outputs */
intel_opregion_init(dev);
acpi_video_register();
}
- if (IS_GEN5(dev))
+ if (IS_GEN5(dev_priv))
intel_gpu_ips_init(dev_priv);
- intel_runtime_pm_enable(dev_priv);
-
i915_audio_component_init(dev_priv);
+}
+
+/**
+ * i915_driver_unregister - cleanup the registration done in i915_driver_register()
+ * @dev_priv: device private
+ */
+static void i915_driver_unregister(struct drm_i915_private *dev_priv)
+{
+ i915_audio_component_cleanup(dev_priv);
+ intel_gpu_ips_teardown();
+ acpi_video_unregister();
+ intel_opregion_fini(dev_priv->dev);
+ i915_teardown_sysfs(dev_priv->dev);
+ i915_gem_shrinker_cleanup(dev_priv);
+}
+
+/**
+ * i915_driver_load - setup chip and create an initial config
+ * @dev: DRM device
+ * @flags: startup flags
+ *
+ * The driver load routine has to do several things:
+ * - drive output discovery via intel_modeset_init()
+ * - initialize the memory manager
+ * - allocate initial config memory
+ * - setup the DRM framebuffer with the allocated memory
+ */
+int i915_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct drm_i915_private *dev_priv;
+ int ret = 0;
+
+ dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+ if (dev_priv == NULL)
+ return -ENOMEM;
+
+ dev->dev_private = dev_priv;
+ /* Must be set before calling __i915_printk */
+ dev_priv->dev = dev;
+
+ ret = i915_driver_init_early(dev_priv, dev,
+ (struct intel_device_info *)flags);
+
+ if (ret < 0)
+ goto out_free_priv;
+
+ intel_runtime_pm_get(dev_priv);
+
+ ret = i915_driver_init_mmio(dev_priv);
+ if (ret < 0)
+ goto out_runtime_pm_put;
+
+ ret = i915_driver_init_hw(dev_priv);
+ if (ret < 0)
+ goto out_cleanup_mmio;
+
+ /*
+ * TODO: move the vblank init and parts of modeset init steps into one
+ * of the i915_driver_init_/i915_driver_register functions according
+ * to the role/effect of the given init step.
+ */
+ if (INTEL_INFO(dev)->num_pipes) {
+ ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
+ if (ret)
+ goto out_cleanup_hw;
+ }
+
+ ret = i915_load_modeset_init(dev);
+ if (ret < 0)
+ goto out_cleanup_vblank;
+
+ i915_driver_register(dev_priv);
+
+ intel_runtime_pm_enable(dev_priv);
intel_runtime_pm_put(dev_priv);
return 0;
-out_power_well:
- intel_power_domains_fini(dev_priv);
+out_cleanup_vblank:
drm_vblank_cleanup(dev);
-out_gem_unload:
- i915_gem_shrinker_cleanup(dev_priv);
-
- if (dev->pdev->msi_enabled)
- pci_disable_msi(dev->pdev);
-
- intel_teardown_mchbar(dev);
- pm_qos_remove_request(&dev_priv->pm_qos);
- arch_phys_wc_del(dev_priv->gtt.mtrr);
- io_mapping_free(dev_priv->gtt.mappable);
-out_gtt:
- i915_global_gtt_cleanup(dev);
-out_uncore_fini:
- intel_uncore_fini(dev);
- i915_mmio_cleanup(dev);
-put_bridge:
- pci_dev_put(dev_priv->bridge_dev);
- i915_gem_load_cleanup(dev);
+out_cleanup_hw:
+ i915_driver_cleanup_hw(dev_priv);
+out_cleanup_mmio:
+ i915_driver_cleanup_mmio(dev_priv);
out_runtime_pm_put:
intel_runtime_pm_put(dev_priv);
- i915_workqueues_cleanup(dev_priv);
+ i915_driver_cleanup_early(dev_priv);
out_free_priv:
+ i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
+
kfree(dev_priv);
return ret;
@@ -1198,26 +1418,15 @@ int i915_driver_unload(struct drm_device *dev)
intel_fbdev_fini(dev);
- i915_audio_component_cleanup(dev_priv);
-
ret = i915_gem_suspend(dev);
if (ret) {
DRM_ERROR("failed to idle hardware: %d\n", ret);
return ret;
}
- intel_power_domains_fini(dev_priv);
-
- intel_gpu_ips_teardown();
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
- i915_teardown_sysfs(dev);
-
- i915_gem_shrinker_cleanup(dev_priv);
-
- io_mapping_free(dev_priv->gtt.mappable);
- arch_phys_wc_del(dev_priv->gtt.mtrr);
-
- acpi_video_unregister();
+ i915_driver_unregister(dev_priv);
drm_vblank_cleanup(dev);
@@ -1246,31 +1455,24 @@ int i915_driver_unload(struct drm_device *dev)
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
i915_destroy_error_state(dev);
- if (dev->pdev->msi_enabled)
- pci_disable_msi(dev->pdev);
-
- intel_opregion_fini(dev);
-
/* Flush any outstanding unpin_work. */
flush_workqueue(dev_priv->wq);
intel_guc_ucode_fini(dev);
mutex_lock(&dev->struct_mutex);
- i915_gem_cleanup_ringbuffer(dev);
+ i915_gem_cleanup_engines(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
intel_fbc_cleanup_cfb(dev_priv);
- pm_qos_remove_request(&dev_priv->pm_qos);
+ intel_power_domains_fini(dev_priv);
- i915_global_gtt_cleanup(dev);
+ i915_driver_cleanup_hw(dev_priv);
+ i915_driver_cleanup_mmio(dev_priv);
- intel_uncore_fini(dev);
- i915_mmio_cleanup(dev);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
- i915_gem_load_cleanup(dev);
- pci_dev_put(dev_priv->bridge_dev);
- i915_workqueues_cleanup(dev_priv);
+ i915_driver_cleanup_early(dev_priv);
kfree(dev_priv);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6d2fb3f4a..85c4debf4 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -66,6 +66,11 @@ static struct drm_driver driver;
#define IVB_CURSOR_OFFSETS \
.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
+#define BDW_COLORS \
+ .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
+#define CHV_COLORS \
+ .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
+
static const struct intel_device_info intel_i830_info = {
.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
@@ -288,24 +293,28 @@ static const struct intel_device_info intel_haswell_m_info = {
.is_mobile = 1,
};
+#define BDW_FEATURES \
+ HSW_FEATURES, \
+ BDW_COLORS
+
static const struct intel_device_info intel_broadwell_d_info = {
- HSW_FEATURES,
+ BDW_FEATURES,
.gen = 8,
};
static const struct intel_device_info intel_broadwell_m_info = {
- HSW_FEATURES,
+ BDW_FEATURES,
.gen = 8, .is_mobile = 1,
};
static const struct intel_device_info intel_broadwell_gt3d_info = {
- HSW_FEATURES,
+ BDW_FEATURES,
.gen = 8,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
static const struct intel_device_info intel_broadwell_gt3m_info = {
- HSW_FEATURES,
+ BDW_FEATURES,
.gen = 8, .is_mobile = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
@@ -318,16 +327,17 @@ static const struct intel_device_info intel_cherryview_info = {
.display_mmio_offset = VLV_DISPLAY_BASE,
GEN_CHV_PIPEOFFSETS,
CURSOR_OFFSETS,
+ CHV_COLORS,
};
static const struct intel_device_info intel_skylake_info = {
- HSW_FEATURES,
+ BDW_FEATURES,
.is_skylake = 1,
.gen = 9,
};
static const struct intel_device_info intel_skylake_gt3_info = {
- HSW_FEATURES,
+ BDW_FEATURES,
.is_skylake = 1,
.gen = 9,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
@@ -345,18 +355,17 @@ static const struct intel_device_info intel_broxton_info = {
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
+ BDW_COLORS,
};
static const struct intel_device_info intel_kabylake_info = {
- HSW_FEATURES,
- .is_preliminary = 1,
+ BDW_FEATURES,
.is_kabylake = 1,
.gen = 9,
};
static const struct intel_device_info intel_kabylake_gt3_info = {
- HSW_FEATURES,
- .is_preliminary = 1,
+ BDW_FEATURES,
.is_kabylake = 1,
.gen = 9,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
@@ -503,7 +512,12 @@ void intel_detect_pch(struct drm_device *dev)
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
WARN_ON(!IS_SKYLAKE(dev) &&
!IS_KABYLAKE(dev));
+ } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_KBP;
+ DRM_DEBUG_KMS("Found KabyPoint PCH\n");
+ WARN_ON(!IS_KABYLAKE(dev));
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
+ (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
pch->subsystem_vendor == 0x1af4 &&
pch->subsystem_device == 0x1100)) {
@@ -557,10 +571,9 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
drm_modeset_unlock_all(dev);
}
-static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume);
-static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
+static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
@@ -630,8 +643,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_display_set_init_power(dev_priv, false);
- if (HAS_CSR(dev_priv))
- flush_work(&dev_priv->csr.work);
+ intel_csr_ucode_suspend(dev_priv);
out:
enable_rpm_wakeref_asserts(dev_priv);
@@ -647,7 +659,8 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
disable_rpm_wakeref_asserts(dev_priv);
- fw_csr = suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
+ fw_csr = !IS_BROXTON(dev_priv) &&
+ suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
/*
* In case of firmware assisted context save/restore don't manually
* deinit the power domains. This also means the CSR/DMC firmware will
@@ -658,7 +671,13 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
if (!fw_csr)
intel_power_domains_suspend(dev_priv);
- ret = intel_suspend_complete(dev_priv);
+ ret = 0;
+ if (IS_BROXTON(dev_priv))
+ bxt_enable_dc9(dev_priv);
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ hsw_enable_pc8(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ ret = vlv_suspend_complete(dev_priv);
if (ret) {
DRM_ERROR("Suspend complete failed: %d\n", ret);
@@ -719,9 +738,16 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
static int i915_drm_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
disable_rpm_wakeref_asserts(dev_priv);
+ ret = i915_ggtt_enable_hw(dev);
+ if (ret)
+ DRM_ERROR("failed to re-enable GGTT\n");
+
+ intel_csr_ucode_resume(dev_priv);
+
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
@@ -850,21 +876,25 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_uncore_early_sanitize(dev, true);
- if (IS_BROXTON(dev))
- ret = bxt_resume_prepare(dev_priv);
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (IS_BROXTON(dev)) {
+ if (!dev_priv->suspended_to_idle)
+ gen9_sanitize_dc_state(dev_priv);
+ bxt_disable_dc9(dev_priv);
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
hsw_disable_pc8(dev_priv);
+ }
intel_uncore_sanitize(dev);
- if (!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
+ if (IS_BROXTON(dev_priv) ||
+ !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
intel_power_domains_init_hw(dev_priv, true);
+ enable_rpm_wakeref_asserts(dev_priv);
+
out:
dev_priv->suspended_to_idle = false;
- enable_rpm_wakeref_asserts(dev_priv);
-
return ret;
}
@@ -900,23 +930,32 @@ int i915_resume_switcheroo(struct drm_device *dev)
int i915_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- bool simulated;
+ struct i915_gpu_error *error = &dev_priv->gpu_error;
+ unsigned reset_counter;
int ret;
intel_reset_gt_powersave(dev);
mutex_lock(&dev->struct_mutex);
- i915_gem_reset(dev);
+ /* Clear any previous failed attempts at recovery. Time to try again. */
+ atomic_andnot(I915_WEDGED, &error->reset_counter);
- simulated = dev_priv->gpu_error.stop_rings != 0;
+ /* Clear the reset-in-progress flag and increment the reset epoch. */
+ reset_counter = atomic_inc_return(&error->reset_counter);
+ if (WARN_ON(__i915_reset_in_progress(reset_counter))) {
+ ret = -EIO;
+ goto error;
+ }
+
+ i915_gem_reset(dev);
- ret = intel_gpu_reset(dev);
+ ret = intel_gpu_reset(dev, ALL_ENGINES);
/* Also reset the gpu hangman. */
- if (simulated) {
+ if (error->stop_rings != 0) {
DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
- dev_priv->gpu_error.stop_rings = 0;
+ error->stop_rings = 0;
if (ret == -ENODEV) {
DRM_INFO("Reset not implemented, but ignoring "
"error for simulated gpu hangs\n");
@@ -928,9 +967,11 @@ int i915_reset(struct drm_device *dev)
pr_notice("drm/i915: Resetting chip after gpu hang\n");
if (ret) {
- DRM_ERROR("Failed to reset chip: %i\n", ret);
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ if (ret != -ENODEV)
+ DRM_ERROR("Failed to reset chip: %i\n", ret);
+ else
+ DRM_DEBUG_DRIVER("GPU reset disabled\n");
+ goto error;
}
intel_overlay_reset(dev_priv);
@@ -949,20 +990,14 @@ int i915_reset(struct drm_device *dev)
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
-
- /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
- dev_priv->gpu_error.reload_in_reset = true;
-
ret = i915_gem_init_hw(dev);
-
- dev_priv->gpu_error.reload_in_reset = false;
-
- mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("Failed hw init on reset %d\n", ret);
- return ret;
+ goto error;
}
+ mutex_unlock(&dev->struct_mutex);
+
/*
* rps/rc6 re-init is necessary to restore state lost after the
* reset and the re-install of gt irqs. Skip for ironlake per
@@ -973,6 +1008,11 @@ int i915_reset(struct drm_device *dev)
intel_enable_gt_powersave(dev);
return 0;
+
+error:
+ atomic_or(I915_WEDGED, &error->reset_counter);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
}
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1079,44 +1119,6 @@ static int i915_pm_resume(struct device *dev)
return i915_drm_resume(drm_dev);
}
-static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
-{
- hsw_enable_pc8(dev_priv);
-
- return 0;
-}
-
-static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
-
- /* TODO: when DC5 support is added disable DC5 here. */
-
- broxton_ddi_phy_uninit(dev);
- broxton_uninit_cdclk(dev);
- bxt_enable_dc9(dev_priv);
-
- return 0;
-}
-
-static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
-
- /* TODO: when CSR FW support is added make sure the FW is loaded */
-
- bxt_disable_dc9(dev_priv);
-
- /*
- * TODO: when DC5 support is added enable DC5 here if the CSR FW
- * is available.
- */
- broxton_init_cdclk(dev);
- broxton_ddi_phy_init(dev);
-
- return 0;
-}
-
/*
* Save all Gunit registers that may be lost after a D3 and a subsequent
* S0i[R123] transition. The list of registers needing a save/restore is
@@ -1420,7 +1422,7 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
if (err)
goto err2;
- if (!IS_CHERRYVIEW(dev_priv->dev))
+ if (!IS_CHERRYVIEW(dev_priv))
vlv_save_gunit_s0ix_state(dev_priv);
err = vlv_force_gfx_clock(dev_priv, false);
@@ -1452,7 +1454,7 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
*/
ret = vlv_force_gfx_clock(dev_priv, true);
- if (!IS_CHERRYVIEW(dev_priv->dev))
+ if (!IS_CHERRYVIEW(dev_priv))
vlv_restore_gunit_s0ix_state(dev_priv);
err = vlv_allow_gt_wake(dev_priv, true);
@@ -1522,7 +1524,16 @@ static int intel_runtime_suspend(struct device *device)
intel_suspend_gt_powersave(dev);
intel_runtime_pm_disable_interrupts(dev_priv);
- ret = intel_suspend_complete(dev_priv);
+ ret = 0;
+ if (IS_BROXTON(dev_priv)) {
+ bxt_display_core_uninit(dev_priv);
+ bxt_enable_dc9(dev_priv);
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ hsw_enable_pc8(dev_priv);
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ ret = vlv_suspend_complete(dev_priv);
+ }
+
if (ret) {
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -1596,12 +1607,17 @@ static int intel_runtime_resume(struct device *device)
if (IS_GEN6(dev_priv))
intel_init_pch_refclk(dev);
- if (IS_BROXTON(dev))
- ret = bxt_resume_prepare(dev_priv);
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (IS_BROXTON(dev)) {
+ bxt_disable_dc9(dev_priv);
+ bxt_display_core_init(dev_priv, true);
+ if (dev_priv->csr.dmc_payload &&
+ (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
+ gen9_enable_dc5(dev_priv);
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
hsw_disable_pc8(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
ret = vlv_resume_prepare(dev_priv, true);
+ }
/*
* No point of rolling back things in case of an error, as the best
@@ -1632,26 +1648,6 @@ static int intel_runtime_resume(struct device *device)
return ret;
}
-/*
- * This function implements common functionality of runtime and system
- * suspend sequence.
- */
-static int intel_suspend_complete(struct drm_i915_private *dev_priv)
-{
- int ret;
-
- if (IS_BROXTON(dev_priv))
- ret = bxt_suspend_complete(dev_priv);
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- ret = hsw_suspend_complete(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- ret = vlv_suspend_complete(dev_priv);
- else
- ret = 0;
-
- return ret;
-}
-
static const struct dev_pm_ops i915_pm_ops = {
/*
* S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
@@ -1772,10 +1768,8 @@ static int __init i915_init(void)
if (i915.modeset == 0)
driver.driver_features &= ~DRIVER_MODESET;
-#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force() && i915.modeset == -1)
driver.driver_features &= ~DRIVER_MODESET;
-#endif
if (!(driver.driver_features & DRIVER_MODESET)) {
/* Silently fail loading to not upset userspace. */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3f69a6792..489c72885 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -33,33 +33,40 @@
#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>
-#include <drm/drmP.h>
-#include "i915_params.h"
-#include "i915_reg.h"
-#include "intel_bios.h"
-#include "intel_ringbuffer.h"
-#include "intel_lrc.h"
-#include "i915_gem_gtt.h"
-#include "i915_gem_render_state.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#include <drm/intel-gtt.h>
-#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
-#include <drm/drm_gem.h>
#include <linux/backlight.h>
#include <linux/hashtable.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
+#include <linux/shmem_fs.h>
+
+#include <drm/drmP.h>
+#include <drm/intel-gtt.h>
+#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
+#include <drm/drm_gem.h>
+
+#include "i915_params.h"
+#include "i915_reg.h"
+
+#include "intel_bios.h"
+#include "intel_dpll_mgr.h"
#include "intel_guc.h"
+#include "intel_lrc.h"
+#include "intel_ringbuffer.h"
+
+#include "i915_gem.h"
+#include "i915_gem_gtt.h"
+#include "i915_gem_render_state.h"
/* General customization:
*/
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20160229"
+#define DRIVER_DATE "20160425"
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
@@ -97,6 +104,10 @@
#define I915_STATE_WARN_ON(x) \
I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
+bool __i915_inject_load_failure(const char *func, int line);
+#define i915_inject_load_failure() \
+ __i915_inject_load_failure(__func__, __LINE__)
+
static inline const char *yesno(bool v)
{
return v ? "yes" : "no";
@@ -122,9 +133,35 @@ enum transcoder {
TRANSCODER_B,
TRANSCODER_C,
TRANSCODER_EDP,
+ TRANSCODER_DSI_A,
+ TRANSCODER_DSI_C,
I915_MAX_TRANSCODERS
};
-#define transcoder_name(t) ((t) + 'A')
+
+static inline const char *transcoder_name(enum transcoder transcoder)
+{
+ switch (transcoder) {
+ case TRANSCODER_A:
+ return "A";
+ case TRANSCODER_B:
+ return "B";
+ case TRANSCODER_C:
+ return "C";
+ case TRANSCODER_EDP:
+ return "EDP";
+ case TRANSCODER_DSI_A:
+ return "DSI A";
+ case TRANSCODER_DSI_C:
+ return "DSI C";
+ default:
+ return "<invalid>";
+ }
+}
+
+static inline bool transcoder_is_dsi(enum transcoder transcoder)
+{
+ return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
+}
/*
* I915_MAX_PLANES in the enum below is the maximum (across all platforms)
@@ -176,6 +213,8 @@ enum intel_display_power_domain {
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_EDP,
+ POWER_DOMAIN_TRANSCODER_DSI_A,
+ POWER_DOMAIN_TRANSCODER_DSI_C,
POWER_DOMAIN_PORT_DDI_A_LANES,
POWER_DOMAIN_PORT_DDI_B_LANES,
POWER_DOMAIN_PORT_DDI_C_LANES,
@@ -273,6 +312,10 @@ struct i915_hotplug {
(__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
(__s)++)
+#define for_each_port_masked(__port, __ports_mask) \
+ for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
+ for_each_if ((__ports_mask) & (1 << (__port)))
+
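A brief sketch of the new port iterator, using the port_name() helper already used by the debugfs changes in this diff; the function name is illustrative only.

static void print_ports_in_mask(struct seq_file *m, unsigned int port_mask)
{
	enum port port;

	/* Visits only the ports whose bit is set in port_mask. */
	for_each_port_masked(port, port_mask)
		seq_printf(m, "port %c\n", port_name(port));
}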
#define for_each_crtc(dev, crtc) \
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
@@ -340,81 +383,6 @@ struct drm_i915_file_private {
unsigned int bsd_ring;
};
-enum intel_dpll_id {
- DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
- /* real shared dpll ids must be >= 0 */
- DPLL_ID_PCH_PLL_A = 0,
- DPLL_ID_PCH_PLL_B = 1,
- /* hsw/bdw */
- DPLL_ID_WRPLL1 = 0,
- DPLL_ID_WRPLL2 = 1,
- DPLL_ID_SPLL = 2,
-
- /* skl */
- DPLL_ID_SKL_DPLL1 = 0,
- DPLL_ID_SKL_DPLL2 = 1,
- DPLL_ID_SKL_DPLL3 = 2,
-};
-#define I915_NUM_PLLS 3
-
-struct intel_dpll_hw_state {
- /* i9xx, pch plls */
- uint32_t dpll;
- uint32_t dpll_md;
- uint32_t fp0;
- uint32_t fp1;
-
- /* hsw, bdw */
- uint32_t wrpll;
- uint32_t spll;
-
- /* skl */
- /*
- * DPLL_CTRL1 has 6 bits for each DPLL. We store those in the
- * lower part of ctrl1 and they get shifted into position when writing
- * the register. This allows us to easily compare the state to share
- * the DPLL.
- */
- uint32_t ctrl1;
- /* HDMI only, 0 when used for DP */
- uint32_t cfgcr1, cfgcr2;
-
- /* bxt */
- uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
- pcsdw12;
-};
-
-struct intel_shared_dpll_config {
- unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
- struct intel_dpll_hw_state hw_state;
-};
-
-struct intel_shared_dpll {
- struct intel_shared_dpll_config config;
-
- int active; /* count of number of active CRTCs (i.e. DPMS on) */
- bool on; /* is the PLL actually active? Disabled during modeset */
- const char *name;
- /* should match the index in the dev_priv->shared_dplls array */
- enum intel_dpll_id id;
- /* The mode_set hook is optional and should be used together with the
- * intel_prepare_shared_dpll function. */
- void (*mode_set)(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll);
- void (*enable)(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll);
- void (*disable)(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll);
- bool (*get_hw_state)(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state);
-};
-
-#define SKL_DPLL0 0
-#define SKL_DPLL1 1
-#define SKL_DPLL2 2
-#define SKL_DPLL3 3
-
/* Used by dp and fdi links */
struct intel_link_m_n {
uint32_t tu;
@@ -533,7 +501,8 @@ struct drm_i915_error_state {
u32 cpu_ring_head;
u32 cpu_ring_tail;
- u32 semaphore_seqno[I915_NUM_RINGS - 1];
+ u32 last_seqno;
+ u32 semaphore_seqno[I915_NUM_ENGINES - 1];
/* Register state */
u32 start;
@@ -553,7 +522,7 @@ struct drm_i915_error_state {
u32 fault_reg;
u64 faddr;
u32 rc_psmi; /* sleep state */
- u32 semaphore_mboxes[I915_NUM_RINGS - 1];
+ u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
struct drm_i915_error_object {
int page_count;
@@ -561,6 +530,8 @@ struct drm_i915_error_state {
u32 *pages[0];
} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
+ struct drm_i915_error_object *wa_ctx;
+
struct drm_i915_error_request {
long jiffies;
u32 seqno;
@@ -577,12 +548,12 @@ struct drm_i915_error_state {
pid_t pid;
char comm[TASK_COMM_LEN];
- } ring[I915_NUM_RINGS];
+ } ring[I915_NUM_ENGINES];
struct drm_i915_error_buffer {
u32 size;
u32 name;
- u32 rseqno[I915_NUM_RINGS], wseqno;
+ u32 rseqno[I915_NUM_ENGINES], wseqno;
u64 gtt_offset;
u32 read_domains;
u32 write_domain;
@@ -611,27 +582,12 @@ struct dpll;
struct drm_i915_display_funcs {
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
- /**
- * find_dpll() - Find the best values for the PLL
- * @limit: limits for the PLL
- * @crtc: current CRTC
- * @target: target frequency in kHz
- * @refclk: reference clock frequency in kHz
- * @match_clock: if provided, @best_clock P divider must
- * match the P divider from @match_clock
- * used for LVDS downclocking
- * @best_clock: best PLL values found
- *
- * Returns true on success, false on failure.
- */
- bool (*find_dpll)(const struct intel_limit *limit,
- struct intel_crtc_state *crtc_state,
- int target, int refclk,
- struct dpll *match_clock,
- struct dpll *best_clock);
- int (*compute_pipe_wm)(struct intel_crtc *crtc,
- struct drm_atomic_state *state);
- void (*program_watermarks)(struct intel_crtc_state *cstate);
+ int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
+ int (*compute_intermediate_wm)(struct drm_device *dev,
+ struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *newstate);
+ void (*initial_watermarks)(struct intel_crtc_state *cstate);
+ void (*optimize_watermarks)(struct intel_crtc_state *cstate);
void (*update_wm)(struct drm_crtc *crtc);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
@@ -662,6 +618,9 @@ struct drm_i915_display_funcs {
/* render clock increase/decrease */
/* display clock increase/decrease */
/* pll clock increase/decrease */
+
+ void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
+ void (*load_luts)(struct drm_crtc_state *crtc_state);
};
enum forcewake_domain_id {
@@ -681,6 +640,13 @@ enum forcewake_domains {
FORCEWAKE_MEDIA)
};
+#define FW_REG_READ (1)
+#define FW_REG_WRITE (2)
+
+enum forcewake_domains
+intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
+ i915_reg_t reg, unsigned int op);
+
struct intel_uncore_funcs {
void (*force_wake_get)(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
@@ -713,8 +679,9 @@ struct intel_uncore {
struct intel_uncore_forcewake_domain {
struct drm_i915_private *i915;
enum forcewake_domain_id id;
+ enum forcewake_domains mask;
unsigned wake_count;
- struct timer_list timer;
+ struct hrtimer timer;
i915_reg_t reg_set;
u32 val_set;
u32 val_clear;
@@ -727,14 +694,14 @@ struct intel_uncore {
};
/* Iterate over initialised fw domains */
-#define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \
- for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
- (i__) < FW_DOMAIN_ID_COUNT; \
- (i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \
- for_each_if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))
+#define for_each_fw_domain_masked(domain__, mask__, dev_priv__) \
+ for ((domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
+ (domain__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \
+ (domain__)++) \
+ for_each_if ((mask__) & (domain__)->mask)
-#define for_each_fw_domain(domain__, dev_priv__, i__) \
- for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
+#define for_each_fw_domain(domain__, dev_priv__) \
+ for_each_fw_domain_masked(domain__, FORCEWAKE_ALL, dev_priv__)
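A hedged sketch of the reworked forcewake iterator, which no longer needs an index now that each domain carries its own mask; the helper name below is invented for illustration.

static unsigned int count_awake_fw_domains(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int awake = 0;

	/* Walks every initialised domain; the masked variant takes a domain mask. */
	for_each_fw_domain(domain, dev_priv)
		if (domain->wake_count)
			awake++;

	return awake;
}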
#define CSR_VERSION(major, minor) ((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version) ((version) >> 16)
@@ -750,6 +717,7 @@ struct intel_csr {
i915_reg_t mmioaddr[8];
uint32_t mmiodata[8];
uint32_t dc_state;
+ uint32_t allowed_dc_mask;
};
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -779,6 +747,7 @@ struct intel_csr {
func(overlay_needs_physical) sep \
func(supports_tv) sep \
func(has_llc) sep \
+ func(has_snoop) sep \
func(has_ddi) sep \
func(has_fpga_dbg)
@@ -810,6 +779,11 @@ struct intel_device_info {
u8 has_slice_pg:1;
u8 has_subslice_pg:1;
u8 has_eu_pg:1;
+
+ struct color_luts {
+ u16 degamma_lut_size;
+ u16 gamma_lut_size;
+ } color;
};
#undef DEFINE_FLAG
@@ -891,7 +865,7 @@ struct intel_context {
struct i915_vma *lrc_vma;
u64 lrc_desc;
uint32_t *lrc_reg_state;
- } engine[I915_NUM_RINGS];
+ } engine[I915_NUM_ENGINES];
struct list_head link;
};
@@ -1016,6 +990,7 @@ enum intel_pch {
PCH_CPT, /* Cougarpoint PCH */
PCH_LPT, /* Lynxpoint PCH */
PCH_SPT, /* Sunrisepoint PCH */
+ PCH_KBP, /* Kabypoint PCH */
PCH_NOP,
};
@@ -1036,6 +1011,7 @@ struct intel_fbc_work;
struct intel_gmbus {
struct i2c_adapter adapter;
+#define GMBUS_FORCE_BIT_RETRY (1U << 31)
u32 force_bit;
u32 reg0;
i915_reg_t gpio_reg;
@@ -1159,6 +1135,7 @@ struct intel_gen6_power_mgmt {
u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
u8 rp1_freq; /* "less than" RP0 power/frequency */
u8 rp0_freq; /* Non-overclocked max frequency. */
+ u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */
u8 up_threshold; /* Current %busy required to upclock */
u8 down_threshold; /* Current %busy required to downclock */
@@ -1298,6 +1275,7 @@ struct i915_gem_mm {
struct i915_hw_ppgtt *aliasing_ppgtt;
struct notifier_block oom_notifier;
+ struct notifier_block vmap_notifier;
struct shrinker shrinker;
bool shrinker_no_lock_stealing;
@@ -1423,9 +1401,6 @@ struct i915_gpu_error {
/* For missed irq/seqno simulation. */
unsigned int test_irq_rings;
-
- /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
- bool reload_in_reset;
};
enum modeset_restore {
@@ -1482,21 +1457,23 @@ struct intel_vbt_data {
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
unsigned int fdi_rx_polarity_inverted:1;
- unsigned int has_mipi:1;
+ unsigned int panel_type:4;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
enum drrs_support_type drrs_type;
- /* eDP */
- int edp_rate;
- int edp_lanes;
- int edp_preemphasis;
- int edp_vswing;
- bool edp_initialized;
- bool edp_support;
- int edp_bpp;
- struct edp_power_seq edp_pps;
+ struct {
+ int rate;
+ int lanes;
+ int preemphasis;
+ int vswing;
+ bool low_vswing;
+ bool initialized;
+ bool support;
+ int bpp;
+ struct edp_power_seq pps;
+ } edp;
struct {
bool full_link;
@@ -1516,7 +1493,6 @@ struct intel_vbt_data {
/* MIPI DSI */
struct {
- u16 port;
u16 panel_id;
struct mipi_config *config;
struct mipi_pps_data *pps;
@@ -1532,6 +1508,7 @@ struct intel_vbt_data {
union child_device_config *child_dev;
struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
+ struct sdvo_device_mapping sdvo_mappings[2];
};
enum intel_ddb_partitioning {
@@ -1706,7 +1683,7 @@ struct i915_wa_reg {
struct i915_workarounds {
struct i915_wa_reg reg[I915_MAX_WA_REGS];
u32 count;
- u32 hw_whitelist_count[I915_NUM_RINGS];
+ u32 hw_whitelist_count[I915_NUM_ENGINES];
};
struct i915_virtual_gpu {
@@ -1719,7 +1696,7 @@ struct i915_execbuffer_params {
uint32_t dispatch_flags;
uint32_t args_batch_start_offset;
uint64_t batch_obj_vm_offset;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct drm_i915_gem_object *batch_obj;
struct intel_context *ctx;
struct drm_i915_gem_request *request;
@@ -1771,7 +1748,7 @@ struct drm_i915_private {
wait_queue_head_t gmbus_wait_queue;
struct pci_dev *bridge_dev;
- struct intel_engine_cs ring[I915_NUM_RINGS];
+ struct intel_engine_cs engine[I915_NUM_ENGINES];
struct drm_i915_gem_object *semaphore_obj;
uint32_t last_seqno, next_seqno;
@@ -1829,6 +1806,7 @@ struct drm_i915_private {
unsigned int skl_boot_cdclk;
unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
unsigned int max_dotclk_freq;
+ unsigned int rawclk_freq;
unsigned int hpll_freq;
unsigned int czclk_freq;
@@ -1855,7 +1833,7 @@ struct drm_i915_private {
struct drm_atomic_state *modeset_restore_state;
struct list_head vm_list; /* Global list of all address spaces */
- struct i915_gtt gtt; /* VM representing the global address space */
+ struct i915_ggtt ggtt; /* VM representing the global address space */
struct i915_gem_mm mm;
DECLARE_HASHTABLE(mm_structs, 7);
@@ -1863,8 +1841,6 @@ struct drm_i915_private {
/* Kernel Modesetting */
- struct sdvo_device_mapping sdvo_mappings[2];
-
struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
wait_queue_head_t pending_flip_queue;
@@ -1876,6 +1852,14 @@ struct drm_i915_private {
/* dpll and cdclk state is protected by connection_mutex */
int num_shared_dpll;
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+ const struct intel_dpll_mgr *dpll_mgr;
+
+ /*
+ * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
+ * Must be global rather than per dpll, because on some platforms
+ * plls share registers.
+ */
+ struct mutex dpll_lock;
unsigned int active_crtcs;
unsigned int min_pixclk[I915_MAX_PIPES];
@@ -1884,9 +1868,6 @@ struct drm_i915_private {
struct i915_workarounds workarounds;
- /* Reclocking support */
- bool render_reclock_avail;
-
struct i915_frontbuffer_tracking fb_tracking;
u16 orig_clock;
@@ -1896,7 +1877,7 @@ struct drm_i915_private {
struct intel_l3_parity l3_parity;
/* Cannot be determined by PCIID. You must always read a register. */
- size_t ellc_size;
+ u32 edram_cap;
/* gen6+ rps state */
struct intel_gen6_power_mgmt rps;
@@ -1936,7 +1917,15 @@ struct drm_i915_private {
u32 fdi_rx_config;
+ /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
u32 chv_phy_control;
+ /*
+ * Shadows for CHV DPLL_MD regs to keep the state
+ * checker somewhat working in the presence of hardware
+ * crappiness (can't read out DPLL_MD for pipes B & C).
+ */
+ u32 chv_dpll_md[I915_MAX_PIPES];
+ u32 bxt_phy_grc;
u32 suspend_count;
bool suspended_to_idle;
@@ -1982,6 +1971,13 @@ struct drm_i915_private {
};
uint8_t max_level;
+
+ /*
+ * Should be held around atomic WM register writing; also
+ * protects intel_crtc->wm.active and
+ * cstate->wm.need_postvbl_update.
+ */
+ struct mutex wm_mutex;
} wm;
struct i915_runtime_pm pm;
@@ -1991,15 +1987,13 @@ struct drm_i915_private {
int (*execbuf_submit)(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas);
- int (*init_rings)(struct drm_device *dev);
- void (*cleanup_ring)(struct intel_engine_cs *ring);
- void (*stop_ring)(struct intel_engine_cs *ring);
+ int (*init_engines)(struct drm_device *dev);
+ void (*cleanup_engine)(struct intel_engine_cs *engine);
+ void (*stop_engine)(struct intel_engine_cs *engine);
} gt;
struct intel_context *kernel_context;
- bool edp_low_vswing;
-
/* perform PHY state sanity checks? */
bool chv_phy_assert[2];
@@ -2026,10 +2020,28 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
return container_of(guc, struct drm_i915_private, guc);
}
-/* Iterate over initialised rings */
-#define for_each_ring(ring__, dev_priv__, i__) \
- for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
- for_each_if ((((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__))))
+/* Simple iterator over all initialised engines */
+#define for_each_engine(engine__, dev_priv__) \
+ for ((engine__) = &(dev_priv__)->engine[0]; \
+ (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
+ (engine__)++) \
+ for_each_if (intel_engine_initialized(engine__))
+
+/* Iterator with engine_id */
+#define for_each_engine_id(engine__, dev_priv__, id__) \
+ for ((engine__) = &(dev_priv__)->engine[0], (id__) = 0; \
+ (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
+ (engine__)++) \
+ for_each_if (((id__) = (engine__)->id, \
+ intel_engine_initialized(engine__)))
+
+/* Iterator over subset of engines selected by mask */
+#define for_each_engine_masked(engine__, dev_priv__, mask__) \
+ for ((engine__) = &(dev_priv__)->engine[0]; \
+ (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
+ (engine__)++) \
+ for_each_if (((mask__) & intel_engine_flag(engine__)) && \
+ intel_engine_initialized(engine__))
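A minimal sketch of the renamed engine iterators, mirroring the for_each_engine_id() usage in the debugfs changes earlier in this diff; the function name is illustrative only.

static void print_engines(struct seq_file *m, struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Only engines that pass intel_engine_initialized() are visited. */
	for_each_engine_id(engine, dev_priv, id)
		seq_printf(m, "engine %d: %s\n", id, engine->name);
}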
enum hdmi_force_audio {
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
@@ -2099,7 +2111,7 @@ struct drm_i915_gem_object {
struct drm_mm_node *stolen;
struct list_head global_list;
- struct list_head ring_list[I915_NUM_RINGS];
+ struct list_head engine_list[I915_NUM_ENGINES];
/** Used in execbuf to temporarily hold a ref */
struct list_head obj_exec_link;
@@ -2110,7 +2122,7 @@ struct drm_i915_gem_object {
* rendering and so a non-zero seqno), and is not set if it is on
* inactive (ready to be unbound) list.
*/
- unsigned int active:I915_NUM_RINGS;
+ unsigned int active:I915_NUM_ENGINES;
/**
* This is set if the object has been written to since last bound
@@ -2174,10 +2186,7 @@ struct drm_i915_gem_object {
struct scatterlist *sg;
int last;
} get_page;
-
- /* prime dma-buf support */
- void *dma_buf_vmapping;
- int vmapping_count;
+ void *mapping;
/** Breadcrumb of last rendering to the buffer.
* There can only be one writer, but we allow for multiple readers.
@@ -2189,7 +2198,7 @@ struct drm_i915_gem_object {
* read request. This allows for the CPU to read from an active
* buffer by only waiting for the write to complete.
* */
- struct drm_i915_gem_request *last_read_req[I915_NUM_RINGS];
+ struct drm_i915_gem_request *last_read_req[I915_NUM_ENGINES];
struct drm_i915_gem_request *last_write_req;
/** Breadcrumb of last fenced GPU access to the buffer. */
struct drm_i915_gem_request *last_fenced_req;
@@ -2244,7 +2253,8 @@ struct drm_i915_gem_request {
/** On Which ring this request was generated */
struct drm_i915_private *i915;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
+ unsigned reset_counter;
/** GEM sequence number associated with the previous request,
* when the HWS breadcrumb is equal to this the GPU is processing
@@ -2325,7 +2335,6 @@ struct drm_i915_gem_request {
struct drm_i915_gem_request * __must_check
i915_gem_request_alloc(struct intel_engine_cs *engine,
struct intel_context *ctx);
-void i915_gem_request_cancel(struct drm_i915_gem_request *req);
void i915_gem_request_free(struct kref *req_ref);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
struct drm_file *file);
@@ -2337,9 +2346,9 @@ i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
}
static inline struct intel_engine_cs *
-i915_gem_request_get_ring(struct drm_i915_gem_request *req)
+i915_gem_request_get_engine(struct drm_i915_gem_request *req)
{
- return req ? req->ring : NULL;
+ return req ? req->engine : NULL;
}
static inline struct drm_i915_gem_request *
@@ -2353,7 +2362,7 @@ i915_gem_request_reference(struct drm_i915_gem_request *req)
static inline void
i915_gem_request_unreference(struct drm_i915_gem_request *req)
{
- WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex));
kref_put(&req->ref, i915_gem_request_free);
}
@@ -2365,7 +2374,7 @@ i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
if (!req)
return;
- dev = req->ring->dev;
+ dev = req->engine->dev;
if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
mutex_unlock(&dev->struct_mutex);
}
@@ -2495,6 +2504,7 @@ struct drm_i915_cmd_table {
__p; \
})
#define INTEL_INFO(p) (&__I915__(p)->info)
+#define INTEL_GEN(p) (INTEL_INFO(p)->gen)
#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
@@ -2593,6 +2603,15 @@ struct drm_i915_cmd_table {
#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
+#define KBL_REVID_A0 0x0
+#define KBL_REVID_B0 0x1
+#define KBL_REVID_C0 0x2
+#define KBL_REVID_D0 0x3
+#define KBL_REVID_E0 0x4
+
+#define IS_KBL_REVID(p, since, until) \
+ (IS_KABYLAKE(p) && IS_REVID(p, since, until))
+
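As with the IS_BXT_REVID() checks earlier in this diff, a stepping-limited workaround would be gated roughly as sketched below; the register and bit names are placeholders, not real workarounds.

	/* Hypothetical: apply a fix only on KBL A0 through C0 steppings. */
	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_C0))
		I915_WRITE(EXAMPLE_WA_REG, I915_READ(EXAMPLE_WA_REG) | EXAMPLE_WA_BIT);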
/*
* The genX designation typically refers to the render engine, so render
* capability related checks should use IS_GEN, while display and other checks
@@ -2613,13 +2632,17 @@ struct drm_i915_cmd_table {
#define BLT_RING (1<<BCS)
#define VEBOX_RING (1<<VECS)
#define BSD2_RING (1<<VCS2)
+#define ALL_ENGINES (~0)
+
#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING)
#define HAS_BSD2(dev) (INTEL_INFO(dev)->ring_mask & BSD2_RING)
#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING)
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
+#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop)
+#define HAS_EDRAM(dev) (__I915__(dev)->edram_cap & EDRAM_ENABLED)
#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
- __I915__(dev)->ellc_size)
+ HAS_EDRAM(dev))
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
@@ -2673,7 +2696,7 @@ struct drm_i915_cmd_table {
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
- IS_KABYLAKE(dev))
+ IS_KABYLAKE(dev) || IS_BROXTON(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
@@ -2697,10 +2720,13 @@ struct drm_i915_cmd_table {
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
+#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
+#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
+#define HAS_PCH_KBP(dev) (INTEL_PCH_TYPE(dev) == PCH_KBP)
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
@@ -2729,6 +2755,13 @@ extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
extern int i915_resume_switcheroo(struct drm_device *dev);
/* i915_dma.c */
+void __printf(3, 4)
+__i915_printk(struct drm_i915_private *dev_priv, const char *level,
+ const char *fmt, ...);
+
+#define i915_report_error(dev_priv, fmt, ...) \
+ __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
+
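For illustration only (not in the patch): i915_report_error() is simply __i915_printk() at KERN_ERR severity, so a caller might write something like the following (the message text is invented for the example):

    i915_report_error(dev_priv, "GPU reset failed, declaring wedged\n");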
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
@@ -2741,9 +2774,11 @@ extern void i915_driver_postclose(struct drm_device *dev,
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
#endif
-extern int intel_gpu_reset(struct drm_device *dev);
+extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask);
extern bool intel_has_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
+extern int intel_guc_reset(struct drm_i915_private *dev_priv);
+extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@@ -2760,7 +2795,7 @@ bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
__printf(3, 4)
-void i915_handle_error(struct drm_device *dev, bool wedged,
+void i915_handle_error(struct drm_device *dev, u32 engine_mask,
const char *fmt, ...);
extern void intel_irq_init(struct drm_i915_private *dev_priv);
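For illustration only (not in the patch): with the new engine_mask argument a caller can request a full reset via ALL_ENGINES or single out one engine, roughly:

    /* sketch: reset every engine, or just the one that appears hung */
    i915_handle_error(dev, ALL_ENGINES, "simulated full GPU hang");
    i915_handle_error(dev, intel_engine_flag(engine), "%s hung", engine->name);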
@@ -2787,6 +2822,8 @@ void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
+u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
+
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
static inline bool intel_vgpu_active(struct drm_device *dev)
{
@@ -2865,7 +2902,6 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct drm_i915_gem_request *req);
-void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params);
int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas);
@@ -2896,6 +2932,7 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load_init(struct drm_device *dev);
void i915_gem_load_cleanup(struct drm_device *dev);
+void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -2980,12 +3017,46 @@ static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
BUG_ON(obj->pages == NULL);
obj->pages_pin_count++;
}
+
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
BUG_ON(obj->pages_pin_count == 0);
obj->pages_pin_count--;
}
+/**
+ * i915_gem_object_pin_map - return a contiguous mapping of the entire object
+ * @obj - the object to map into kernel address space
+ *
+ * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
+ * pages and then returns a contiguous mapping of the backing storage into
+ * the kernel address space.
+ *
+ * The caller must hold the struct_mutex, and is responsible for calling
+ * i915_gem_object_unpin_map() when the mapping is no longer required.
+ *
+ * Returns the pointer through which to access the mapped object, or an
+ * ERR_PTR() on error.
+ */
+void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj);
+
+/**
+ * i915_gem_object_unpin_map - releases an earlier mapping
+ * @obj - the object to unmap
+ *
+ * After pinning the object and mapping its pages, once you are finished
+ * with your access, call i915_gem_object_unpin_map() to release the pin
+ * upon the mapping. Once the pin count reaches zero, that mapping may be
+ * removed.
+ *
+ * The caller must hold the struct_mutex.
+ */
+static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ i915_gem_object_unpin_pages(obj);
+}
+
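For illustration only (not in the patch), a minimal pin_map/unpin_map pairing per the kernel-doc above might look like this; buf and len stand in for caller data and struct_mutex is assumed held:

    void *vaddr;

    vaddr = i915_gem_object_pin_map(obj);
    if (IS_ERR(vaddr))
        return PTR_ERR(vaddr);

    memcpy(vaddr, buf, len);        /* linear CPU access to the whole object */
    i915_gem_object_unpin_map(obj);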
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_engine_cs *to,
@@ -3009,42 +3080,68 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
bool lazy_coherency)
{
- u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
- return i915_seqno_passed(seqno, req->previous_seqno);
+ if (!lazy_coherency && req->engine->irq_seqno_barrier)
+ req->engine->irq_seqno_barrier(req->engine);
+ return i915_seqno_passed(req->engine->get_seqno(req->engine),
+ req->previous_seqno);
}
static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
bool lazy_coherency)
{
- u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
- return i915_seqno_passed(seqno, req->seqno);
+ if (!lazy_coherency && req->engine->irq_seqno_barrier)
+ req->engine->irq_seqno_barrier(req->engine);
+ return i915_seqno_passed(req->engine->get_seqno(req->engine),
+ req->seqno);
}
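For reference, the i915_seqno_passed() helper used above (declared just before this hunk) is effectively a wrap-safe comparison:

    static inline bool
    i915_seqno_passed(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq1 - seq2) >= 0;    /* true if seq1 is at or after seq2 */
    }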
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_engine_cs *ring);
+i915_gem_find_active_request(struct intel_engine_cs *engine);
bool i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
-int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
- bool interruptible);
+void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
+
+static inline u32 i915_reset_counter(struct i915_gpu_error *error)
+{
+ return atomic_read(&error->reset_counter);
+}
+
+static inline bool __i915_reset_in_progress(u32 reset)
+{
+ return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG);
+}
+
+static inline bool __i915_reset_in_progress_or_wedged(u32 reset)
+{
+ return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
+}
+
+static inline bool __i915_terminally_wedged(u32 reset)
+{
+ return unlikely(reset & I915_WEDGED);
+}
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
- return unlikely(atomic_read(&error->reset_counter)
- & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
+ return __i915_reset_in_progress(i915_reset_counter(error));
+}
+
+static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
+{
+ return __i915_reset_in_progress_or_wedged(i915_reset_counter(error));
}
static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
- return atomic_read(&error->reset_counter) & I915_WEDGED;
+ return __i915_terminally_wedged(i915_reset_counter(error));
}
static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
- return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
+ return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
}
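For illustration only (not in the patch), and assuming the existing I915_RESET_IN_PROGRESS_FLAG (bit 0) and I915_WEDGED (bit 31) definitions, the counter encoding these helpers rely on evolves roughly like this:

    u32 counter = 0;                          /* no reset yet: i915_reset_count() == 0     */
    counter |= I915_RESET_IN_PROGRESS_FLAG;   /* reset queued: __i915_reset_in_progress()  */
    counter++;                                /* reset done: counter == 2, count == 1      */
    counter |= I915_WEDGED;                   /* recovery failed: terminally wedged        */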
static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
@@ -3062,11 +3159,11 @@ static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_init(struct drm_device *dev);
-int i915_gem_init_rings(struct drm_device *dev);
+int i915_gem_init_engines(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
-void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+void i915_gem_cleanup_engines(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
void __i915_add_request(struct drm_i915_gem_request *req,
@@ -3077,7 +3174,6 @@ void __i915_add_request(struct drm_i915_gem_request *req,
#define i915_add_request_no_flush(req) \
__i915_add_request(req, NULL, false)
int __i915_wait_request(struct drm_i915_gem_request *req,
- unsigned reset_counter,
bool interruptible,
s64 *timeout,
struct intel_rps_client *rps);
@@ -3157,13 +3253,9 @@ i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
/* Some GGTT VM helpers */
-#define i915_obj_to_ggtt(obj) \
- (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
-
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
- WARN_ON(i915_is_ggtt(vm));
return container_of(vm, struct i915_hw_ppgtt, base);
}
@@ -3176,7 +3268,10 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
- return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj));
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
+ return i915_gem_obj_size(obj, &ggtt->base);
}
static inline int __must_check
@@ -3184,7 +3279,10 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
unsigned flags)
{
- return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj),
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
+ return i915_gem_object_pin(obj, &ggtt->base,
alignment, flags | PIN_GLOBAL);
}
@@ -3299,6 +3397,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
#define I915_SHRINK_UNBOUND 0x2
#define I915_SHRINK_BOUND 0x4
#define I915_SHRINK_ACTIVE 0x8
+#define I915_SHRINK_VMAPS 0x10
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
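For illustration only (not in the patch): the new I915_SHRINK_VMAPS flag lets a vmap-space reclaim path ask for idle kernel mappings to be dropped as well, assuming the (dev_priv, target, flags) signature shown above:

    /* sketch: reclaim kernel vmap space by also purging idle object mappings */
    i915_gem_shrink(dev_priv, -1UL,
                    I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_VMAPS);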
@@ -3345,7 +3444,7 @@ static inline void i915_error_state_buf_release(
{
kfree(eb->buf);
}
-void i915_capture_error_state(struct drm_device *dev, bool wedge,
+void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
const char *error_msg);
void i915_error_state_get(struct drm_device *dev,
struct i915_error_state_file_priv *error_priv);
@@ -3357,10 +3456,10 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(void);
-int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
-void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
-bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
-int i915_parse_cmds(struct intel_engine_cs *ring,
+int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);
+void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine);
+bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
+int i915_parse_cmds(struct intel_engine_cs *engine,
struct drm_i915_gem_object *batch_obj,
struct drm_i915_gem_object *shadow_batch_obj,
u32 batch_start_offset,
@@ -3394,6 +3493,14 @@ extern void intel_i2c_reset(struct drm_device *dev);
/* intel_bios.c */
int intel_bios_init(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
+bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
+bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
+bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
+ enum port port);
/* intel_opregion.c */
#ifdef CONFIG_ACPI
@@ -3405,6 +3512,7 @@ extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
bool enable);
extern int intel_opregion_notify_adapter(struct drm_device *dev,
pci_power_t state);
+extern int intel_opregion_get_panel_type(struct drm_device *dev);
#else
static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
static inline void intel_opregion_init(struct drm_device *dev) { return; }
@@ -3420,6 +3528,10 @@ intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
{
return 0;
}
+static inline int intel_opregion_get_panel_type(struct drm_device *dev)
+{
+ return -ENODEV;
+}
#endif
/* intel_acpi.c */
@@ -3579,11 +3691,6 @@ static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev)
return VGACNTRL;
}
-static inline void __user *to_user_ptr(u64 address)
-{
- return (void __user *)(uintptr_t)address;
-}
-
static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
unsigned long j = msecs_to_jiffies(m);
@@ -3631,11 +3738,11 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
}
}
-static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
+static inline void i915_trace_irq_get(struct intel_engine_cs *engine,
struct drm_i915_gem_request *req)
{
- if (ring->trace_irq_req == NULL && ring->irq_get(ring))
- i915_gem_request_assign(&ring->trace_irq_req, req);
+ if (engine->trace_irq_req == NULL && engine->irq_get(engine))
+ i915_gem_request_assign(&engine->trace_irq_req, req);
}
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dabc08987..aad26851c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -32,14 +32,13 @@
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include "intel_mocs.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
-#define RQ_BUG_ON(expr)
-
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static void
@@ -85,9 +84,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
{
int ret;
-#define EXIT_COND (!i915_reset_in_progress(error) || \
- i915_terminally_wedged(error))
- if (EXIT_COND)
+ if (!i915_reset_in_progress(error))
return 0;
/*
@@ -96,17 +93,16 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
* we should simply try to bail out and fail as gracefully as possible.
*/
ret = wait_event_interruptible_timeout(error->reset_queue,
- EXIT_COND,
+ !i915_reset_in_progress(error),
10*HZ);
if (ret == 0) {
DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
return -EIO;
} else if (ret < 0) {
return ret;
+ } else {
+ return 0;
}
-#undef EXIT_COND
-
- return 0;
}
int i915_mutex_lock_interruptible(struct drm_device *dev)
@@ -130,9 +126,9 @@ int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_get_aperture *args = data;
- struct i915_gtt *ggtt = &dev_priv->gtt;
struct i915_vma *vma;
size_t pinned;
@@ -146,7 +142,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
pinned += vma->node.size;
mutex_unlock(&dev->struct_mutex);
- args->aper_size = dev_priv->gtt.base.total;
+ args->aper_size = ggtt->base.total;
args->aper_available_size = args->aper_size - pinned;
return 0;
@@ -211,11 +207,10 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
BUG_ON(obj->madv == __I915_MADV_PURGED);
ret = i915_gem_object_set_to_cpu_domain(obj, true);
- if (ret) {
+ if (WARN_ON(ret)) {
/* In the event of a disaster, abandon all caches and
* hope for the best.
*/
- WARN_ON(ret != -EIO);
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
@@ -324,7 +319,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
void *vaddr = obj->phys_handle->vaddr + args->offset;
- char __user *user_data = to_user_ptr(args->data_ptr);
+ char __user *user_data = u64_to_user_ptr(args->data_ptr);
int ret = 0;
/* We manually control the domain here and pretend that it
@@ -605,7 +600,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
int needs_clflush = 0;
struct sg_page_iter sg_iter;
- user_data = to_user_ptr(args->data_ptr);
+ user_data = u64_to_user_ptr(args->data_ptr);
remain = args->size;
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
@@ -692,7 +687,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
return 0;
if (!access_ok(VERIFY_WRITE,
- to_user_ptr(args->data_ptr),
+ u64_to_user_ptr(args->data_ptr),
args->size))
return -EFAULT;
@@ -700,7 +695,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
@@ -765,7 +760,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
ssize_t remain;
loff_t offset, page_base;
char __user *user_data;
@@ -783,7 +779,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
if (ret)
goto out_unpin;
- user_data = to_user_ptr(args->data_ptr);
+ user_data = u64_to_user_ptr(args->data_ptr);
remain = args->size;
offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
@@ -807,7 +803,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
* source page isn't available. Return the error and we'll
* retry in the slow path.
*/
- if (fast_user_write(dev_priv->gtt.mappable, page_base,
+ if (fast_user_write(ggtt->mappable, page_base,
page_offset, user_data, page_length)) {
ret = -EFAULT;
goto out_flush;
@@ -907,7 +903,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
int needs_clflush_before = 0;
struct sg_page_iter sg_iter;
- user_data = to_user_ptr(args->data_ptr);
+ user_data = u64_to_user_ptr(args->data_ptr);
remain = args->size;
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
@@ -1036,12 +1032,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
return 0;
if (!access_ok(VERIFY_READ,
- to_user_ptr(args->data_ptr),
+ u64_to_user_ptr(args->data_ptr),
args->size))
return -EFAULT;
if (likely(!i915.prefault_disable)) {
- ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
+ ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
args->size);
if (ret)
return -EFAULT;
@@ -1053,7 +1049,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (ret)
goto put_rpm;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
@@ -1109,27 +1105,19 @@ put_rpm:
return ret;
}
-int
-i915_gem_check_wedge(struct i915_gpu_error *error,
- bool interruptible)
+static int
+i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
{
- if (i915_reset_in_progress(error)) {
+ if (__i915_terminally_wedged(reset_counter))
+ return -EIO;
+
+ if (__i915_reset_in_progress(reset_counter)) {
/* Non-interruptible callers can't handle -EAGAIN, hence return
* -EIO unconditionally for these. */
if (!interruptible)
return -EIO;
- /* Recovery complete, but the reset failed ... */
- if (i915_terminally_wedged(error))
- return -EIO;
-
- /*
- * Check if GPU Reset is in progress - we need intel_ring_begin
- * to work properly to reinit the hw state while the gpu is
- * still marked as reset-in-progress. Handle this with a flag.
- */
- if (!error->reload_in_reset)
- return -EAGAIN;
+ return -EAGAIN;
}
return 0;
@@ -1141,9 +1129,9 @@ static void fake_irq(unsigned long data)
}
static bool missed_irq(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *engine)
{
- return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
+ return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings);
}
static unsigned long local_clock_us(unsigned *cpu)
@@ -1193,7 +1181,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
* takes to sleep on a request, on the order of a microsecond.
*/
- if (req->ring->irq_refcount)
+ if (req->engine->irq_refcount)
return -EBUSY;
/* Only spin if we know the GPU is processing this request */
@@ -1223,7 +1211,6 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
/**
* __i915_wait_request - wait until execution of request has finished
* @req: duh!
- * @reset_counter: reset sequence associated with the given request
* @interruptible: do an interruptible wait (normally yes)
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
*
@@ -1238,16 +1225,15 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
* errno with remaining time filled in timeout argument.
*/
int __i915_wait_request(struct drm_i915_gem_request *req,
- unsigned reset_counter,
bool interruptible,
s64 *timeout,
struct intel_rps_client *rps)
{
- struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
- struct drm_device *dev = ring->dev;
+ struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const bool irq_test_in_progress =
- ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
+ ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
DEFINE_WAIT(wait);
unsigned long timeout_expire;
@@ -1288,7 +1274,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
if (ret == 0)
goto out;
- if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring))) {
+ if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) {
ret = -ENODEV;
goto out;
}
@@ -1296,16 +1282,17 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
for (;;) {
struct timer_list timer;
- prepare_to_wait(&ring->irq_queue, &wait, state);
+ prepare_to_wait(&engine->irq_queue, &wait, state);
/* We need to check whether any gpu reset happened in between
- * the caller grabbing the seqno and now ... */
- if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
- /* ... but upgrade the -EAGAIN to an -EIO if the gpu
- * is truely gone. */
- ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
- if (ret == 0)
- ret = -EAGAIN;
+ * the request being submitted and now. If a reset has occurred,
+ * the request is effectively complete (we either are in the
+ * process of or have discarded the rendering and completely
+ * reset the GPU). The results of the request are lost and we
+ * are free to continue on with the original operation.
+ */
+ if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
+ ret = 0;
break;
}
@@ -1325,11 +1312,11 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
}
timer.function = NULL;
- if (timeout || missed_irq(dev_priv, ring)) {
+ if (timeout || missed_irq(dev_priv, engine)) {
unsigned long expire;
setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
- expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
+ expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
mod_timer(&timer, expire);
}
@@ -1341,9 +1328,9 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
}
}
if (!irq_test_in_progress)
- ring->irq_put(ring);
+ engine->irq_put(engine);
- finish_wait(&ring->irq_queue, &wait);
+ finish_wait(&engine->irq_queue, &wait);
out:
trace_i915_gem_request_wait_end(req);
@@ -1370,7 +1357,6 @@ out:
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
struct drm_file *file)
{
- struct drm_i915_private *dev_private;
struct drm_i915_file_private *file_priv;
WARN_ON(!req || !file || req->file_priv);
@@ -1381,7 +1367,6 @@ int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
if (req->file_priv)
return -EINVAL;
- dev_private = req->ring->dev->dev_private;
file_priv = file->driver_priv;
spin_lock(&file_priv->mm.lock);
@@ -1434,7 +1419,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
static void
__i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *engine = req->ring;
+ struct intel_engine_cs *engine = req->engine;
struct drm_i915_gem_request *tmp;
lockdep_assert_held(&engine->dev->struct_mutex);
@@ -1459,30 +1444,22 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
int
i915_wait_request(struct drm_i915_gem_request *req)
{
- struct drm_device *dev;
- struct drm_i915_private *dev_priv;
+ struct drm_i915_private *dev_priv = req->i915;
bool interruptible;
int ret;
- BUG_ON(req == NULL);
-
- dev = req->ring->dev;
- dev_priv = dev->dev_private;
interruptible = dev_priv->mm.interruptible;
- BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
- ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+ ret = __i915_wait_request(req, interruptible, NULL, NULL);
if (ret)
return ret;
- ret = __i915_wait_request(req,
- atomic_read(&dev_priv->gpu_error.reset_counter),
- interruptible, NULL, NULL);
- if (ret)
- return ret;
+ /* If the GPU hung, we want to keep the requests to find the guilty. */
+ if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error))
+ __i915_gem_request_retire__upto(req);
- __i915_gem_request_retire__upto(req);
return 0;
}
@@ -1505,14 +1482,14 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- i = obj->last_write_req->ring->id;
+ i = obj->last_write_req->engine->id;
if (obj->last_read_req[i] == obj->last_write_req)
i915_gem_object_retire__read(obj, i);
else
i915_gem_object_retire__write(obj);
}
} else {
- for (i = 0; i < I915_NUM_RINGS; i++) {
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
if (obj->last_read_req[i] == NULL)
continue;
@@ -1522,7 +1499,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
i915_gem_object_retire__read(obj, i);
}
- RQ_BUG_ON(obj->active);
+ GEM_BUG_ON(obj->active);
}
return 0;
@@ -1532,14 +1509,15 @@ static void
i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req)
{
- int ring = req->ring->id;
+ int ring = req->engine->id;
if (obj->last_read_req[ring] == req)
i915_gem_object_retire__read(obj, ring);
else if (obj->last_write_req == req)
i915_gem_object_retire__write(obj);
- __i915_gem_request_retire__upto(req);
+ if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error))
+ __i915_gem_request_retire__upto(req);
}
/* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -1552,8 +1530,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_request *requests[I915_NUM_RINGS];
- unsigned reset_counter;
+ struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
int ret, i, n = 0;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -1562,12 +1539,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
if (!obj->active)
return 0;
- ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
- if (ret)
- return ret;
-
- reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
-
if (readonly) {
struct drm_i915_gem_request *req;
@@ -1577,7 +1548,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
requests[n++] = i915_gem_request_reference(req);
} else {
- for (i = 0; i < I915_NUM_RINGS; i++) {
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
struct drm_i915_gem_request *req;
req = obj->last_read_req[i];
@@ -1589,9 +1560,9 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
}
mutex_unlock(&dev->struct_mutex);
+ ret = 0;
for (i = 0; ret == 0 && i < n; i++)
- ret = __i915_wait_request(requests[i], reset_counter, true,
- NULL, rps);
+ ret = __i915_wait_request(requests[i], true, NULL, rps);
mutex_lock(&dev->struct_mutex);
for (i = 0; i < n; i++) {
@@ -1640,7 +1611,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
@@ -1688,7 +1659,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
@@ -1732,10 +1703,10 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
if (args->flags & ~(I915_MMAP_WC))
return -EINVAL;
- if (args->flags & I915_MMAP_WC && !cpu_has_pat)
+ if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
return -ENODEV;
- obj = drm_gem_object_lookup(dev, file, args->handle);
+ obj = drm_gem_object_lookup(file, args->handle);
if (obj == NULL)
return -ENOENT;
@@ -1754,7 +1725,10 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- down_write(&mm->mmap_sem);
+ if (down_write_killable(&mm->mmap_sem)) {
+ drm_gem_object_unreference_unlocked(obj);
+ return -EINTR;
+ }
vma = find_vma(mm, addr);
if (vma)
vma->vm_page_prot =
@@ -1792,7 +1766,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_ggtt_view view = i915_ggtt_view_normal;
pgoff_t page_offset;
unsigned long pfn;
@@ -1827,7 +1802,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
/* Use a partial view if the object is bigger than the aperture. */
- if (obj->base.size >= dev_priv->gtt.mappable_end &&
+ if (obj->base.size >= ggtt->mappable_end &&
obj->tiling_mode == I915_TILING_NONE) {
static const unsigned int chunk_size = 256; // 1 MiB
@@ -1855,7 +1830,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unpin;
/* Finally, remap it using the new GTT offset */
- pfn = dev_priv->gtt.mappable_base +
+ pfn = ggtt->mappable_base +
i915_gem_obj_ggtt_offset_view(obj, &view);
pfn >>= PAGE_SHIFT;
@@ -1964,11 +1939,27 @@ out:
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
+ /* Serialisation between user GTT access and our code depends upon
+ * revoking the CPU's PTE whilst the mutex is held. The next user
+ * pagefault then has to wait until we release the mutex.
+ */
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
if (!obj->fault_mappable)
return;
drm_vma_node_unmap(&obj->base.vma_node,
obj->base.dev->anon_inode->i_mapping);
+
+ /* Ensure that the CPU's PTEs are revoked and there are no outstanding
+ * memory transactions from userspace before we return. The TLB
+ * flushing implied by changing the PTE above *should* be
+ * sufficient, an extra barrier here just provides us with a bit
+ * of paranoid documentation about our requirement to serialise
+ * memory writes before touching registers / GSM.
+ */
+ wmb();
+
obj->fault_mappable = false;
}
@@ -2033,9 +2024,6 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;
- if (drm_vma_node_has_offset(&obj->base.vma_node))
- return 0;
-
dev_priv->mm.shrinker_no_lock_stealing = true;
ret = drm_gem_create_mmap_offset(&obj->base);
@@ -2084,7 +2072,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
if (ret)
return ret;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+ obj = to_intel_bo(drm_gem_object_lookup(file, handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
@@ -2180,11 +2168,10 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
BUG_ON(obj->madv == __I915_MADV_PURGED);
ret = i915_gem_object_set_to_cpu_domain(obj, true);
- if (ret) {
+ if (WARN_ON(ret)) {
/* In the event of a disaster, abandon all caches and
* hope for the best.
*/
- WARN_ON(ret != -EIO);
i915_gem_clflush_object(obj, true);
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
@@ -2232,6 +2219,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
* lists early. */
list_del(&obj->global_list);
+ if (obj->mapping) {
+ if (is_vmalloc_addr(obj->mapping))
+ vunmap(obj->mapping);
+ else
+ kunmap(kmap_to_page(obj->mapping));
+ obj->mapping = NULL;
+ }
+
ops->put_pages(obj);
obj->pages = NULL;
@@ -2400,21 +2395,64 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
return 0;
}
+void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ERR_PTR(ret);
+
+ i915_gem_object_pin_pages(obj);
+
+ if (obj->mapping == NULL) {
+ struct page **pages;
+
+ pages = NULL;
+ if (obj->base.size == PAGE_SIZE)
+ obj->mapping = kmap(sg_page(obj->pages->sgl));
+ else
+ pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT,
+ sizeof(*pages),
+ GFP_TEMPORARY);
+ if (pages != NULL) {
+ struct sg_page_iter sg_iter;
+ int n;
+
+ n = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter,
+ obj->pages->nents, 0)
+ pages[n++] = sg_page_iter_page(&sg_iter);
+
+ obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
+ drm_free_large(pages);
+ }
+ if (obj->mapping == NULL) {
+ i915_gem_object_unpin_pages(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ return obj->mapping;
+}
+
void i915_vma_move_to_active(struct i915_vma *vma,
struct drm_i915_gem_request *req)
{
struct drm_i915_gem_object *obj = vma->obj;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
- ring = i915_gem_request_get_ring(req);
+ engine = i915_gem_request_get_engine(req);
/* Add a reference if we're newly entering the active list. */
if (obj->active == 0)
drm_gem_object_reference(&obj->base);
- obj->active |= intel_ring_flag(ring);
+ obj->active |= intel_engine_flag(engine);
- list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
- i915_gem_request_assign(&obj->last_read_req[ring->id], req);
+ list_move_tail(&obj->engine_list[engine->id], &engine->active_list);
+ i915_gem_request_assign(&obj->last_read_req[engine->id], req);
list_move_tail(&vma->vm_link, &vma->vm->active_list);
}
@@ -2422,8 +2460,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
static void
i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
{
- RQ_BUG_ON(obj->last_write_req == NULL);
- RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
+ GEM_BUG_ON(obj->last_write_req == NULL);
+ GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));
i915_gem_request_assign(&obj->last_write_req, NULL);
intel_fb_obj_flush(obj, true, ORIGIN_CS);
@@ -2434,13 +2472,13 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
{
struct i915_vma *vma;
- RQ_BUG_ON(obj->last_read_req[ring] == NULL);
- RQ_BUG_ON(!(obj->active & (1 << ring)));
+ GEM_BUG_ON(obj->last_read_req[ring] == NULL);
+ GEM_BUG_ON(!(obj->active & (1 << ring)));
- list_del_init(&obj->ring_list[ring]);
+ list_del_init(&obj->engine_list[ring]);
i915_gem_request_assign(&obj->last_read_req[ring], NULL);
- if (obj->last_write_req && obj->last_write_req->ring->id == ring)
+ if (obj->last_write_req && obj->last_write_req->engine->id == ring)
i915_gem_object_retire__write(obj);
obj->active &= ~(1 << ring);
@@ -2467,24 +2505,20 @@ static int
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- int ret, i, j;
+ struct intel_engine_cs *engine;
+ int ret;
/* Carefully retire all requests without writing to the rings */
- for_each_ring(ring, dev_priv, i) {
- ret = intel_ring_idle(ring);
+ for_each_engine(engine, dev_priv) {
+ ret = intel_engine_idle(engine);
if (ret)
return ret;
}
i915_gem_retire_requests(dev);
/* Finally reset hw state */
- for_each_ring(ring, dev_priv, i) {
- intel_ring_init_seqno(ring, seqno);
-
- for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
- ring->semaphore.sync_seqno[j] = 0;
- }
+ for_each_engine(engine, dev_priv)
+ intel_ring_init_seqno(engine, seqno);
return 0;
}
@@ -2542,7 +2576,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
struct drm_i915_gem_object *obj,
bool flush_caches)
{
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct drm_i915_private *dev_priv;
struct intel_ringbuffer *ringbuf;
u32 request_start;
@@ -2551,8 +2585,8 @@ void __i915_add_request(struct drm_i915_gem_request *request,
if (WARN_ON(request == NULL))
return;
- ring = request->ring;
- dev_priv = ring->dev->dev_private;
+ engine = request->engine;
+ dev_priv = request->i915;
ringbuf = request->ringbuf;
/*
@@ -2579,6 +2613,28 @@ void __i915_add_request(struct drm_i915_gem_request *request,
WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
}
+ trace_i915_gem_request_add(request);
+
+ request->head = request_start;
+
+ /* Whilst this request exists, batch_obj will be on the
+ * active_list, and so will hold the active reference. Only when this
+ * request is retired will the batch_obj be moved onto the
+ * inactive_list and lose its active reference. Hence we do not need
+ * to explicitly hold another reference here.
+ */
+ request->batch_obj = obj;
+
+ /* Seal the request and mark it as pending execution. Note that
+ * we may inspect this state, without holding any locks, during
+ * hangcheck. Hence we apply the barrier to ensure that we do not
+ * see a more recent value in the hws than we are tracking.
+ */
+ request->emitted_jiffies = jiffies;
+ request->previous_seqno = engine->last_submitted_seqno;
+ smp_store_mb(engine->last_submitted_seqno, request->seqno);
+ list_add_tail(&request->list, &engine->request_list);
+
/* Record the position of the start of the request so that
* should we detect the updated seqno part-way through the
* GPU processing the request, we never over-estimate the
@@ -2587,33 +2643,16 @@ void __i915_add_request(struct drm_i915_gem_request *request,
request->postfix = intel_ring_get_tail(ringbuf);
if (i915.enable_execlists)
- ret = ring->emit_request(request);
+ ret = engine->emit_request(request);
else {
- ret = ring->add_request(request);
+ ret = engine->add_request(request);
request->tail = intel_ring_get_tail(ringbuf);
}
/* Not allowed to fail! */
WARN(ret, "emit|add_request failed: %d!\n", ret);
- request->head = request_start;
-
- /* Whilst this request exists, batch_obj will be on the
- * active_list, and so will hold the active reference. Only when this
- * request is retired will the the batch_obj be moved onto the
- * inactive_list and lose its active reference. Hence we do not need
- * to explicitly hold another reference here.
- */
- request->batch_obj = obj;
-
- request->emitted_jiffies = jiffies;
- request->previous_seqno = ring->last_submitted_seqno;
- ring->last_submitted_seqno = request->seqno;
- list_add_tail(&request->list, &ring->request_list);
-
- trace_i915_gem_request_add(request);
-
- i915_queue_hangcheck(ring->dev);
+ i915_queue_hangcheck(engine->dev);
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work,
@@ -2680,7 +2719,7 @@ void i915_gem_request_free(struct kref *req_ref)
if (ctx) {
if (i915.enable_execlists && ctx != req->i915->kernel_context)
- intel_lr_context_unpin(ctx, req->ring);
+ intel_lr_context_unpin(ctx, req->engine);
i915_gem_context_unreference(ctx);
}
@@ -2689,11 +2728,12 @@ void i915_gem_request_free(struct kref *req_ref)
}
static inline int
-__i915_gem_request_alloc(struct intel_engine_cs *ring,
+__i915_gem_request_alloc(struct intel_engine_cs *engine,
struct intel_context *ctx,
struct drm_i915_gem_request **req_out)
{
- struct drm_i915_private *dev_priv = to_i915(ring->dev);
+ struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
struct drm_i915_gem_request *req;
int ret;
@@ -2702,17 +2742,26 @@ __i915_gem_request_alloc(struct intel_engine_cs *ring,
*req_out = NULL;
+ /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
+ * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
+ * and restart.
+ */
+ ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
+ if (ret)
+ return ret;
+
req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
if (req == NULL)
return -ENOMEM;
- ret = i915_gem_get_seqno(ring->dev, &req->seqno);
+ ret = i915_gem_get_seqno(engine->dev, &req->seqno);
if (ret)
goto err;
kref_init(&req->ref);
req->i915 = dev_priv;
- req->ring = ring;
+ req->engine = engine;
+ req->reset_counter = reset_counter;
req->ctx = ctx;
i915_gem_context_reference(req->ctx);
@@ -2742,7 +2791,8 @@ __i915_gem_request_alloc(struct intel_engine_cs *ring,
* fully prepared. Thus it can be cleaned up using the proper
* free code.
*/
- i915_gem_request_cancel(req);
+ intel_ring_reserved_space_cancel(req->ringbuf);
+ i915_gem_request_unreference(req);
return ret;
}
@@ -2779,19 +2829,12 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
return err ? ERR_PTR(err) : req;
}
-void i915_gem_request_cancel(struct drm_i915_gem_request *req)
-{
- intel_ring_reserved_space_cancel(req->ringbuf);
-
- i915_gem_request_unreference(req);
-}
-
struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_engine_cs *ring)
+i915_gem_find_active_request(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request;
- list_for_each_entry(request, &ring->request_list, list) {
+ list_for_each_entry(request, &engine->request_list, list) {
if (i915_gem_request_completed(request, false))
continue;
@@ -2801,38 +2844,38 @@ i915_gem_find_active_request(struct intel_engine_cs *ring)
return NULL;
}
-static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *ring)
+static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request;
bool ring_hung;
- request = i915_gem_find_active_request(ring);
+ request = i915_gem_find_active_request(engine);
if (request == NULL)
return;
- ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
+ ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
i915_set_reset_status(dev_priv, request->ctx, ring_hung);
- list_for_each_entry_continue(request, &ring->request_list, list)
+ list_for_each_entry_continue(request, &engine->request_list, list)
i915_set_reset_status(dev_priv, request->ctx, false);
}
-static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *ring)
+static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *engine)
{
struct intel_ringbuffer *buffer;
- while (!list_empty(&ring->active_list)) {
+ while (!list_empty(&engine->active_list)) {
struct drm_i915_gem_object *obj;
- obj = list_first_entry(&ring->active_list,
+ obj = list_first_entry(&engine->active_list,
struct drm_i915_gem_object,
- ring_list[ring->id]);
+ engine_list[engine->id]);
- i915_gem_object_retire__read(obj, ring->id);
+ i915_gem_object_retire__read(obj, engine->id);
}
/*
@@ -2842,14 +2885,16 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
*/
if (i915.enable_execlists) {
- spin_lock_irq(&ring->execlist_lock);
+ /* Ensure irq handler finishes or is cancelled. */
+ tasklet_kill(&engine->irq_tasklet);
+ spin_lock_bh(&engine->execlist_lock);
/* list_splice_tail_init checks for empty lists */
- list_splice_tail_init(&ring->execlist_queue,
- &ring->execlist_retired_req_list);
+ list_splice_tail_init(&engine->execlist_queue,
+ &engine->execlist_retired_req_list);
+ spin_unlock_bh(&engine->execlist_lock);
- spin_unlock_irq(&ring->execlist_lock);
- intel_execlists_retire_requests(ring);
+ intel_execlists_retire_requests(engine);
}
/*
@@ -2859,10 +2904,10 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
* implicit references on things like e.g. ppgtt address spaces through
* the request.
*/
- while (!list_empty(&ring->request_list)) {
+ while (!list_empty(&engine->request_list)) {
struct drm_i915_gem_request *request;
- request = list_first_entry(&ring->request_list,
+ request = list_first_entry(&engine->request_list,
struct drm_i915_gem_request,
list);
@@ -2876,28 +2921,29 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
* upon reset is less than when we start. Do one more pass over
* all the ringbuffers to reset last_retired_head.
*/
- list_for_each_entry(buffer, &ring->buffers, link) {
+ list_for_each_entry(buffer, &engine->buffers, link) {
buffer->last_retired_head = buffer->tail;
intel_ring_update_space(buffer);
}
+
+ intel_ring_init_seqno(engine, engine->last_submitted_seqno);
}
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- int i;
+ struct intel_engine_cs *engine;
/*
* Before we free the objects from the requests, we need to inspect
* them for finding the guilty party. As the requests only borrow
* their reference to the objects, the inspection must be done first.
*/
- for_each_ring(ring, dev_priv, i)
- i915_gem_reset_ring_status(dev_priv, ring);
+ for_each_engine(engine, dev_priv)
+ i915_gem_reset_engine_status(dev_priv, engine);
- for_each_ring(ring, dev_priv, i)
- i915_gem_reset_ring_cleanup(dev_priv, ring);
+ for_each_engine(engine, dev_priv)
+ i915_gem_reset_engine_cleanup(dev_priv, engine);
i915_gem_context_reset(dev);
@@ -2910,19 +2956,19 @@ void i915_gem_reset(struct drm_device *dev)
* This function clears the request list as sequence numbers are passed.
*/
void
-i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
+i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
{
- WARN_ON(i915_verify_lists(ring->dev));
+ WARN_ON(i915_verify_lists(engine->dev));
/* Retire requests first as we use it above for the early return.
* If we retire requests last, we may use a later seqno and so clear
* the requests lists without clearing the active list, leading to
* confusion.
*/
- while (!list_empty(&ring->request_list)) {
+ while (!list_empty(&engine->request_list)) {
struct drm_i915_gem_request *request;
- request = list_first_entry(&ring->request_list,
+ request = list_first_entry(&engine->request_list,
struct drm_i915_gem_request,
list);
@@ -2936,45 +2982,44 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
* by the ringbuffer to the flushing/inactive lists as appropriate,
* before we free the context associated with the requests.
*/
- while (!list_empty(&ring->active_list)) {
+ while (!list_empty(&engine->active_list)) {
struct drm_i915_gem_object *obj;
- obj = list_first_entry(&ring->active_list,
- struct drm_i915_gem_object,
- ring_list[ring->id]);
+ obj = list_first_entry(&engine->active_list,
+ struct drm_i915_gem_object,
+ engine_list[engine->id]);
- if (!list_empty(&obj->last_read_req[ring->id]->list))
+ if (!list_empty(&obj->last_read_req[engine->id]->list))
break;
- i915_gem_object_retire__read(obj, ring->id);
+ i915_gem_object_retire__read(obj, engine->id);
}
- if (unlikely(ring->trace_irq_req &&
- i915_gem_request_completed(ring->trace_irq_req, true))) {
- ring->irq_put(ring);
- i915_gem_request_assign(&ring->trace_irq_req, NULL);
+ if (unlikely(engine->trace_irq_req &&
+ i915_gem_request_completed(engine->trace_irq_req, true))) {
+ engine->irq_put(engine);
+ i915_gem_request_assign(&engine->trace_irq_req, NULL);
}
- WARN_ON(i915_verify_lists(ring->dev));
+ WARN_ON(i915_verify_lists(engine->dev));
}
bool
i915_gem_retire_requests(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
bool idle = true;
- int i;
- for_each_ring(ring, dev_priv, i) {
- i915_gem_retire_requests_ring(ring);
- idle &= list_empty(&ring->request_list);
+ for_each_engine(engine, dev_priv) {
+ i915_gem_retire_requests_ring(engine);
+ idle &= list_empty(&engine->request_list);
if (i915.enable_execlists) {
- spin_lock_irq(&ring->execlist_lock);
- idle &= list_empty(&ring->execlist_queue);
- spin_unlock_irq(&ring->execlist_lock);
+ spin_lock_bh(&engine->execlist_lock);
+ idle &= list_empty(&engine->execlist_queue);
+ spin_unlock_bh(&engine->execlist_lock);
- intel_execlists_retire_requests(ring);
+ intel_execlists_retire_requests(engine);
}
}
@@ -3011,25 +3056,21 @@ i915_gem_idle_work_handler(struct work_struct *work)
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), mm.idle_work.work);
struct drm_device *dev = dev_priv->dev;
- struct intel_engine_cs *ring;
- int i;
+ struct intel_engine_cs *engine;
- for_each_ring(ring, dev_priv, i)
- if (!list_empty(&ring->request_list))
+ for_each_engine(engine, dev_priv)
+ if (!list_empty(&engine->request_list))
return;
/* we probably should sync with hangcheck here, using cancel_work_sync.
- * Also locking seems to be fubar here, ring->request_list is protected
+ * Also locking seems to be fubar here, engine->request_list is protected
* by dev->struct_mutex. */
intel_mark_idle(dev);
if (mutex_trylock(&dev->struct_mutex)) {
- struct intel_engine_cs *ring;
- int i;
-
- for_each_ring(ring, dev_priv, i)
- i915_gem_batch_pool_fini(&ring->batch_pool);
+ for_each_engine(engine, dev_priv)
+ i915_gem_batch_pool_fini(&engine->batch_pool);
mutex_unlock(&dev->struct_mutex);
}
@@ -3048,7 +3089,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
if (!obj->active)
return 0;
- for (i = 0; i < I915_NUM_RINGS; i++) {
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
struct drm_i915_gem_request *req;
req = obj->last_read_req[i];
@@ -3093,11 +3134,9 @@ retire:
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
- struct drm_i915_gem_request *req[I915_NUM_RINGS];
- unsigned reset_counter;
+ struct drm_i915_gem_request *req[I915_NUM_ENGINES];
int i, n = 0;
int ret;
@@ -3108,7 +3147,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (ret)
return ret;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
+ obj = to_intel_bo(drm_gem_object_lookup(file, args->bo_handle));
if (&obj->base == NULL) {
mutex_unlock(&dev->struct_mutex);
return -ENOENT;
@@ -3131,9 +3170,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
drm_gem_object_unreference(&obj->base);
- reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
- for (i = 0; i < I915_NUM_RINGS; i++) {
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
if (obj->last_read_req[i] == NULL)
continue;
@@ -3144,7 +3182,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
for (i = 0; i < n; i++) {
if (ret == 0)
- ret = __i915_wait_request(req[i], reset_counter, true,
+ ret = __i915_wait_request(req[i], true,
args->timeout_ns > 0 ? &args->timeout_ns : NULL,
to_rps_client(file));
i915_gem_request_unreference__unlocked(req[i]);
@@ -3166,7 +3204,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_engine_cs *from;
int ret;
- from = i915_gem_request_get_ring(from_req);
+ from = i915_gem_request_get_engine(from_req);
if (to == from)
return 0;
@@ -3176,7 +3214,6 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
if (!i915_semaphore_is_enabled(obj->base.dev)) {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
ret = __i915_wait_request(from_req,
- atomic_read(&i915->gpu_error.reset_counter),
i915->mm.interruptible,
NULL,
&i915->rps.semaphores);
@@ -3260,7 +3297,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct drm_i915_gem_request **to_req)
{
const bool readonly = obj->base.pending_write_domain == 0;
- struct drm_i915_gem_request *req[I915_NUM_RINGS];
+ struct drm_i915_gem_request *req[I915_NUM_ENGINES];
int ret, i, n;
if (!obj->active)
@@ -3274,7 +3311,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
if (obj->last_write_req)
req[n++] = obj->last_write_req;
} else {
- for (i = 0; i < I915_NUM_RINGS; i++)
+ for (i = 0; i < I915_NUM_ENGINES; i++)
if (obj->last_read_req[i])
req[n++] = obj->last_read_req[i];
}
@@ -3297,9 +3334,6 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
return;
- /* Wait for any direct GTT access to complete */
- mb();
-
old_read_domains = obj->base.read_domains;
old_write_domain = obj->base.write_domain;
@@ -3391,28 +3425,25 @@ int __i915_vma_unbind_no_wait(struct i915_vma *vma)
int i915_gpu_idle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- int ret, i;
+ struct intel_engine_cs *engine;
+ int ret;
/* Flush everything onto the inactive list. */
- for_each_ring(ring, dev_priv, i) {
+ for_each_engine(engine, dev_priv) {
if (!i915.enable_execlists) {
struct drm_i915_gem_request *req;
- req = i915_gem_request_alloc(ring, NULL);
+ req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
ret = i915_switch_context(req);
- if (ret) {
- i915_gem_request_cancel(req);
- return ret;
- }
-
i915_add_request_no_flush(req);
+ if (ret)
+ return ret;
}
- ret = intel_ring_idle(ring);
+ ret = intel_engine_idle(engine);
if (ret)
return ret;
}
@@ -3466,7 +3497,8 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
uint64_t flags)
{
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
u32 fence_alignment, unfenced_alignment;
u32 search_flag, alloc_flag;
u64 start, end;
@@ -3513,7 +3545,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
end = vm->total;
if (flags & PIN_MAPPABLE)
- end = min_t(u64, end, dev_priv->gtt.mappable_end);
+ end = min_t(u64, end, ggtt->mappable_end);
if (flags & PIN_ZONE_4G)
end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
@@ -3720,6 +3752,9 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t old_write_domain, old_read_domains;
struct i915_vma *vma;
int ret;
@@ -3774,7 +3809,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
vma = i915_gem_obj_to_ggtt(obj);
if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
list_move_tail(&vma->vm_link,
- &to_i915(obj->base.dev)->gtt.base.inactive_list);
+ &ggtt->base.inactive_list);
return 0;
}
@@ -3906,7 +3941,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
if (&obj->base == NULL)
return -ENOENT;
@@ -3949,7 +3984,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
* cacheline, whereas normally such cachelines would get
* invalidated.
*/
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
return -ENODEV;
level = I915_CACHE_LLC;
@@ -3967,7 +4002,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
if (ret)
goto rpm_put;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
@@ -4128,16 +4163,15 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
struct drm_i915_gem_request *request, *target = NULL;
- unsigned reset_counter;
int ret;
ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
if (ret)
return ret;
- ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
- if (ret)
- return ret;
+ /* ABI: return -EIO if already wedged */
+ if (i915_terminally_wedged(&dev_priv->gpu_error))
+ return -EIO;
spin_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
@@ -4153,7 +4187,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
target = request;
}
- reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
if (target)
i915_gem_request_reference(target);
spin_unlock(&file_priv->mm.lock);
@@ -4161,7 +4194,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (target == NULL)
return 0;
- ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
+ ret = __i915_wait_request(target, true, NULL, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
@@ -4211,7 +4244,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
(vma->node.start & (fence_alignment - 1)) == 0);
mappable = (vma->node.start + fence_size <=
- to_i915(obj->base.dev)->gtt.mappable_end);
+ to_i915(obj->base.dev)->ggtt.mappable_end);
obj->map_and_fenceable = mappable && fenceable;
}
@@ -4243,9 +4276,6 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
i915_gem_obj_to_vma(obj, vm);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
if (vma) {
if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
@@ -4308,10 +4338,13 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
uint64_t flags)
{
- if (WARN_ONCE(!view, "no view specified"))
- return -EINVAL;
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
- return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
+ BUG_ON(!view);
+
+ return i915_gem_object_do_pin(obj, &ggtt->base, view,
alignment, flags | PIN_GLOBAL);
}
@@ -4321,7 +4354,6 @@ i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
{
struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
- BUG_ON(!vma);
WARN_ON(vma->pin_count == 0);
WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
@@ -4340,7 +4372,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
@@ -4359,15 +4391,15 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
if (obj->active) {
int i;
- for (i = 0; i < I915_NUM_RINGS; i++) {
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
struct drm_i915_gem_request *req;
req = obj->last_read_req[i];
if (req)
- args->busy |= 1 << (16 + req->ring->exec_id);
+ args->busy |= 1 << (16 + req->engine->exec_id);
}
if (obj->last_write_req)
- args->busy |= obj->last_write_req->ring->exec_id;
+ args->busy |= obj->last_write_req->engine->exec_id;
}
unref:
@@ -4405,7 +4437,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
+ obj = to_intel_bo(drm_gem_object_lookup(file_priv, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
@@ -4447,8 +4479,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
int i;
INIT_LIST_HEAD(&obj->global_list);
- for (i = 0; i < I915_NUM_RINGS; i++)
- INIT_LIST_HEAD(&obj->ring_list[i]);
+ for (i = 0; i < I915_NUM_ENGINES; i++)
+ INIT_LIST_HEAD(&obj->engine_list[i]);
INIT_LIST_HEAD(&obj->obj_exec_link);
INIT_LIST_HEAD(&obj->vma_list);
INIT_LIST_HEAD(&obj->batch_pool_link);
@@ -4623,14 +4655,15 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
- struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma;
- if (WARN_ONCE(!view, "no view specified"))
- return ERR_PTR(-EINVAL);
+ BUG_ON(!view);
list_for_each_entry(vma, &obj->vma_list, obj_link)
- if (vma->vm == ggtt &&
+ if (vma->vm == &ggtt->base &&
i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma;
return NULL;
@@ -4653,14 +4686,13 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
}
static void
-i915_gem_stop_ringbuffers(struct drm_device *dev)
+i915_gem_stop_engines(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- int i;
+ struct intel_engine_cs *engine;
- for_each_ring(ring, dev_priv, i)
- dev_priv->gt.stop_ring(ring);
+ for_each_engine(engine, dev_priv)
+ dev_priv->gt.stop_engine(engine);
}
int
@@ -4676,7 +4708,7 @@ i915_gem_suspend(struct drm_device *dev)
i915_gem_retire_requests(dev);
- i915_gem_stop_ringbuffers(dev);
+ i915_gem_stop_engines(dev);
mutex_unlock(&dev->struct_mutex);
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
@@ -4697,8 +4729,8 @@ err:
int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
{
- struct intel_engine_cs *ring = req->ring;
- struct drm_device *dev = ring->dev;
+ struct intel_engine_cs *engine = req->engine;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
int i, ret;
@@ -4716,12 +4748,12 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
* at initialization time.
*/
for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
- intel_ring_emit(ring, remap_info[i]);
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
+ intel_ring_emit(engine, remap_info[i]);
}
- intel_ring_advance(ring);
+ intel_ring_advance(engine);
return ret;
}
@@ -4778,7 +4810,7 @@ static void init_unused_rings(struct drm_device *dev)
}
}
-int i915_gem_init_rings(struct drm_device *dev)
+int i915_gem_init_engines(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@@ -4814,13 +4846,13 @@ int i915_gem_init_rings(struct drm_device *dev)
return 0;
cleanup_vebox_ring:
- intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
+ intel_cleanup_engine(&dev_priv->engine[VECS]);
cleanup_blt_ring:
- intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
+ intel_cleanup_engine(&dev_priv->engine[BCS]);
cleanup_bsd_ring:
- intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
+ intel_cleanup_engine(&dev_priv->engine[VCS]);
cleanup_render_ring:
- intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
+ intel_cleanup_engine(&dev_priv->engine[RCS]);
return ret;
}
@@ -4829,16 +4861,13 @@ int
i915_gem_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- int ret, i, j;
-
- if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
- return -EIO;
+ struct intel_engine_cs *engine;
+ int ret, j;
/* Double layer security blanket, see i915_gem_init() */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- if (dev_priv->ellc_size)
+ if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
if (IS_HASWELL(dev))
@@ -4876,12 +4905,14 @@ i915_gem_init_hw(struct drm_device *dev)
}
/* Need to do basic initialisation of all rings first: */
- for_each_ring(ring, dev_priv, i) {
- ret = ring->init_hw(ring);
+ for_each_engine(engine, dev_priv) {
+ ret = engine->init_hw(engine);
if (ret)
goto out;
}
+ intel_mocs_init_l3cc_table(dev);
+
/* We can't enable contexts until all firmware is loaded */
if (HAS_GUC_UCODE(dev)) {
ret = intel_guc_ucode_load(dev);
@@ -4901,38 +4932,39 @@ i915_gem_init_hw(struct drm_device *dev)
goto out;
/* Now it is safe to go back round and do everything else: */
- for_each_ring(ring, dev_priv, i) {
+ for_each_engine(engine, dev_priv) {
struct drm_i915_gem_request *req;
- req = i915_gem_request_alloc(ring, NULL);
+ req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
- i915_gem_cleanup_ringbuffer(dev);
- goto out;
+ break;
}
- if (ring->id == RCS) {
- for (j = 0; j < NUM_L3_SLICES(dev); j++)
- i915_gem_l3_remap(req, j);
+ if (engine->id == RCS) {
+ for (j = 0; j < NUM_L3_SLICES(dev); j++) {
+ ret = i915_gem_l3_remap(req, j);
+ if (ret)
+ goto err_request;
+ }
}
ret = i915_ppgtt_init_ring(req);
- if (ret && ret != -EIO) {
- DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
- i915_gem_request_cancel(req);
- i915_gem_cleanup_ringbuffer(dev);
- goto out;
- }
+ if (ret)
+ goto err_request;
ret = i915_gem_context_enable(req);
- if (ret && ret != -EIO) {
- DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
- i915_gem_request_cancel(req);
- i915_gem_cleanup_ringbuffer(dev);
- goto out;
- }
+ if (ret)
+ goto err_request;
+err_request:
i915_add_request_no_flush(req);
+ if (ret) {
+ DRM_ERROR("Failed to enable %s, error=%d\n",
+ engine->name, ret);
+ i915_gem_cleanup_engines(dev);
+ break;
+ }
}
out:
@@ -4952,14 +4984,14 @@ int i915_gem_init(struct drm_device *dev)
if (!i915.enable_execlists) {
dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
- dev_priv->gt.init_rings = i915_gem_init_rings;
- dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
- dev_priv->gt.stop_ring = intel_stop_ring_buffer;
+ dev_priv->gt.init_engines = i915_gem_init_engines;
+ dev_priv->gt.cleanup_engine = intel_cleanup_engine;
+ dev_priv->gt.stop_engine = intel_stop_engine;
} else {
dev_priv->gt.execbuf_submit = intel_execlists_submission;
- dev_priv->gt.init_rings = intel_logical_rings_init;
- dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
- dev_priv->gt.stop_ring = intel_logical_ring_stop;
+ dev_priv->gt.init_engines = intel_logical_rings_init;
+ dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
+ dev_priv->gt.stop_engine = intel_logical_ring_stop;
}
/* This is just a security blanket to placate dragons.
@@ -4974,13 +5006,13 @@ int i915_gem_init(struct drm_device *dev)
if (ret)
goto out_unlock;
- i915_gem_init_global_gtt(dev);
+ i915_gem_init_ggtt(dev);
ret = i915_gem_context_init(dev);
if (ret)
goto out_unlock;
- ret = dev_priv->gt.init_rings(dev);
+ ret = dev_priv->gt.init_engines(dev);
if (ret)
goto out_unlock;
@@ -5003,29 +5035,52 @@ out_unlock:
}
void
-i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+i915_gem_cleanup_engines(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- int i;
+ struct intel_engine_cs *engine;
- for_each_ring(ring, dev_priv, i)
- dev_priv->gt.cleanup_ring(ring);
+ for_each_engine(engine, dev_priv)
+ dev_priv->gt.cleanup_engine(engine);
- if (i915.enable_execlists)
- /*
- * Neither the BIOS, ourselves or any other kernel
- * expects the system to be in execlists mode on startup,
- * so we need to reset the GPU back to legacy mode.
- */
- intel_gpu_reset(dev);
+ if (i915.enable_execlists)
+ /*
+ * Neither the BIOS, ourselves or any other kernel
+ * expects the system to be in execlists mode on startup,
+ * so we need to reset the GPU back to legacy mode.
+ */
+ intel_gpu_reset(dev, ALL_ENGINES);
}
static void
-init_ring_lists(struct intel_engine_cs *ring)
+init_engine_lists(struct intel_engine_cs *engine)
{
- INIT_LIST_HEAD(&ring->active_list);
- INIT_LIST_HEAD(&ring->request_list);
+ INIT_LIST_HEAD(&engine->active_list);
+ INIT_LIST_HEAD(&engine->request_list);
+}
+
+void
+i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
+ !IS_CHERRYVIEW(dev_priv))
+ dev_priv->num_fence_regs = 32;
+ else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
+ IS_I945GM(dev_priv) || IS_G33(dev_priv))
+ dev_priv->num_fence_regs = 16;
+ else
+ dev_priv->num_fence_regs = 8;
+
+ if (intel_vgpu_active(dev))
+ dev_priv->num_fence_regs =
+ I915_READ(vgtif_reg(avail_rs.fence_num));
+
+ /* Initialize fence registers to zero */
+ i915_gem_restore_fences(dev);
+
+ i915_gem_detect_bit_6_swizzle(dev);
}
void
@@ -5055,8 +5110,8 @@ i915_gem_load_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
- for (i = 0; i < I915_NUM_RINGS; i++)
- init_ring_lists(&dev_priv->ring[i]);
+ for (i = 0; i < I915_NUM_ENGINES; i++)
+ init_engine_lists(&dev_priv->engine[i]);
for (i = 0; i < I915_MAX_NUM_FENCES; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
@@ -5067,17 +5122,6 @@ i915_gem_load_init(struct drm_device *dev)
dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
- if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
- dev_priv->num_fence_regs = 32;
- else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- dev_priv->num_fence_regs = 16;
- else
- dev_priv->num_fence_regs = 8;
-
- if (intel_vgpu_active(dev))
- dev_priv->num_fence_regs =
- I915_READ(vgtif_reg(avail_rs.fence_num));
-
/*
* Set initial sequence number for requests.
* Using this number allows the wraparound to happen early,
@@ -5086,11 +5130,8 @@ i915_gem_load_init(struct drm_device *dev)
dev_priv->next_seqno = ((u32)~0 - 0x1100);
dev_priv->last_seqno = ((u32)~0 - 0x1101);
- /* Initialize fence registers to zero */
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
- i915_gem_restore_fences(dev);
- i915_gem_detect_bit_6_swizzle(dev);
init_waitqueue_head(&dev_priv->pending_flip_queue);
dev_priv->mm.interruptible = true;
@@ -5213,11 +5254,12 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view)
{
- struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
+ struct drm_i915_private *dev_priv = to_i915(o->base.dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, obj_link)
- if (vma->vm == ggtt &&
+ if (vma->vm == &ggtt->base &&
i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma->node.start;
@@ -5244,11 +5286,12 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view)
{
- struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
+ struct drm_i915_private *dev_priv = to_i915(o->base.dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, obj_link)
- if (vma->vm == ggtt &&
+ if (vma->vm == &ggtt->base &&
i915_ggtt_view_equal(&vma->ggtt_view, view) &&
drm_mm_node_allocated(&vma->node))
return true;
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
new file mode 100644
index 000000000..8292e797d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_GEM_H__
+#define __I915_GEM_H__
+
+#ifdef CONFIG_DRM_I915_DEBUG_GEM
+#define GEM_BUG_ON(expr) BUG_ON(expr)
+#else
+#define GEM_BUG_ON(expr)
+#endif
+
+#endif /* __I915_GEM_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 5dd84e148..e5acc3916 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -342,15 +342,15 @@ void i915_gem_context_reset(struct drm_device *dev)
struct intel_context *ctx;
list_for_each_entry(ctx, &dev_priv->context_list, link)
- intel_lr_context_reset(dev, ctx);
+ intel_lr_context_reset(dev_priv, ctx);
}
- for (i = 0; i < I915_NUM_RINGS; i++) {
- struct intel_engine_cs *ring = &dev_priv->ring[i];
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
+ struct intel_engine_cs *engine = &dev_priv->engine[i];
- if (ring->last_context) {
- i915_gem_context_unpin(ring->last_context, ring);
- ring->last_context = NULL;
+ if (engine->last_context) {
+ i915_gem_context_unpin(engine->last_context, engine);
+ engine->last_context = NULL;
}
}
@@ -413,7 +413,7 @@ void i915_gem_context_fini(struct drm_device *dev)
/* The only known way to stop the gpu from accessing the hw context is
* to reset it. Do this as the very last operation to avoid confusing
* other code, leading to spurious errors. */
- intel_gpu_reset(dev);
+ intel_gpu_reset(dev, ALL_ENGINES);
/* When default context is created and switched to, base object refcount
* will be 2 (+1 from object creation and +1 from do_switch()).
@@ -421,17 +421,17 @@ void i915_gem_context_fini(struct drm_device *dev)
* to default context. So we need to unreference the base object once
* to offset the do_switch part, so that i915_gem_context_unreference()
* can then free the base object correctly. */
- WARN_ON(!dev_priv->ring[RCS].last_context);
+ WARN_ON(!dev_priv->engine[RCS].last_context);
i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
}
- for (i = I915_NUM_RINGS; --i >= 0;) {
- struct intel_engine_cs *ring = &dev_priv->ring[i];
+ for (i = I915_NUM_ENGINES; --i >= 0;) {
+ struct intel_engine_cs *engine = &dev_priv->engine[i];
- if (ring->last_context) {
- i915_gem_context_unpin(ring->last_context, ring);
- ring->last_context = NULL;
+ if (engine->last_context) {
+ i915_gem_context_unpin(engine->last_context, engine);
+ engine->last_context = NULL;
}
}
@@ -441,14 +441,14 @@ void i915_gem_context_fini(struct drm_device *dev)
int i915_gem_context_enable(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
if (i915.enable_execlists) {
- if (ring->init_context == NULL)
+ if (engine->init_context == NULL)
return 0;
- ret = ring->init_context(req);
+ ret = engine->init_context(req);
} else
ret = i915_switch_context(req);
@@ -510,133 +510,147 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
- i915_semaphore_is_enabled(ring->dev) ?
- hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
+ i915_semaphore_is_enabled(engine->dev) ?
+ hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 :
0;
- int len, i, ret;
+ int len, ret;
/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
* invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
* explicitly, so we rely on the value at ring init, stored in
* itlb_before_ctx_switch.
*/
- if (IS_GEN6(ring->dev)) {
- ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
+ if (IS_GEN6(engine->dev)) {
+ ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
if (ret)
return ret;
}
/* These flags are for resource streamer on HSW+ */
- if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
+ if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8)
flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
- else if (INTEL_INFO(ring->dev)->gen < 8)
+ else if (INTEL_INFO(engine->dev)->gen < 8)
flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
len = 4;
- if (INTEL_INFO(ring->dev)->gen >= 7)
- len += 2 + (num_rings ? 4*num_rings + 2 : 0);
+ if (INTEL_INFO(engine->dev)->gen >= 7)
+ len += 2 + (num_rings ? 4*num_rings + 6 : 0);
ret = intel_ring_begin(req, len);
if (ret)
return ret;
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
- if (INTEL_INFO(ring->dev)->gen >= 7) {
- intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+ if (INTEL_INFO(engine->dev)->gen >= 7) {
+ intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
if (num_rings) {
struct intel_engine_cs *signaller;
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
- for_each_ring(signaller, to_i915(ring->dev), i) {
- if (signaller == ring)
+ intel_ring_emit(engine,
+ MI_LOAD_REGISTER_IMM(num_rings));
+ for_each_engine(signaller, to_i915(engine->dev)) {
+ if (signaller == engine)
continue;
- intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
- intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ intel_ring_emit_reg(engine,
+ RING_PSMI_CTL(signaller->mmio_base));
+ intel_ring_emit(engine,
+ _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
}
}
}
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(engine, MI_SET_CONTEXT);
+ intel_ring_emit(engine,
+ i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
flags);
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
*/
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(engine, MI_NOOP);
- if (INTEL_INFO(ring->dev)->gen >= 7) {
+ if (INTEL_INFO(engine->dev)->gen >= 7) {
if (num_rings) {
struct intel_engine_cs *signaller;
+ i915_reg_t last_reg = {}; /* keep gcc quiet */
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
- for_each_ring(signaller, to_i915(ring->dev), i) {
- if (signaller == ring)
+ intel_ring_emit(engine,
+ MI_LOAD_REGISTER_IMM(num_rings));
+ for_each_engine(signaller, to_i915(engine->dev)) {
+ if (signaller == engine)
continue;
- intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
- intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ last_reg = RING_PSMI_CTL(signaller->mmio_base);
+ intel_ring_emit_reg(engine, last_reg);
+ intel_ring_emit(engine,
+ _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
}
+
+ /* Insert a delay before the next switch! */
+ intel_ring_emit(engine,
+ MI_STORE_REGISTER_MEM |
+ MI_SRM_LRM_GLOBAL_GTT);
+ intel_ring_emit_reg(engine, last_reg);
+ intel_ring_emit(engine, engine->scratch.gtt_offset);
+ intel_ring_emit(engine, MI_NOOP);
}
- intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+ intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE);
}
- intel_ring_advance(ring);
+ intel_ring_advance(engine);
return ret;
}
-static inline bool should_skip_switch(struct intel_engine_cs *ring,
- struct intel_context *from,
- struct intel_context *to)
+static inline bool skip_rcs_switch(struct intel_engine_cs *engine,
+ struct intel_context *to)
{
if (to->remap_slice)
return false;
- if (to->ppgtt && from == to &&
- !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
- return true;
+ if (!to->legacy_hw_ctx.initialized)
+ return false;
- return false;
+ if (to->ppgtt &&
+ !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
+ return false;
+
+ return to == engine->last_context;
}
static bool
-needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
+needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
if (!to->ppgtt)
return false;
- if (INTEL_INFO(ring->dev)->gen < 8)
+ if (engine->last_context == to &&
+ !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
+ return false;
+
+ if (engine->id != RCS)
return true;
- if (ring != &dev_priv->ring[RCS])
+ if (INTEL_INFO(engine->dev)->gen < 8)
return true;
return false;
}
static bool
-needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
- u32 hw_flags)
+needs_pd_load_post(struct intel_context *to, u32 hw_flags)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
if (!to->ppgtt)
return false;
- if (!IS_GEN8(ring->dev))
- return false;
-
- if (ring != &dev_priv->ring[RCS])
+ if (!IS_GEN8(to->i915))
return false;
if (hw_flags & MI_RESTORE_INHIBIT)
@@ -645,58 +659,32 @@ needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
return false;
}
-static int do_switch(struct drm_i915_gem_request *req)
+static int do_rcs_switch(struct drm_i915_gem_request *req)
{
struct intel_context *to = req->ctx;
- struct intel_engine_cs *ring = req->ring;
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct intel_context *from = ring->last_context;
- u32 hw_flags = 0;
- bool uninitialized = false;
+ struct intel_engine_cs *engine = req->engine;
+ struct intel_context *from;
+ u32 hw_flags;
int ret, i;
- if (from != NULL && ring == &dev_priv->ring[RCS]) {
- BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
- BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
- }
-
- if (should_skip_switch(ring, from, to))
+ if (skip_rcs_switch(engine, to))
return 0;
/* Trying to pin first makes error handling easier. */
- if (ring == &dev_priv->ring[RCS]) {
- ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
- get_context_alignment(ring->dev), 0);
- if (ret)
- return ret;
- }
+ ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
+ get_context_alignment(engine->dev),
+ 0);
+ if (ret)
+ return ret;
/*
* Pin can switch back to the default context if we end up calling into
* evict_everything - as a last ditch gtt defrag effort that also
* switches to the default context. Hence we need to reload from here.
+ *
+ * XXX: Doing so is painfully broken!
*/
- from = ring->last_context;
-
- if (needs_pd_load_pre(ring, to)) {
- /* Older GENs and non render rings still want the load first,
- * "PP_DCLV followed by PP_DIR_BASE register through Load
- * Register Immediate commands in Ring Buffer before submitting
- * a context."*/
- trace_switch_mm(ring, to);
- ret = to->ppgtt->switch_mm(to->ppgtt, req);
- if (ret)
- goto unpin_out;
-
- /* Doing a PD load always reloads the page dirs */
- to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
- }
-
- if (ring != &dev_priv->ring[RCS]) {
- if (from)
- i915_gem_context_unreference(from);
- goto done;
- }
+ from = engine->last_context;
/*
* Clear this page out of any CPU caches for coherent swap-in/out. Note
@@ -710,53 +698,37 @@ static int do_switch(struct drm_i915_gem_request *req)
if (ret)
goto unpin_out;
- if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
- hw_flags |= MI_RESTORE_INHIBIT;
+ if (needs_pd_load_pre(engine, to)) {
+ /* Older GENs and non render rings still want the load first,
+ * "PP_DCLV followed by PP_DIR_BASE register through Load
+ * Register Immediate commands in Ring Buffer before submitting
+ * a context."*/
+ trace_switch_mm(engine, to);
+ ret = to->ppgtt->switch_mm(to->ppgtt, req);
+ if (ret)
+ goto unpin_out;
+ }
+
+ if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
/* NB: If we inhibit the restore, the context is not allowed to
* die because future work may end up depending on valid address
* space. This means we must enforce that a page table load
* occur when this occurs. */
- } else if (to->ppgtt &&
- (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
- hw_flags |= MI_FORCE_RESTORE;
- to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
- }
+ hw_flags = MI_RESTORE_INHIBIT;
+ else if (to->ppgtt &&
+ intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)
+ hw_flags = MI_FORCE_RESTORE;
+ else
+ hw_flags = 0;
/* We should never emit switch_mm more than once */
- WARN_ON(needs_pd_load_pre(ring, to) &&
- needs_pd_load_post(ring, to, hw_flags));
-
- ret = mi_set_context(req, hw_flags);
- if (ret)
- goto unpin_out;
+ WARN_ON(needs_pd_load_pre(engine, to) &&
+ needs_pd_load_post(to, hw_flags));
- /* GEN8 does *not* require an explicit reload if the PDPs have been
- * setup, and we do not wish to move them.
- */
- if (needs_pd_load_post(ring, to, hw_flags)) {
- trace_switch_mm(ring, to);
- ret = to->ppgtt->switch_mm(to->ppgtt, req);
- /* The hardware context switch is emitted, but we haven't
- * actually changed the state - so it's probably safe to bail
- * here. Still, let the user know something dangerous has
- * happened.
- */
- if (ret) {
- DRM_ERROR("Failed to change address space on context switch\n");
- goto unpin_out;
- }
- }
-
- for (i = 0; i < MAX_L3_SLICES; i++) {
- if (!(to->remap_slice & (1<<i)))
- continue;
-
- ret = i915_gem_l3_remap(req, i);
- /* If it failed, try again next round */
+ if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
+ ret = mi_set_context(req, hw_flags);
if (ret)
- DRM_DEBUG_DRIVER("L3 remapping failed\n");
- else
- to->remap_slice &= ~(1<<i);
+ goto unpin_out;
}
/* The backing object for the context is done after switching to the
@@ -781,27 +753,51 @@ static int do_switch(struct drm_i915_gem_request *req)
i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
i915_gem_context_unreference(from);
}
+ i915_gem_context_reference(to);
+ engine->last_context = to;
+
+ /* GEN8 does *not* require an explicit reload if the PDPs have been
+ * setup, and we do not wish to move them.
+ */
+ if (needs_pd_load_post(to, hw_flags)) {
+ trace_switch_mm(engine, to);
+ ret = to->ppgtt->switch_mm(to->ppgtt, req);
+ /* The hardware context switch is emitted, but we haven't
+ * actually changed the state - so it's probably safe to bail
+ * here. Still, let the user know something dangerous has
+ * happened.
+ */
+ if (ret)
+ return ret;
+ }
- uninitialized = !to->legacy_hw_ctx.initialized;
- to->legacy_hw_ctx.initialized = true;
+ if (to->ppgtt)
+ to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
-done:
- i915_gem_context_reference(to);
- ring->last_context = to;
+ for (i = 0; i < MAX_L3_SLICES; i++) {
+ if (!(to->remap_slice & (1<<i)))
+ continue;
+
+ ret = i915_gem_l3_remap(req, i);
+ if (ret)
+ return ret;
- if (uninitialized) {
- if (ring->init_context) {
- ret = ring->init_context(req);
+ to->remap_slice &= ~(1<<i);
+ }
+
+ if (!to->legacy_hw_ctx.initialized) {
+ if (engine->init_context) {
+ ret = engine->init_context(req);
if (ret)
- DRM_ERROR("ring init context: %d\n", ret);
+ return ret;
}
+ to->legacy_hw_ctx.initialized = true;
}
return 0;
unpin_out:
- if (ring->id == RCS)
- i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
+ i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
return ret;
}
@@ -820,23 +816,39 @@ unpin_out:
*/
int i915_switch_context(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct intel_engine_cs *engine = req->engine;
+ struct drm_i915_private *dev_priv = req->i915;
WARN_ON(i915.enable_execlists);
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
- if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
- if (req->ctx != ring->last_context) {
- i915_gem_context_reference(req->ctx);
- if (ring->last_context)
- i915_gem_context_unreference(ring->last_context);
- ring->last_context = req->ctx;
+ if (engine->id != RCS ||
+ req->ctx->legacy_hw_ctx.rcs_state == NULL) {
+ struct intel_context *to = req->ctx;
+
+ if (needs_pd_load_pre(engine, to)) {
+ int ret;
+
+ trace_switch_mm(engine, to);
+ ret = to->ppgtt->switch_mm(to->ppgtt, req);
+ if (ret)
+ return ret;
+
+ /* Doing a PD load always reloads the page dirs */
+ to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
}
+
+ if (to != engine->last_context) {
+ i915_gem_context_reference(to);
+ if (engine->last_context)
+ i915_gem_context_unreference(engine->last_context);
+ engine->last_context = to;
+ }
+
return 0;
}
- return do_switch(req);
+ return do_rcs_switch(req);
}
static bool contexts_enabled(struct drm_device *dev)
@@ -937,7 +949,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
else if (to_i915(dev)->mm.aliasing_ppgtt)
args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
else
- args->value = to_i915(dev)->gtt.base.total;
+ args->value = to_i915(dev)->ggtt.base.total;
break;
default:
ret = -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 17299d041..a56516482 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -36,29 +36,29 @@ i915_verify_lists(struct drm_device *dev)
static int warned;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int err = 0;
- int i;
if (warned)
return 0;
- for_each_ring(ring, dev_priv, i) {
- list_for_each_entry(obj, &ring->active_list, ring_list[ring->id]) {
+ for_each_engine(engine, dev_priv) {
+ list_for_each_entry(obj, &engine->active_list,
+ engine_list[engine->id]) {
if (obj->base.dev != dev ||
!atomic_read(&obj->base.refcount.refcount)) {
DRM_ERROR("%s: freed active obj %p\n",
- ring->name, obj);
+ engine->name, obj);
err++;
break;
} else if (!obj->active ||
- obj->last_read_req[ring->id] == NULL) {
+ obj->last_read_req[engine->id] == NULL) {
DRM_ERROR("%s: invalid active obj %p\n",
- ring->name, obj);
+ engine->name, obj);
err++;
} else if (obj->base.write_domain) {
DRM_ERROR("%s: invalid write obj %p (w %x)\n",
- ring->name,
+ engine->name,
obj, obj->base.write_domain);
err++;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 0506016e1..80bbe43a2 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -95,14 +95,12 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
- mutex_lock(&obj->base.dev->struct_mutex);
-
dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
sg_free_table(sg);
kfree(sg);
+ mutex_lock(&obj->base.dev->struct_mutex);
i915_gem_object_unpin_pages(obj);
-
mutex_unlock(&obj->base.dev->struct_mutex);
}
@@ -110,51 +108,17 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
- struct sg_page_iter sg_iter;
- struct page **pages;
- int ret, i;
+ void *addr;
+ int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ERR_PTR(ret);
- if (obj->dma_buf_vmapping) {
- obj->vmapping_count++;
- goto out_unlock;
- }
-
- ret = i915_gem_object_get_pages(obj);
- if (ret)
- goto err;
-
- i915_gem_object_pin_pages(obj);
-
- ret = -ENOMEM;
-
- pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
- if (pages == NULL)
- goto err_unpin;
-
- i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
- pages[i++] = sg_page_iter_page(&sg_iter);
-
- obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
- drm_free_large(pages);
-
- if (!obj->dma_buf_vmapping)
- goto err_unpin;
-
- obj->vmapping_count = 1;
-out_unlock:
+ addr = i915_gem_object_pin_map(obj);
mutex_unlock(&dev->struct_mutex);
- return obj->dma_buf_vmapping;
-err_unpin:
- i915_gem_object_unpin_pages(obj);
-err:
- mutex_unlock(&dev->struct_mutex);
- return ERR_PTR(ret);
+ return addr;
}
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
@@ -163,12 +127,7 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
struct drm_device *dev = obj->base.dev;
mutex_lock(&dev->struct_mutex);
- if (--obj->vmapping_count == 0) {
- vunmap(obj->dma_buf_vmapping);
- obj->dma_buf_vmapping = NULL;
-
- i915_gem_object_unpin_pages(obj);
- }
+ i915_gem_object_unpin_map(obj);
mutex_unlock(&dev->struct_mutex);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1328bc502..33df74d98 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -313,7 +313,8 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
uint64_t target_offset)
{
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint64_t delta = relocation_target(reloc, target_offset);
uint64_t offset;
void __iomem *reloc_page;
@@ -330,7 +331,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
/* Map the page containing the relocation we're going to perform. */
offset = i915_gem_obj_ggtt_offset(obj);
offset += reloc->offset;
- reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ reloc_page = io_mapping_map_atomic_wc(ggtt->mappable,
offset & PAGE_MASK);
iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
@@ -340,7 +341,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
if (offset_in_page(offset) == 0) {
io_mapping_unmap_atomic(reloc_page);
reloc_page =
- io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ io_mapping_map_atomic_wc(ggtt->mappable,
offset);
}
@@ -488,7 +489,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
ret = relocate_entry_cpu(obj, reloc, target_offset);
else if (obj->map_and_fenceable)
ret = relocate_entry_gtt(obj, reloc, target_offset);
- else if (cpu_has_clflush)
+ else if (static_cpu_has(X86_FEATURE_CLFLUSH))
ret = relocate_entry_clflush(obj, reloc, target_offset);
else {
WARN_ONCE(1, "Impossible case in relocation handling\n");
@@ -514,7 +515,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
int remain, ret;
- user_relocs = to_user_ptr(entry->relocs_ptr);
+ user_relocs = u64_to_user_ptr(entry->relocs_ptr);
remain = entry->relocation_count;
while (remain) {
@@ -535,9 +536,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
return ret;
if (r->presumed_offset != offset &&
- __copy_to_user_inatomic(&user_relocs->presumed_offset,
- &r->presumed_offset,
- sizeof(r->presumed_offset))) {
+ __put_user(r->presumed_offset, &user_relocs->presumed_offset)) {
return -EFAULT;
}
@@ -599,7 +598,7 @@ static bool only_mappable_for_reloc(unsigned int flags)
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
- struct intel_engine_cs *ring,
+ struct intel_engine_cs *engine,
bool *need_reloc)
{
struct drm_i915_gem_object *obj = vma->obj;
@@ -713,7 +712,7 @@ eb_vma_misplaced(struct i915_vma *vma)
}
static int
-i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
+i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
struct list_head *vmas,
struct intel_context *ctx,
bool *need_relocs)
@@ -723,10 +722,10 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
struct i915_address_space *vm;
struct list_head ordered_vmas;
struct list_head pinned_vmas;
- bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+ bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4;
int retry;
- i915_gem_retire_requests_ring(ring);
+ i915_gem_retire_requests_ring(engine);
vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
@@ -788,7 +787,9 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
if (eb_vma_misplaced(vma))
ret = i915_vma_unbind(vma);
else
- ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
+ ret = i915_gem_execbuffer_reserve_vma(vma,
+ engine,
+ need_relocs);
if (ret)
goto err;
}
@@ -798,7 +799,8 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
if (drm_mm_node_allocated(&vma->node))
continue;
- ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
+ ret = i915_gem_execbuffer_reserve_vma(vma, engine,
+ need_relocs);
if (ret)
goto err;
}
@@ -821,7 +823,7 @@ static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file,
- struct intel_engine_cs *ring,
+ struct intel_engine_cs *engine,
struct eb_vmas *eb,
struct drm_i915_gem_exec_object2 *exec,
struct intel_context *ctx)
@@ -865,7 +867,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
u64 invalid_offset = (u64)-1;
int j;
- user_relocs = to_user_ptr(exec[i].relocs_ptr);
+ user_relocs = u64_to_user_ptr(exec[i].relocs_ptr);
if (copy_from_user(reloc+total, user_relocs,
exec[i].relocation_count * sizeof(*reloc))) {
@@ -910,7 +912,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
goto err;
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
+ &need_relocs);
if (ret)
goto err;
@@ -938,7 +941,7 @@ static int
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
- const unsigned other_rings = ~intel_ring_flag(req->ring);
+ const unsigned other_rings = ~intel_engine_flag(req->engine);
struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
@@ -948,7 +951,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
struct drm_i915_gem_object *obj = vma->obj;
if (obj->active & other_rings) {
- ret = i915_gem_object_sync(obj, req->ring, &req);
+ ret = i915_gem_object_sync(obj, req->engine, &req);
if (ret)
return ret;
}
@@ -960,7 +963,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
}
if (flush_chipset)
- i915_gem_chipset_flush(req->ring->dev);
+ i915_gem_chipset_flush(req->engine->dev);
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
@@ -1009,7 +1012,7 @@ validate_exec_list(struct drm_device *dev,
invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
for (i = 0; i < count; i++) {
- char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
+ char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
int length; /* limited by fault_in_pages_readable() */
if (exec[i].flags & invalid_flags)
@@ -1062,12 +1065,12 @@ validate_exec_list(struct drm_device *dev,
static struct intel_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
- struct intel_engine_cs *ring, const u32 ctx_id)
+ struct intel_engine_cs *engine, const u32 ctx_id)
{
struct intel_context *ctx = NULL;
struct i915_ctx_hang_stats *hs;
- if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
+ if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
return ERR_PTR(-EINVAL);
ctx = i915_gem_context_get(file->driver_priv, ctx_id);
@@ -1080,8 +1083,8 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
return ERR_PTR(-EIO);
}
- if (i915.enable_execlists && !ctx->engine[ring->id].state) {
- int ret = intel_lr_context_deferred_alloc(ctx, ring);
+ if (i915.enable_execlists && !ctx->engine[engine->id].state) {
+ int ret = intel_lr_context_deferred_alloc(ctx, engine);
if (ret) {
DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
return ERR_PTR(ret);
@@ -1095,7 +1098,7 @@ void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
+ struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
struct i915_vma *vma;
list_for_each_entry(vma, vmas, exec_list) {
@@ -1122,7 +1125,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
i915_gem_request_assign(&obj->last_fenced_req, req);
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
- struct drm_i915_private *dev_priv = to_i915(ring->dev);
+ struct drm_i915_private *dev_priv = to_i915(engine->dev);
list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
&dev_priv->mm.fence_list);
}
@@ -1132,11 +1135,11 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
}
}
-void
+static void
i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
{
/* Unconditionally force add_request to emit a full flush. */
- params->ring->gpu_caches_dirty = true;
+ params->engine->gpu_caches_dirty = true;
/* Add a breadcrumb for the completion of the batch buffer */
__i915_add_request(params->request, params->batch_obj, true);
@@ -1146,11 +1149,11 @@ static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
- if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
+ if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
DRM_DEBUG("sol reset is gen7/rcs only\n");
return -EINVAL;
}
@@ -1160,18 +1163,18 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return ret;
for (i = 0; i < 4; i++) {
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
- intel_ring_emit(ring, 0);
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(engine, GEN7_SO_WRITE_OFFSET(i));
+ intel_ring_emit(engine, 0);
}
- intel_ring_advance(ring);
+ intel_ring_advance(engine);
return 0;
}
static struct drm_i915_gem_object*
-i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
+i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
struct drm_i915_gem_exec_object2 *shadow_exec_entry,
struct eb_vmas *eb,
struct drm_i915_gem_object *batch_obj,
@@ -1183,12 +1186,12 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
struct i915_vma *vma;
int ret;
- shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
+ shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
PAGE_ALIGN(batch_len));
if (IS_ERR(shadow_batch_obj))
return shadow_batch_obj;
- ret = i915_parse_cmds(ring,
+ ret = i915_parse_cmds(engine,
batch_obj,
shadow_batch_obj,
batch_start_offset,
@@ -1229,7 +1232,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
struct list_head *vmas)
{
struct drm_device *dev = params->dev;
- struct intel_engine_cs *ring = params->ring;
+ struct intel_engine_cs *engine = params->engine;
struct drm_i915_private *dev_priv = dev->dev_private;
u64 exec_start, exec_len;
int instp_mode;
@@ -1244,8 +1247,8 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
if (ret)
return ret;
- WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
- "%s didn't clear reload\n", ring->name);
+ WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id),
+ "%s didn't clear reload\n", engine->name);
instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
instp_mask = I915_EXEC_CONSTANTS_MASK;
@@ -1253,7 +1256,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
- if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+ if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
return -EINVAL;
}
@@ -1280,17 +1283,17 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
return -EINVAL;
}
- if (ring == &dev_priv->ring[RCS] &&
+ if (engine == &dev_priv->engine[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(params->request, 4);
if (ret)
return ret;
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, INSTPM);
- intel_ring_emit(ring, instp_mask << 16 | instp_mode);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(engine, INSTPM);
+ intel_ring_emit(engine, instp_mask << 16 | instp_mode);
+ intel_ring_advance(engine);
dev_priv->relative_constants_mode = instp_mode;
}
@@ -1308,7 +1311,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
if (exec_len == 0)
exec_len = params->batch_obj->base.size;
- ret = ring->dispatch_execbuffer(params->request,
+ ret = engine->dispatch_execbuffer(params->request,
exec_start, exec_len,
params->dispatch_flags);
if (ret)
@@ -1317,7 +1320,6 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
i915_gem_execbuffer_move_to_active(vmas, params->request);
- i915_gem_execbuffer_retire_commands(params);
return 0;
}
@@ -1365,7 +1367,7 @@ eb_get_batch(struct eb_vmas *eb)
#define I915_USER_RINGS (4)
-static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = {
+static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
[I915_EXEC_DEFAULT] = RCS,
[I915_EXEC_RENDER] = RCS,
[I915_EXEC_BLT] = BCS,
@@ -1408,12 +1410,12 @@ eb_select_ring(struct drm_i915_private *dev_priv,
return -EINVAL;
}
- *ring = &dev_priv->ring[_VCS(bsd_idx)];
+ *ring = &dev_priv->engine[_VCS(bsd_idx)];
} else {
- *ring = &dev_priv->ring[user_ring_map[user_ring_id]];
+ *ring = &dev_priv->engine[user_ring_map[user_ring_id]];
}
- if (!intel_ring_initialized(*ring)) {
+ if (!intel_engine_initialized(*ring)) {
DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
return -EINVAL;
}
@@ -1427,12 +1429,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_execbuffer2 *args,
struct drm_i915_gem_exec_object2 *exec)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_request *req = NULL;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_i915_gem_exec_object2 shadow_exec_entry;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct intel_context *ctx;
struct i915_address_space *vm;
struct i915_execbuffer_params params_master; /* XXX: will be removed later */
@@ -1459,7 +1462,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (args->flags & I915_EXEC_IS_PINNED)
dispatch_flags |= I915_DISPATCH_PINNED;
- ret = eb_select_ring(dev_priv, file, args, &ring);
+ ret = eb_select_ring(dev_priv, file, args, &engine);
if (ret)
return ret;
@@ -1473,9 +1476,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
return -EINVAL;
}
- if (ring->id != RCS) {
+ if (engine->id != RCS) {
DRM_DEBUG("RS is not available on %s\n",
- ring->name);
+ engine->name);
return -EINVAL;
}
@@ -1488,7 +1491,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto pre_mutex_err;
- ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
+ ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
ret = PTR_ERR(ctx);
@@ -1500,7 +1503,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ctx->ppgtt)
vm = &ctx->ppgtt->base;
else
- vm = &dev_priv->gtt.base;
+ vm = &ggtt->base;
memset(&params_master, 0x00, sizeof(params_master));
@@ -1522,7 +1525,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
+ &need_relocs);
if (ret)
goto err;
@@ -1531,7 +1535,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ret = i915_gem_execbuffer_relocate(eb);
if (ret) {
if (ret == -EFAULT) {
- ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
+ ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
+ engine,
eb, exec, ctx);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
@@ -1547,16 +1552,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
params->args_batch_start_offset = args->batch_start_offset;
- if (i915_needs_cmd_parser(ring) && args->batch_len) {
+ if (i915_needs_cmd_parser(engine) && args->batch_len) {
struct drm_i915_gem_object *parsed_batch_obj;
- parsed_batch_obj = i915_gem_execbuffer_parse(ring,
- &shadow_exec_entry,
- eb,
- batch_obj,
- args->batch_start_offset,
- args->batch_len,
- file->is_master);
+ parsed_batch_obj = i915_gem_execbuffer_parse(engine,
+ &shadow_exec_entry,
+ eb,
+ batch_obj,
+ args->batch_start_offset,
+ args->batch_len,
+ file->is_master);
if (IS_ERR(parsed_batch_obj)) {
ret = PTR_ERR(parsed_batch_obj);
goto err;
@@ -1608,7 +1613,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
/* Allocate a request for this batch buffer nice and early. */
- req = i915_gem_request_alloc(ring, ctx);
+ req = i915_gem_request_alloc(engine, ctx);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
goto err_batch_unpin;
@@ -1616,7 +1621,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ret = i915_gem_request_add_to_client(req, file);
if (ret)
- goto err_batch_unpin;
+ goto err_request;
/*
* Save assorted stuff away to pass through to *_submission().
@@ -1626,13 +1631,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
*/
params->dev = dev;
params->file = file;
- params->ring = ring;
+ params->engine = engine;
params->dispatch_flags = dispatch_flags;
params->batch_obj = batch_obj;
params->ctx = ctx;
params->request = req;
ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
+err_request:
+ i915_gem_execbuffer_retire_commands(params);
err_batch_unpin:
/*
@@ -1649,14 +1656,6 @@ err:
i915_gem_context_unreference(ctx);
eb_destroy(eb);
- /*
- * If the request was created but not successfully submitted then it
- * must be freed again. If it was submitted then it is being tracked
- * on the active request list and no clean up is required here.
- */
- if (ret && !IS_ERR_OR_NULL(req))
- i915_gem_request_cancel(req);
-
mutex_unlock(&dev->struct_mutex);
pre_mutex_err:
@@ -1696,7 +1695,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
return -ENOMEM;
}
ret = copy_from_user(exec_list,
- to_user_ptr(args->buffers_ptr),
+ u64_to_user_ptr(args->buffers_ptr),
sizeof(*exec_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1732,7 +1731,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
if (!ret) {
struct drm_i915_gem_exec_object __user *user_exec_list =
- to_user_ptr(args->buffers_ptr);
+ u64_to_user_ptr(args->buffers_ptr);
/* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++) {
@@ -1775,18 +1774,16 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -EINVAL;
}
- exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
- GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
- if (exec2_list == NULL)
- exec2_list = drm_malloc_ab(sizeof(*exec2_list),
- args->buffer_count);
+ exec2_list = drm_malloc_gfp(args->buffer_count,
+ sizeof(*exec2_list),
+ GFP_TEMPORARY);
if (exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
return -ENOMEM;
}
ret = copy_from_user(exec2_list,
- to_user_ptr(args->buffers_ptr),
+ u64_to_user_ptr(args->buffers_ptr),
sizeof(*exec2_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1799,7 +1796,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
struct drm_i915_gem_exec_object2 __user *user_exec_list =
- to_user_ptr(args->buffers_ptr);
+ u64_to_user_ptr(args->buffers_ptr);
int i;
for (i = 0; i < args->buffer_count; i++) {
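Aside on the to_user_ptr() → u64_to_user_ptr() conversions above: the execbuffer ioctl arguments carry userspace addresses as u64 fields, and turning them back into pointers has to narrow through uintptr_t so the cast stays well-defined on 32-bit kernels. A simplified sketch of the idea (the real kernel macro additionally type-checks that its argument is a u64; the function name below is purely illustrative):

	/* illustrative stand-in for the kernel's u64_to_user_ptr() */
	static inline void __user *u64_field_to_user_ptr(u64 value)
	{
		/* narrow through uintptr_t: the ioctl field is always 64 bit,
		 * the user pointer is whatever width the kernel was built for */
		return (void __user *)(uintptr_t)value;
	}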
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 49e4f26b7..92acdff9d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -658,7 +658,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
unsigned entry,
dma_addr_t addr)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
BUG_ON(entry >= 4);
@@ -667,13 +667,13 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
if (ret)
return ret;
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(ring, entry));
- intel_ring_emit(ring, upper_32_bits(addr));
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(ring, entry));
- intel_ring_emit(ring, lower_32_bits(addr));
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(engine, GEN8_RING_PDP_UDW(engine, entry));
+ intel_ring_emit(engine, upper_32_bits(addr));
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(engine, GEN8_RING_PDP_LDW(engine, entry));
+ intel_ring_emit(engine, lower_32_bits(addr));
+ intel_ring_advance(engine);
return 0;
}
@@ -706,8 +706,7 @@ static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
uint64_t length,
gen8_pte_t scratch_pte)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
gen8_pte_t *pt_vaddr;
unsigned pdpe = gen8_pdpe_index(start);
unsigned pde = gen8_pde_index(start);
@@ -746,7 +745,7 @@ static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
num_entries--;
}
- kunmap_px(ppgtt, pt);
+ kunmap_px(ppgtt, pt_vaddr);
pte = 0;
if (++pde == I915_PDES) {
@@ -762,8 +761,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
uint64_t length,
bool use_scratch)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
I915_CACHE_LLC, use_scratch);
@@ -788,8 +786,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
uint64_t start,
enum i915_cache_level cache_level)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
gen8_pte_t *pt_vaddr;
unsigned pdpe = gen8_pdpe_index(start);
unsigned pde = gen8_pde_index(start);
@@ -829,8 +826,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
enum i915_cache_level cache_level,
u32 unused)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sg_page_iter sg_iter;
__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
@@ -909,11 +905,10 @@ static int gen8_init_scratch(struct i915_address_space *vm)
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
enum vgt_g2v_type msg;
- struct drm_device *dev = ppgtt->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
int i;
- if (USES_FULL_48BIT_PPGTT(dev)) {
+ if (USES_FULL_48BIT_PPGTT(dev_priv)) {
u64 daddr = px_dma(&ppgtt->pml4);
I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
@@ -981,8 +976,7 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
if (intel_vgpu_active(vm->dev))
gen8_ppgtt_notify_vgt(ppgtt, false);
@@ -1216,8 +1210,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
uint64_t start,
uint64_t length)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
unsigned long *new_page_dirs, *new_page_tables;
struct drm_device *dev = vm->dev;
struct i915_page_directory *pd;
@@ -1329,8 +1322,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
uint64_t length)
{
DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_directory_pointer *pdp;
uint64_t pml4e;
int ret = 0;
@@ -1376,8 +1368,7 @@ err_out:
static int gen8_alloc_va_range(struct i915_address_space *vm,
uint64_t start, uint64_t length)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
if (USES_FULL_48BIT_PPGTT(vm->dev))
return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
@@ -1629,6 +1620,7 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv,
struct i915_page_directory *pd,
uint32_t start, uint32_t length)
{
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_page_table *pt;
uint32_t pde, temp;
@@ -1637,7 +1629,7 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv,
/* Make sure write is complete before other code can use this page
* table. Also require for WC mapped PTEs */
- readl(dev_priv->gtt.gsm);
+ readl(ggtt->gsm);
}
static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
@@ -1650,11 +1642,11 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
/* NB: TLBs must be flushed and invalidated before a switch */
- ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
@@ -1662,13 +1654,13 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
if (ret)
return ret;
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
- intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
- intel_ring_emit(ring, PP_DIR_DCLV_2G);
- intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
- intel_ring_emit(ring, get_pd_offset(ppgtt));
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
+ intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
+ intel_ring_emit(engine, PP_DIR_DCLV_2G);
+ intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
+ intel_ring_emit(engine, get_pd_offset(ppgtt));
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
return 0;
}
@@ -1676,22 +1668,22 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
- I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
- I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+ I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
return 0;
}
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
/* NB: TLBs must be flushed and invalidated before a switch */
- ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
@@ -1699,17 +1691,17 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
if (ret)
return ret;
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
- intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
- intel_ring_emit(ring, PP_DIR_DCLV_2G);
- intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
- intel_ring_emit(ring, get_pd_offset(ppgtt));
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
+ intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
+ intel_ring_emit(engine, PP_DIR_DCLV_2G);
+ intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
+ intel_ring_emit(engine, get_pd_offset(ppgtt));
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
/* XXX: RCS is the only one to auto invalidate the TLBs? */
- if (ring->id != RCS) {
- ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ if (engine->id != RCS) {
+ ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
}
@@ -1720,15 +1712,15 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
- I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+ I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
- POSTING_READ(RING_PP_DIR_DCLV(ring));
+ POSTING_READ(RING_PP_DIR_DCLV(engine));
return 0;
}
@@ -1736,12 +1728,11 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
static void gen8_ppgtt_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- int j;
+ struct intel_engine_cs *engine;
- for_each_ring(ring, dev_priv, j) {
+ for_each_engine(engine, dev_priv) {
u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
- I915_WRITE(RING_MODE_GEN7(ring),
+ I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
}
}
@@ -1749,9 +1740,8 @@ static void gen8_ppgtt_enable(struct drm_device *dev)
static void gen7_ppgtt_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
uint32_t ecochk, ecobits;
- int i;
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
@@ -1765,9 +1755,9 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
}
I915_WRITE(GAM_ECOCHK, ecochk);
- for_each_ring(ring, dev_priv, i) {
+ for_each_engine(engine, dev_priv) {
/* GFX_MODE is per-ring on gen7+ */
- I915_WRITE(RING_MODE_GEN7(ring),
+ I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
}
@@ -1796,8 +1786,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
uint64_t length,
bool use_scratch)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
gen6_pte_t *pt_vaddr, scratch_pte;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
@@ -1831,8 +1820,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
uint64_t start,
enum i915_cache_level cache_level, u32 flags)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
gen6_pte_t *pt_vaddr;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned act_pt = first_entry / GEN6_PTES;
@@ -1864,9 +1852,9 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
{
DECLARE_BITMAP(new_page_tables, I915_PDES);
struct drm_device *dev = vm->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_table *pt;
uint32_t start, length, start_save, length_save;
uint32_t pde, temp;
@@ -1932,7 +1920,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
/* Make sure write is complete before other code can use this page
* table. Also require for WC mapped PTEs */
- readl(dev_priv->gtt.gsm);
+ readl(ggtt->gsm);
mark_tlbs_dirty(ppgtt);
return 0;
@@ -1978,8 +1966,7 @@ static void gen6_free_scratch(struct i915_address_space *vm)
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_table *pt;
uint32_t pde;
@@ -1997,7 +1984,8 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
struct i915_address_space *vm = &ppgtt->base;
struct drm_device *dev = ppgtt->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool retried = false;
int ret;
@@ -2005,23 +1993,23 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
* allocator works in address space sizes, so it's multiplied by page
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
- BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
+ BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
ret = gen6_init_scratch(vm);
if (ret)
return ret;
alloc:
- ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
+ ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
&ppgtt->node, GEN6_PD_SIZE,
GEN6_PD_ALIGN, 0,
- 0, dev_priv->gtt.base.total,
+ 0, ggtt->base.total,
DRM_MM_TOPDOWN);
if (ret == -ENOSPC && !retried) {
- ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
+ ret = i915_gem_evict_something(dev, &ggtt->base,
GEN6_PD_SIZE, GEN6_PD_ALIGN,
I915_CACHE_NONE,
- 0, dev_priv->gtt.base.total,
+ 0, ggtt->base.total,
0);
if (ret)
goto err_out;
@@ -2034,7 +2022,7 @@ alloc:
goto err_out;
- if (ppgtt->node.start < dev_priv->gtt.mappable_end)
+ if (ppgtt->node.start < ggtt->mappable_end)
DRM_DEBUG("Forced to use aperture for PDEs\n");
return 0;
@@ -2062,10 +2050,11 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
- ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
+ ppgtt->base.pte_encode = ggtt->base.pte_encode;
if (IS_GEN6(dev)) {
ppgtt->switch_mm = gen6_mm_switch;
} else if (IS_HASWELL(dev)) {
@@ -2095,7 +2084,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->pd.base.ggtt_offset =
ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
- ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
+ ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
@@ -2192,7 +2181,7 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
{
- struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = req->i915;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
if (i915.enable_execlists)
@@ -2263,9 +2252,10 @@ static bool needs_idle_maps(struct drm_device *dev)
static bool do_idling(struct drm_i915_private *dev_priv)
{
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool ret = dev_priv->mm.interruptible;
- if (unlikely(dev_priv->gtt.do_idle_maps)) {
+ if (unlikely(ggtt->do_idle_maps)) {
dev_priv->mm.interruptible = false;
if (i915_gpu_idle(dev_priv->dev)) {
DRM_ERROR("Couldn't idle GPU\n");
@@ -2279,22 +2269,23 @@ static bool do_idling(struct drm_i915_private *dev_priv)
static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
- if (unlikely(dev_priv->gtt.do_idle_maps))
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
+ if (unlikely(ggtt->do_idle_maps))
dev_priv->mm.interruptible = interruptible;
}
void i915_check_and_clear_faults(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- int i;
+ struct intel_engine_cs *engine;
if (INTEL_INFO(dev)->gen < 6)
return;
- for_each_ring(ring, dev_priv, i) {
+ for_each_engine(engine, dev_priv) {
u32 fault_reg;
- fault_reg = I915_READ(RING_FAULT_REG(ring));
+ fault_reg = I915_READ(RING_FAULT_REG(engine));
if (fault_reg & RING_FAULT_VALID) {
DRM_DEBUG_DRIVER("Unexpected fault\n"
"\tAddr: 0x%08lx\n"
@@ -2305,16 +2296,16 @@ void i915_check_and_clear_faults(struct drm_device *dev)
fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
RING_FAULT_SRCID(fault_reg),
RING_FAULT_FAULT_TYPE(fault_reg));
- I915_WRITE(RING_FAULT_REG(ring),
+ I915_WRITE(RING_FAULT_REG(engine),
fault_reg & ~RING_FAULT_VALID);
}
}
- POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
+ POSTING_READ(RING_FAULT_REG(&dev_priv->engine[RCS]));
}
static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev_priv->dev)->gen < 6) {
+ if (INTEL_INFO(dev_priv)->gen < 6) {
intel_gtt_chipset_flush();
} else {
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
@@ -2324,7 +2315,8 @@ static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
/* Don't bother messing with faults pre GEN6 as we have little
* documentation supporting that it's a good idea.
@@ -2334,10 +2326,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
i915_check_and_clear_faults(dev);
- dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
- dev_priv->gtt.base.start,
- dev_priv->gtt.base.total,
- true);
+ ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
+ true);
i915_ggtt_flush(dev_priv);
}
@@ -2367,10 +2357,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
uint64_t start,
enum i915_cache_level level, u32 unused)
{
- struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned first_entry = start >> PAGE_SHIFT;
gen8_pte_t __iomem *gtt_entries =
- (gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+ (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0; /* shut up gcc */
@@ -2444,10 +2435,11 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
uint64_t start,
enum i915_cache_level level, u32 flags)
{
- struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned first_entry = start >> PAGE_SHIFT;
gen6_pte_t __iomem *gtt_entries =
- (gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+ (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0;
@@ -2487,12 +2479,13 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
uint64_t length,
bool use_scratch)
{
- struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
gen8_pte_t scratch_pte, __iomem *gtt_base =
- (gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
- const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+ (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
+ const int max_entries = ggtt_total_entries(ggtt) - first_entry;
int i;
int rpm_atomic_seq;
@@ -2518,12 +2511,13 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
uint64_t length,
bool use_scratch)
{
- struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
gen6_pte_t scratch_pte, __iomem *gtt_base =
- (gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
- const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+ (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
+ const int max_entries = ggtt_total_entries(ggtt) - first_entry;
int i;
int rpm_atomic_seq;
@@ -2613,32 +2607,31 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
- struct drm_device *dev = vma->vm->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj = vma->obj;
- struct sg_table *pages = obj->pages;
- u32 pte_flags = 0;
+ u32 pte_flags;
int ret;
ret = i915_get_ggtt_vma_pages(vma);
if (ret)
return ret;
- pages = vma->ggtt_view.pages;
/* Currently applicable only to VLV */
- if (obj->gt_ro)
+ pte_flags = 0;
+ if (vma->obj->gt_ro)
pte_flags |= PTE_READ_ONLY;
if (flags & GLOBAL_BIND) {
- vma->vm->insert_entries(vma->vm, pages,
+ vma->vm->insert_entries(vma->vm,
+ vma->ggtt_view.pages,
vma->node.start,
cache_level, pte_flags);
}
if (flags & LOCAL_BIND) {
- struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
- appgtt->base.insert_entries(&appgtt->base, pages,
+ struct i915_hw_ppgtt *appgtt =
+ to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+ appgtt->base.insert_entries(&appgtt->base,
+ vma->ggtt_view.pages,
vma->node.start,
cache_level, pte_flags);
}
@@ -2717,8 +2710,8 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
* aperture. One page should be enough to keep any prefetching inside
* of the aperture.
*/
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_mm_node *entry;
struct drm_i915_gem_object *obj;
unsigned long hole_start, hole_end;
@@ -2726,13 +2719,13 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
BUG_ON(mappable_end > end);
- ggtt_vm->start = start;
+ ggtt->base.start = start;
/* Subtract the guard page before address space initialization to
* shrink the range used by drm_mm */
- ggtt_vm->total = end - start - PAGE_SIZE;
- i915_address_space_init(ggtt_vm, dev_priv);
- ggtt_vm->total += PAGE_SIZE;
+ ggtt->base.total = end - start - PAGE_SIZE;
+ i915_address_space_init(&ggtt->base, dev_priv);
+ ggtt->base.total += PAGE_SIZE;
if (intel_vgpu_active(dev)) {
ret = intel_vgt_balloon(dev);
@@ -2741,36 +2734,36 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
}
if (!HAS_LLC(dev))
- ggtt_vm->mm.color_adjust = i915_gtt_color_adjust;
+ ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
/* Mark any preallocated objects as occupied */
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
+ struct i915_vma *vma = i915_gem_obj_to_vma(obj, &ggtt->base);
DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n",
i915_gem_obj_ggtt_offset(obj), obj->base.size);
WARN_ON(i915_gem_obj_ggtt_bound(obj));
- ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
+ ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
if (ret) {
DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
return ret;
}
vma->bound |= GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
- list_add_tail(&vma->vm_link, &ggtt_vm->inactive_list);
+ list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
}
/* Clear any non-preallocated blocks */
- drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
+ drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
- ggtt_vm->clear_range(ggtt_vm, hole_start,
+ ggtt->base.clear_range(&ggtt->base, hole_start,
hole_end - hole_start, true);
}
/* And finally clear the reserved guard page */
- ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
+ ggtt->base.clear_range(&ggtt->base, end - PAGE_SIZE, PAGE_SIZE, true);
if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
struct i915_hw_ppgtt *ppgtt;
@@ -2801,28 +2794,33 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
true);
dev_priv->mm.aliasing_ppgtt = ppgtt;
- WARN_ON(dev_priv->gtt.base.bind_vma != ggtt_bind_vma);
- dev_priv->gtt.base.bind_vma = aliasing_gtt_bind_vma;
+ WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
+ ggtt->base.bind_vma = aliasing_gtt_bind_vma;
}
return 0;
}
-void i915_gem_init_global_gtt(struct drm_device *dev)
+/**
+ * i915_gem_init_ggtt - Initialize GEM for Global GTT
+ * @dev: DRM device
+ */
+void i915_gem_init_ggtt(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u64 gtt_size, mappable_size;
-
- gtt_size = dev_priv->gtt.base.total;
- mappable_size = dev_priv->gtt.mappable_end;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
- i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
+ i915_gem_setup_global_gtt(dev, 0, ggtt->mappable_end, ggtt->base.total);
}
-void i915_global_gtt_cleanup(struct drm_device *dev)
+/**
+ * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
+ * @dev: DRM device
+ */
+void i915_ggtt_cleanup_hw(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_address_space *vm = &dev_priv->gtt.base;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
if (dev_priv->mm.aliasing_ppgtt) {
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
@@ -2832,15 +2830,15 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
i915_gem_cleanup_stolen(dev);
- if (drm_mm_initialized(&vm->mm)) {
+ if (drm_mm_initialized(&ggtt->base.mm)) {
if (intel_vgpu_active(dev))
intel_vgt_deballoon();
- drm_mm_takedown(&vm->mm);
- list_del(&vm->global_link);
+ drm_mm_takedown(&ggtt->base.mm);
+ list_del(&ggtt->base.global_link);
}
- vm->cleanup(vm);
+ ggtt->base.cleanup(&ggtt->base);
}
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -2924,13 +2922,14 @@ static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
static int ggtt_probe_common(struct drm_device *dev,
size_t gtt_size)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_page_scratch *scratch_page;
- phys_addr_t gtt_phys_addr;
+ phys_addr_t ggtt_phys_addr;
/* For Modern GENs the PTEs and register space are split in the BAR */
- gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
- (pci_resource_len(dev->pdev, 0) / 2);
+ ggtt_phys_addr = pci_resource_start(dev->pdev, 0) +
+ (pci_resource_len(dev->pdev, 0) / 2);
/*
* On BXT writes larger than 64 bit to the GTT pagetable range will be
@@ -2940,10 +2939,10 @@ static int ggtt_probe_common(struct drm_device *dev,
* readback check when writing GTT PTE entries.
*/
if (IS_BROXTON(dev))
- dev_priv->gtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size);
+ ggtt->gsm = ioremap_nocache(ggtt_phys_addr, gtt_size);
else
- dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
- if (!dev_priv->gtt.gsm) {
+ ggtt->gsm = ioremap_wc(ggtt_phys_addr, gtt_size);
+ if (!ggtt->gsm) {
DRM_ERROR("Failed to map the gtt page table\n");
return -ENOMEM;
}
@@ -2952,11 +2951,11 @@ static int ggtt_probe_common(struct drm_device *dev,
if (IS_ERR(scratch_page)) {
DRM_ERROR("Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
- iounmap(dev_priv->gtt.gsm);
+ iounmap(ggtt->gsm);
return PTR_ERR(scratch_page);
}
- dev_priv->gtt.base.scratch_page = scratch_page;
+ ggtt->base.scratch_page = scratch_page;
return 0;
}
@@ -2977,7 +2976,7 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
- if (!USES_PPGTT(dev_priv->dev))
+ if (!USES_PPGTT(dev_priv))
/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
* so RTL will always use the value corresponding to
* pat_sel = 000".
@@ -3034,20 +3033,16 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
}
-static int gen8_gmch_probe(struct drm_device *dev,
- u64 *gtt_total,
- size_t *stolen,
- phys_addr_t *mappable_base,
- u64 *mappable_end)
+static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u64 gtt_size;
+ struct drm_device *dev = ggtt->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u16 snb_gmch_ctl;
int ret;
/* TODO: We're not aware of mappable constraints on gen8 yet */
- *mappable_base = pci_resource_start(dev->pdev, 2);
- *mappable_end = pci_resource_len(dev->pdev, 2);
+ ggtt->mappable_base = pci_resource_start(dev->pdev, 2);
+ ggtt->mappable_end = pci_resource_len(dev->pdev, 2);
if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
@@ -3055,56 +3050,50 @@ static int gen8_gmch_probe(struct drm_device *dev,
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
if (INTEL_INFO(dev)->gen >= 9) {
- *stolen = gen9_get_stolen_size(snb_gmch_ctl);
- gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+ ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
+ ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl);
} else if (IS_CHERRYVIEW(dev)) {
- *stolen = chv_get_stolen_size(snb_gmch_ctl);
- gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
+ ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
+ ggtt->size = chv_get_total_gtt_size(snb_gmch_ctl);
} else {
- *stolen = gen8_get_stolen_size(snb_gmch_ctl);
- gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+ ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
+ ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl);
}
- *gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
+ ggtt->base.total = (ggtt->size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);
- ret = ggtt_probe_common(dev, gtt_size);
-
- dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
- dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
- dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
- dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
+ ret = ggtt_probe_common(dev, ggtt->size);
+ ggtt->base.clear_range = gen8_ggtt_clear_range;
if (IS_CHERRYVIEW(dev_priv))
- dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries__BKL;
+ ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
+ else
+ ggtt->base.insert_entries = gen8_ggtt_insert_entries;
+ ggtt->base.bind_vma = ggtt_bind_vma;
+ ggtt->base.unbind_vma = ggtt_unbind_vma;
return ret;
}
-static int gen6_gmch_probe(struct drm_device *dev,
- u64 *gtt_total,
- size_t *stolen,
- phys_addr_t *mappable_base,
- u64 *mappable_end)
+static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned int gtt_size;
+ struct drm_device *dev = ggtt->base.dev;
u16 snb_gmch_ctl;
int ret;
- *mappable_base = pci_resource_start(dev->pdev, 2);
- *mappable_end = pci_resource_len(dev->pdev, 2);
+ ggtt->mappable_base = pci_resource_start(dev->pdev, 2);
+ ggtt->mappable_end = pci_resource_len(dev->pdev, 2);
/* 64/512MB is the current min/max we actually know of, but this is just
* a coarse sanity check.
*/
- if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
- DRM_ERROR("Unknown GMADR size (%llx)\n",
- dev_priv->gtt.mappable_end);
+ if ((ggtt->mappable_end < (64<<20) || (ggtt->mappable_end > (512<<20)))) {
+ DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
return -ENXIO;
}
@@ -3112,37 +3101,32 @@ static int gen6_gmch_probe(struct drm_device *dev,
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- *stolen = gen6_get_stolen_size(snb_gmch_ctl);
+ ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
+ ggtt->size = gen6_get_total_gtt_size(snb_gmch_ctl);
+ ggtt->base.total = (ggtt->size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
- gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
- *gtt_total = (gtt_size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
+ ret = ggtt_probe_common(dev, ggtt->size);
- ret = ggtt_probe_common(dev, gtt_size);
-
- dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
- dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
- dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
- dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
+ ggtt->base.clear_range = gen6_ggtt_clear_range;
+ ggtt->base.insert_entries = gen6_ggtt_insert_entries;
+ ggtt->base.bind_vma = ggtt_bind_vma;
+ ggtt->base.unbind_vma = ggtt_unbind_vma;
return ret;
}
static void gen6_gmch_remove(struct i915_address_space *vm)
{
+ struct i915_ggtt *ggtt = container_of(vm, struct i915_ggtt, base);
- struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
-
- iounmap(gtt->gsm);
+ iounmap(ggtt->gsm);
free_scratch_page(vm->dev, vm->scratch_page);
}
-static int i915_gmch_probe(struct drm_device *dev,
- u64 *gtt_total,
- size_t *stolen,
- phys_addr_t *mappable_base,
- u64 *mappable_end)
+static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_device *dev = ggtt->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
@@ -3151,15 +3135,16 @@ static int i915_gmch_probe(struct drm_device *dev,
return -EIO;
}
- intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
+ intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size,
+ &ggtt->mappable_base, &ggtt->mappable_end);
- dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
- dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
- dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
- dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
- dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
+ ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev);
+ ggtt->base.insert_entries = i915_ggtt_insert_entries;
+ ggtt->base.clear_range = i915_ggtt_clear_range;
+ ggtt->base.bind_vma = ggtt_bind_vma;
+ ggtt->base.unbind_vma = ggtt_unbind_vma;
- if (unlikely(dev_priv->gtt.do_idle_maps))
+ if (unlikely(ggtt->do_idle_maps))
DRM_INFO("applying Ironlake quirks for intel_iommu\n");
return 0;
@@ -3170,41 +3155,53 @@ static void i915_gmch_remove(struct i915_address_space *vm)
intel_gmch_remove();
}
-int i915_gem_gtt_init(struct drm_device *dev)
+/**
+ * i915_ggtt_init_hw - Initialize GGTT hardware
+ * @dev: DRM device
+ */
+int i915_ggtt_init_hw(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_gtt *gtt = &dev_priv->gtt;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
if (INTEL_INFO(dev)->gen <= 5) {
- gtt->gtt_probe = i915_gmch_probe;
- gtt->base.cleanup = i915_gmch_remove;
+ ggtt->probe = i915_gmch_probe;
+ ggtt->base.cleanup = i915_gmch_remove;
} else if (INTEL_INFO(dev)->gen < 8) {
- gtt->gtt_probe = gen6_gmch_probe;
- gtt->base.cleanup = gen6_gmch_remove;
- if (IS_HASWELL(dev) && dev_priv->ellc_size)
- gtt->base.pte_encode = iris_pte_encode;
+ ggtt->probe = gen6_gmch_probe;
+ ggtt->base.cleanup = gen6_gmch_remove;
+
+ if (HAS_EDRAM(dev))
+ ggtt->base.pte_encode = iris_pte_encode;
else if (IS_HASWELL(dev))
- gtt->base.pte_encode = hsw_pte_encode;
+ ggtt->base.pte_encode = hsw_pte_encode;
else if (IS_VALLEYVIEW(dev))
- gtt->base.pte_encode = byt_pte_encode;
+ ggtt->base.pte_encode = byt_pte_encode;
else if (INTEL_INFO(dev)->gen >= 7)
- gtt->base.pte_encode = ivb_pte_encode;
+ ggtt->base.pte_encode = ivb_pte_encode;
else
- gtt->base.pte_encode = snb_pte_encode;
+ ggtt->base.pte_encode = snb_pte_encode;
} else {
- dev_priv->gtt.gtt_probe = gen8_gmch_probe;
- dev_priv->gtt.base.cleanup = gen6_gmch_remove;
+ ggtt->probe = gen8_gmch_probe;
+ ggtt->base.cleanup = gen6_gmch_remove;
}
- gtt->base.dev = dev;
- gtt->base.is_ggtt = true;
+ ggtt->base.dev = dev;
+ ggtt->base.is_ggtt = true;
- ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
- &gtt->mappable_base, &gtt->mappable_end);
+ ret = ggtt->probe(ggtt);
if (ret)
return ret;
+ if ((ggtt->base.total - 1) >> 32) {
+ DRM_ERROR("We never expected a Global GTT with more than 32bits"
+ "of address space! Found %lldM!\n",
+ ggtt->base.total >> 20);
+ ggtt->base.total = 1ULL << 32;
+ ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
+ }
+
/*
* Initialise stolen early so that we may reserve preallocated
* objects for the BIOS to KMS transition.
@@ -3215,9 +3212,9 @@ int i915_gem_gtt_init(struct drm_device *dev)
/* GMADR is the PCI mmio aperture into the global GTT. */
DRM_INFO("Memory usable by graphics device = %lluM\n",
- gtt->base.total >> 20);
- DRM_DEBUG_DRIVER("GMADR size = %lldM\n", gtt->mappable_end >> 20);
- DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
+ ggtt->base.total >> 20);
+ DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
+ DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", ggtt->stolen_size >> 20);
#ifdef CONFIG_INTEL_IOMMU
if (intel_iommu_gfx_mapped)
DRM_INFO("VT-d active for gfx access\n");
@@ -3234,33 +3231,38 @@ int i915_gem_gtt_init(struct drm_device *dev)
return 0;
out_gtt_cleanup:
- gtt->base.cleanup(&dev_priv->gtt.base);
+ ggtt->base.cleanup(&ggtt->base);
return ret;
}
+int i915_ggtt_enable_hw(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
+ return -EIO;
+
+ return 0;
+}
+
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj;
- struct i915_address_space *vm;
struct i915_vma *vma;
bool flush;
i915_check_and_clear_faults(dev);
/* First fill our portion of the GTT with scratch pages */
- dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
- dev_priv->gtt.base.start,
- dev_priv->gtt.base.total,
- true);
+ ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
+ true);
/* Cache flush objects bound into GGTT and rebind them. */
- vm = &dev_priv->gtt.base;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
flush = false;
list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (vma->vm != vm)
+ if (vma->vm != &ggtt->base)
continue;
WARN_ON(i915_vma_bind(vma, obj->cache_level,
@@ -3283,15 +3285,17 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
}
if (USES_PPGTT(dev)) {
+ struct i915_address_space *vm;
+
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
/* TODO: Perhaps it shouldn't be gen6 specific */
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt,
- base);
+ struct i915_hw_ppgtt *ppgtt;
- if (i915_is_ggtt(vm))
+ if (vm->is_ggtt)
ppgtt = dev_priv->mm.aliasing_ppgtt;
+ else
+ ppgtt = i915_vm_to_ppgtt(vm);
gen6_write_page_range(dev_priv, &ppgtt->pd,
0, ppgtt->base.total);
@@ -3350,19 +3354,13 @@ struct i915_vma *
i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
- struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
- struct i915_vma *vma;
-
- if (WARN_ON(!view))
- return ERR_PTR(-EINVAL);
-
- vma = i915_gem_obj_to_ggtt_view(obj, view);
-
- if (IS_ERR(vma))
- return vma;
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
if (!vma)
- vma = __i915_gem_vma_create(obj, ggtt, view);
+ vma = __i915_gem_vma_create(obj, &ggtt->base, view);
return vma;
@@ -3377,11 +3375,6 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
unsigned int column, row;
unsigned int src_idx;
- if (!sg) {
- st->nents = 0;
- sg = st->sgl;
- }
-
for (column = 0; column < width; column++) {
src_idx = stride * (height - 1) + column;
for (row = 0; row < height; row++) {
@@ -3405,7 +3398,7 @@ static struct sg_table *
intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
struct drm_i915_gem_object *obj)
{
- unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
+ unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height;
unsigned int size_pages_uv;
struct sg_page_iter sg_iter;
unsigned long i;
@@ -3416,14 +3409,15 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
int ret = -ENOMEM;
/* Allocate a temporary list of source pages for random access. */
- page_addr_list = drm_malloc_ab(obj->base.size / PAGE_SIZE,
- sizeof(dma_addr_t));
+ page_addr_list = drm_malloc_gfp(obj->base.size / PAGE_SIZE,
+ sizeof(dma_addr_t),
+ GFP_TEMPORARY);
if (!page_addr_list)
return ERR_PTR(ret);
/* Account for UV plane with NV12. */
if (rot_info->pixel_format == DRM_FORMAT_NV12)
- size_pages_uv = rot_info->size_uv >> PAGE_SHIFT;
+ size_pages_uv = rot_info->plane[1].width * rot_info->plane[1].height;
else
size_pages_uv = 0;
@@ -3443,11 +3437,14 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
i++;
}
+ st->nents = 0;
+ sg = st->sgl;
+
/* Rotate the pages. */
sg = rotate_pages(page_addr_list, 0,
- rot_info->width_pages, rot_info->height_pages,
- rot_info->width_pages,
- st, NULL);
+ rot_info->plane[0].width, rot_info->plane[0].height,
+ rot_info->plane[0].width,
+ st, sg);
/* Append the UV plane if NV12. */
if (rot_info->pixel_format == DRM_FORMAT_NV12) {
@@ -3459,18 +3456,15 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
rot_info->uv_start_page = uv_start_page;
- rotate_pages(page_addr_list, uv_start_page,
- rot_info->width_pages_uv,
- rot_info->height_pages_uv,
- rot_info->width_pages_uv,
- st, sg);
+ sg = rotate_pages(page_addr_list, rot_info->uv_start_page,
+ rot_info->plane[1].width, rot_info->plane[1].height,
+ rot_info->plane[1].width,
+ st, sg);
}
- DRM_DEBUG_KMS(
- "Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0)).\n",
- obj->base.size, rot_info->pitch, rot_info->height,
- rot_info->pixel_format, rot_info->width_pages,
- rot_info->height_pages, size_pages + size_pages_uv,
+ DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages (%u plane 0)).\n",
+ obj->base.size, rot_info->plane[0].width,
+ rot_info->plane[0].height, size_pages + size_pages_uv,
size_pages);
drm_free_large(page_addr_list);
@@ -3482,11 +3476,9 @@ err_sg_alloc:
err_st_alloc:
drm_free_large(page_addr_list);
- DRM_DEBUG_KMS(
- "Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0))\n",
- obj->base.size, ret, rot_info->pitch, rot_info->height,
- rot_info->pixel_format, rot_info->width_pages,
- rot_info->height_pages, size_pages + size_pages_uv,
+ DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%d) (%ux%u tiles, %u pages (%u plane 0))\n",
+ obj->base.size, ret, rot_info->plane[0].width,
+ rot_info->plane[0].height, size_pages + size_pages_uv,
size_pages);
return ERR_PTR(ret);
}
@@ -3634,7 +3626,7 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
if (view->type == I915_GGTT_VIEW_NORMAL) {
return obj->base.size;
} else if (view->type == I915_GGTT_VIEW_ROTATED) {
- return view->params.rotated.size;
+ return intel_rotation_info_size(&view->params.rotated) << PAGE_SHIFT;
} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
return view->params.partial.size << PAGE_SHIFT;
} else {
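A change repeated throughout the hunks above replaces the open-coded container_of(vm, struct i915_hw_ppgtt, base) with an i915_vm_to_ppgtt() helper. The helper itself is not part of this diff; assuming it is the thin wrapper the call sites imply, it amounts to:

	/* sketch: recover the enclosing ppgtt from its embedded address space */
	static inline struct i915_hw_ppgtt *
	i915_vm_to_ppgtt(struct i915_address_space *vm)
	{
		WARN_ON(vm->is_ggtt);	/* the GGTT has no enclosing ppgtt */
		return container_of(vm, struct i915_hw_ppgtt, base);
	}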
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 8774f1ba4..0008543d5 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -42,7 +42,7 @@ typedef uint64_t gen8_pde_t;
typedef uint64_t gen8_ppgtt_pdpe_t;
typedef uint64_t gen8_ppgtt_pml4e_t;
-#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
+#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
@@ -135,16 +135,13 @@ enum i915_ggtt_view_type {
};
struct intel_rotation_info {
- unsigned int height;
- unsigned int pitch;
unsigned int uv_offset;
uint32_t pixel_format;
- uint64_t fb_modifier;
- unsigned int width_pages, height_pages;
- uint64_t size;
- unsigned int width_pages_uv, height_pages_uv;
- uint64_t size_uv;
unsigned int uv_start_page;
+ struct {
+ /* tiles */
+ unsigned int width, height;
+ } plane[2];
};
struct i915_ggtt_view {
@@ -342,13 +339,14 @@ struct i915_address_space {
* and correct (in cases like swizzling). That region is referred to as GMADR in
* the spec.
*/
-struct i915_gtt {
+struct i915_ggtt {
struct i915_address_space base;
size_t stolen_size; /* Total size of stolen memory */
size_t stolen_usable_size; /* Total size minus BIOS reserved */
size_t stolen_reserved_base;
size_t stolen_reserved_size;
+ size_t size; /* Total size of Global GTT */
u64 mappable_end; /* End offset that we can CPU map */
struct io_mapping *mappable; /* Mapping to our CPU mappable region */
phys_addr_t mappable_base; /* PA of our GMADR */
@@ -360,10 +358,7 @@ struct i915_gtt {
int mtrr;
- /* global gtt ops */
- int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total,
- size_t *stolen, phys_addr_t *mappable_base,
- u64 *mappable_end);
+ int (*probe)(struct i915_ggtt *ggtt);
};
struct i915_hw_ppgtt {
@@ -518,10 +513,10 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
px_dma(ppgtt->base.scratch_pd);
}
-int i915_gem_gtt_init(struct drm_device *dev);
-void i915_gem_init_global_gtt(struct drm_device *dev);
-void i915_global_gtt_cleanup(struct drm_device *dev);
-
+int i915_ggtt_init_hw(struct drm_device *dev);
+int i915_ggtt_enable_hw(struct drm_device *dev);
+void i915_gem_init_ggtt(struct drm_device *dev);
+void i915_ggtt_cleanup_hw(struct drm_device *dev);
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
int i915_ppgtt_init_hw(struct drm_device *dev);
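The ring → engine rename also converts loops from for_each_ring(ring, dev_priv, i) to the index-free for_each_engine(engine, dev_priv), which is why several loop counters disappear in the hunks above. The macro is not shown in this diff; a plausible shape, assuming an engine[] array on struct drm_i915_private (the diff references dev_priv->engine[RCS]) and an "is this engine set up" predicate whose name here is only illustrative, would be:

	/* sketch only: walk every engine slot, skipping ones never initialised */
	#define for_each_engine(engine__, dev_priv__) \
		for ((engine__) = &(dev_priv__)->engine[0]; \
		     (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
		     (engine__)++) \
			if (!intel_engine_initialized(engine__)) {} else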
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index fc7e6d5c6..71611bf21 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -169,15 +169,15 @@ void i915_gem_render_state_fini(struct render_state *so)
drm_gem_object_unreference(&so->obj->base);
}
-int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
+int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
struct render_state *so)
{
int ret;
- if (WARN_ON(ring->id != RCS))
+ if (WARN_ON(engine->id != RCS))
return -ENOENT;
- ret = render_state_init(so, ring->dev);
+ ret = render_state_init(so, engine->dev);
if (ret)
return ret;
@@ -198,21 +198,21 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
struct render_state so;
int ret;
- ret = i915_gem_render_state_prepare(req->ring, &so);
+ ret = i915_gem_render_state_prepare(req->engine, &so);
if (ret)
return ret;
if (so.rodata == NULL)
return 0;
- ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset,
+ ret = req->engine->dispatch_execbuffer(req, so.ggtt_offset,
so.rodata->batch_items * 4,
I915_DISPATCH_SECURE);
if (ret)
goto out;
if (so.aux_batch_size > 8) {
- ret = req->ring->dispatch_execbuffer(req,
+ ret = req->engine->dispatch_execbuffer(req,
(so.ggtt_offset +
so.aux_batch_offset),
so.aux_batch_size,
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index e641bb093..6aaa3a10a 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -43,7 +43,7 @@ struct render_state {
int i915_gem_render_state_init(struct drm_i915_gem_request *req);
void i915_gem_render_state_fini(struct render_state *so);
-int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
+int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
struct render_state *so);
#endif /* _I915_GEM_RENDER_STATE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 3af40616b..66571466e 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -28,6 +28,7 @@
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
+#include <linux/vmalloc.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
@@ -69,6 +70,10 @@ static bool swap_available(void)
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
+ /* Only shmemfs objects are backed by swap */
+ if (!obj->base.filp)
+ return false;
+
/* Only report true if by unbinding the object and putting its pages
* we can actually make forward progress towards freeing physical
* pages.
@@ -166,6 +171,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
obj->madv != I915_MADV_DONTNEED)
continue;
+ if (flags & I915_SHRINK_VMAPS &&
+ !is_vmalloc_addr(obj->mapping))
+ continue;
+
if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
continue;
@@ -246,7 +255,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
- if (obj->pages_pin_count == 0)
+ if (can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
@@ -288,67 +297,82 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
return freed;
}
-static int
-i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
-{
- struct drm_i915_private *dev_priv =
- container_of(nb, struct drm_i915_private, mm.oom_notifier);
- struct drm_device *dev = dev_priv->dev;
- struct drm_i915_gem_object *obj;
- unsigned long timeout = msecs_to_jiffies(5000) + 1;
- unsigned long pinned, bound, unbound, freed_pages;
+struct shrinker_lock_uninterruptible {
bool was_interruptible;
bool unlock;
+};
+
+static bool
+i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
+ struct shrinker_lock_uninterruptible *slu,
+ int timeout_ms)
+{
+ unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;
- while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
+ while (!i915_gem_shrinker_lock(dev_priv->dev, &slu->unlock)) {
schedule_timeout_killable(1);
if (fatal_signal_pending(current))
- return NOTIFY_DONE;
- }
- if (timeout == 0) {
- pr_err("Unable to purge GPU memory due lock contention.\n");
- return NOTIFY_DONE;
+ return false;
+ if (--timeout == 0) {
+ pr_err("Unable to lock GPU to purge memory.\n");
+ return false;
+ }
}
- was_interruptible = dev_priv->mm.interruptible;
+ slu->was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
+ return true;
+}
- freed_pages = i915_gem_shrink_all(dev_priv);
+static void
+i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
+ struct shrinker_lock_uninterruptible *slu)
+{
+ dev_priv->mm.interruptible = slu->was_interruptible;
+ if (slu->unlock)
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+}
+
+static int
+i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(nb, struct drm_i915_private, mm.oom_notifier);
+ struct shrinker_lock_uninterruptible slu;
+ struct drm_i915_gem_object *obj;
+ unsigned long unevictable, bound, unbound, freed_pages;
- dev_priv->mm.interruptible = was_interruptible;
+ if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
+ return NOTIFY_DONE;
+
+ freed_pages = i915_gem_shrink_all(dev_priv);
/* Because we may be allocating inside our own driver, we cannot
* assert that there are no objects with pinned pages that are not
* being pointed to by hardware.
*/
- unbound = bound = pinned = 0;
+ unbound = bound = unevictable = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
- if (!obj->base.filp) /* not backed by a freeable object */
- continue;
-
- if (obj->pages_pin_count)
- pinned += obj->base.size;
+ if (!can_release_pages(obj))
+ unevictable += obj->base.size >> PAGE_SHIFT;
else
- unbound += obj->base.size;
+ unbound += obj->base.size >> PAGE_SHIFT;
}
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if (!obj->base.filp)
- continue;
-
- if (obj->pages_pin_count)
- pinned += obj->base.size;
+ if (!can_release_pages(obj))
+ unevictable += obj->base.size >> PAGE_SHIFT;
else
- bound += obj->base.size;
+ bound += obj->base.size >> PAGE_SHIFT;
}
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
+ i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
if (freed_pages || unbound || bound)
- pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
- freed_pages << PAGE_SHIFT, pinned);
+ pr_info("Purging GPU memory, %lu pages freed, "
+ "%lu pages still pinned.\n",
+ freed_pages, unevictable);
if (unbound || bound)
- pr_err("%lu and %lu bytes still available in the "
+ pr_err("%lu and %lu pages still available in the "
"bound and unbound GPU page lists.\n",
bound, unbound);
@@ -356,6 +380,29 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
return NOTIFY_DONE;
}
+static int
+i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(nb, struct drm_i915_private, mm.vmap_notifier);
+ struct shrinker_lock_uninterruptible slu;
+ unsigned long freed_pages;
+
+ if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
+ return NOTIFY_DONE;
+
+ freed_pages = i915_gem_shrink(dev_priv, -1UL,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_ACTIVE |
+ I915_SHRINK_VMAPS);
+
+ i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
+
+ *(unsigned long *)ptr += freed_pages;
+ return NOTIFY_DONE;
+}
+
/**
* i915_gem_shrinker_init - Initialize i915 shrinker
* @dev_priv: i915 device
@@ -371,6 +418,9 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));
+
+ dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
+ WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
}
/**
@@ -381,6 +431,7 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
*/
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
{
+ WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
unregister_shrinker(&dev_priv->mm.shrinker);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 2e6e9fb6f..44004e3f0 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -55,8 +55,10 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
return -ENODEV;
/* See the comment at the drm_mm_init() call for more about this check.
- * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
- if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096)
+ * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete)
+ */
+ if (start < 4096 && (IS_GEN8(dev_priv) ||
+ IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)))
start = 4096;
mutex_lock(&dev_priv->mm.stolen_lock);
@@ -72,9 +74,11 @@ int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node, u64 size,
unsigned alignment)
{
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
- alignment, 0,
- dev_priv->gtt.stolen_usable_size);
+ alignment, 0,
+ ggtt->stolen_usable_size);
}
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
@@ -87,14 +91,15 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct resource *r;
u32 base;
/* Almost universally we can find the Graphics Base of Stolen Memory
- * at offset 0x5c in the igfx configuration space. On a few (desktop)
- * machines this is also mirrored in the bridge device at different
- * locations, or in the MCHBAR.
+ * at register BSM (0x5c) in the igfx configuration space. On a few
+ * (desktop) machines this is also mirrored in the bridge device at
+ * different locations, or in the MCHBAR.
*
* On 865 we just check the TOUD register.
*
@@ -104,9 +109,11 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
*/
base = 0;
if (INTEL_INFO(dev)->gen >= 3) {
- /* Read Graphics Base of Stolen Memory directly */
- pci_read_config_dword(dev->pdev, 0x5c, &base);
- base &= ~((1<<20) - 1);
+ u32 bsm;
+
+ pci_read_config_dword(dev->pdev, BSM, &bsm);
+
+ base = bsm & BSM_MASK;
} else if (IS_I865G(dev)) {
u16 toud = 0;
@@ -134,7 +141,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
I85X_DRB3, &tmp);
tom = tmp * MB(32);
- base = tom - tseg_size - dev_priv->gtt.stolen_size;
+ base = tom - tseg_size - ggtt->stolen_size;
} else if (IS_845G(dev)) {
u32 tseg_size = 0;
u32 tom;
@@ -158,7 +165,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
I830_DRB3, &tmp);
tom = tmp * MB(32);
- base = tom - tseg_size - dev_priv->gtt.stolen_size;
+ base = tom - tseg_size - ggtt->stolen_size;
} else if (IS_I830(dev)) {
u32 tseg_size = 0;
u32 tom;
@@ -178,7 +185,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
I830_DRB3, &tmp);
tom = tmp * MB(32);
- base = tom - tseg_size - dev_priv->gtt.stolen_size;
+ base = tom - tseg_size - ggtt->stolen_size;
}
if (base == 0)
@@ -189,41 +196,41 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
struct {
u32 start, end;
} stolen[2] = {
- { .start = base, .end = base + dev_priv->gtt.stolen_size, },
- { .start = base, .end = base + dev_priv->gtt.stolen_size, },
+ { .start = base, .end = base + ggtt->stolen_size, },
+ { .start = base, .end = base + ggtt->stolen_size, },
};
- u64 gtt_start, gtt_end;
+ u64 ggtt_start, ggtt_end;
- gtt_start = I915_READ(PGTBL_CTL);
+ ggtt_start = I915_READ(PGTBL_CTL);
if (IS_GEN4(dev))
- gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
- (gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
+ ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
+ (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
else
- gtt_start &= PGTBL_ADDRESS_LO_MASK;
- gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;
+ ggtt_start &= PGTBL_ADDRESS_LO_MASK;
+ ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4;
- if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
- stolen[0].end = gtt_start;
- if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
- stolen[1].start = gtt_end;
+ if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
+ stolen[0].end = ggtt_start;
+ if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
+ stolen[1].start = ggtt_end;
/* pick the larger of the two chunks */
if (stolen[0].end - stolen[0].start >
stolen[1].end - stolen[1].start) {
base = stolen[0].start;
- dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
+ ggtt->stolen_size = stolen[0].end - stolen[0].start;
} else {
base = stolen[1].start;
- dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
+ ggtt->stolen_size = stolen[1].end - stolen[1].start;
}
if (stolen[0].start != stolen[1].start ||
stolen[0].end != stolen[1].end) {
DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
- (unsigned long long) gtt_start,
- (unsigned long long) gtt_end - 1);
+ (unsigned long long)ggtt_start,
+ (unsigned long long)ggtt_end - 1);
DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
- base, base + (u32) dev_priv->gtt.stolen_size - 1);
+ base, base + (u32)ggtt->stolen_size - 1);
}
}
@@ -233,7 +240,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* kernel. So if the region is already marked as busy, something
* is seriously wrong.
*/
- r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
+ r = devm_request_mem_region(dev->dev, base, ggtt->stolen_size,
"Graphics Stolen Memory");
if (r == NULL) {
/*
@@ -245,7 +252,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* reservation starting from 1 instead of 0.
*/
r = devm_request_mem_region(dev->dev, base + 1,
- dev_priv->gtt.stolen_size - 1,
+ ggtt->stolen_size - 1,
"Graphics Stolen Memory");
/*
* GEN3 firmware likes to smash pci bridges into the stolen
@@ -253,7 +260,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
*/
if (r == NULL && !IS_GEN3(dev)) {
DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
- base, base + (uint32_t)dev_priv->gtt.stolen_size);
+ base, base + (uint32_t)ggtt->stolen_size);
base = 0;
}
}
@@ -274,11 +281,12 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
unsigned long *base, unsigned long *size)
{
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
CTG_STOLEN_RESERVED :
ELK_STOLEN_RESERVED);
unsigned long stolen_top = dev_priv->mm.stolen_base +
- dev_priv->gtt.stolen_size;
+ ggtt->stolen_size;
*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
@@ -369,10 +377,11 @@ static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
unsigned long *base, unsigned long *size)
{
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
unsigned long stolen_top;
- stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
+ stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
@@ -388,7 +397,8 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
int i915_gem_init_stolen(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long reserved_total, reserved_base = 0, reserved_size;
unsigned long stolen_top;
@@ -401,14 +411,14 @@ int i915_gem_init_stolen(struct drm_device *dev)
}
#endif
- if (dev_priv->gtt.stolen_size == 0)
+ if (ggtt->stolen_size == 0)
return 0;
dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
if (dev_priv->mm.stolen_base == 0)
return 0;
- stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
+ stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
switch (INTEL_INFO(dev_priv)->gen) {
case 2:
@@ -458,19 +468,18 @@ int i915_gem_init_stolen(struct drm_device *dev)
return 0;
}
- dev_priv->gtt.stolen_reserved_base = reserved_base;
- dev_priv->gtt.stolen_reserved_size = reserved_size;
+ ggtt->stolen_reserved_base = reserved_base;
+ ggtt->stolen_reserved_size = reserved_size;
/* It is possible for the reserved area to end before the end of stolen
* memory, so just consider the start. */
reserved_total = stolen_top - reserved_base;
DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
- dev_priv->gtt.stolen_size >> 10,
- (dev_priv->gtt.stolen_size - reserved_total) >> 10);
+ ggtt->stolen_size >> 10,
+ (ggtt->stolen_size - reserved_total) >> 10);
- dev_priv->gtt.stolen_usable_size = dev_priv->gtt.stolen_size -
- reserved_total;
+ ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total;
/*
* Basic memrange allocator for stolen space.
@@ -483,7 +492,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
* i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
* problem later.
*/
- drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size);
+ drm_mm_init(&dev_priv->mm.stolen, 0, ggtt->stolen_usable_size);
return 0;
}
@@ -492,12 +501,13 @@ static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
u32 offset, u32 size)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct sg_table *st;
struct scatterlist *sg;
DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
- BUG_ON(offset > dev_priv->gtt.stolen_size - size);
+ BUG_ON(offset > ggtt->stolen_size - size);
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
@@ -628,8 +638,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 gtt_offset,
u32 size)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_address_space *ggtt = &dev_priv->gtt.base;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
struct i915_vma *vma;
@@ -675,7 +685,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
- vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
+ vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
@@ -688,8 +698,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
*/
vma->node.start = gtt_offset;
vma->node.size = size;
- if (drm_mm_initialized(&ggtt->mm)) {
- ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
+ if (drm_mm_initialized(&ggtt->base.mm)) {
+ ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
goto err;
@@ -697,7 +707,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
vma->bound |= GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
- list_add_tail(&vma->vm_link, &ggtt->inactive_list);
+ list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
}
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
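
As an aside (not part of the patch), the reworked lookup reads the Base of Stolen Memory straight from PCI config space at offset 0x5c and keeps only the megabyte-aligned base bits, which the old open-coded mask did implicitly. A standalone sketch of that read, with a hypothetical helper name:

/* Sketch for illustration only: reading the Base of Stolen Memory from
 * PCI config space.  The 0x5c offset and 1 MiB alignment come from the
 * code above; the function name is hypothetical.
 */
#include <linux/pci.h>

static u32 read_stolen_base(struct pci_dev *pdev)
{
	u32 bsm;

	pci_read_config_dword(pdev, 0x5c, &bsm);

	/* Only bits 31:20 carry the base address; mask off the rest. */
	return bsm & ~((1u << 20) - 1);
}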
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 7410f6c96..b9bdb3403 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -166,7 +166,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int ret = 0;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
if (&obj->base == NULL)
return -ENOENT;
@@ -297,7 +297,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
if (&obj->base == NULL)
return -ENOENT;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 4d30b60de..32d9726e3 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -34,7 +34,7 @@
struct i915_mm_struct {
struct mm_struct *mm;
- struct drm_device *dev;
+ struct drm_i915_private *i915;
struct i915_mmu_notifier *mn;
struct hlist_node node;
struct kref kref;
@@ -49,6 +49,7 @@ struct i915_mmu_notifier {
struct hlist_node node;
struct mmu_notifier mn;
struct rb_root objects;
+ struct workqueue_struct *wq;
};
struct i915_mmu_object {
@@ -60,6 +61,37 @@ struct i915_mmu_object {
bool attached;
};
+static void wait_rendering(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
+ int i, n;
+
+ if (!obj->active)
+ return;
+
+ n = 0;
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
+ struct drm_i915_gem_request *req;
+
+ req = obj->last_read_req[i];
+ if (req == NULL)
+ continue;
+
+ requests[n++] = i915_gem_request_reference(req);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ for (i = 0; i < n; i++)
+ __i915_wait_request(requests[i], false, NULL, NULL);
+
+ mutex_lock(&dev->struct_mutex);
+
+ for (i = 0; i < n; i++)
+ i915_gem_request_unreference(requests[i]);
+}
+
static void cancel_userptr(struct work_struct *work)
{
struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
@@ -75,13 +107,13 @@ static void cancel_userptr(struct work_struct *work)
struct i915_vma *vma, *tmp;
bool was_interruptible;
+ wait_rendering(obj);
+
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
- list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) {
- int ret = i915_vma_unbind(vma);
- WARN_ON(ret && ret != -EIO);
- }
+ list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
+ WARN_ON(i915_vma_unbind(vma));
WARN_ON(i915_gem_object_put_pages(obj));
dev_priv->mm.interruptible = was_interruptible;
@@ -140,7 +172,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
*/
mo = container_of(it, struct i915_mmu_object, it);
if (kref_get_unless_zero(&mo->obj->base.refcount))
- schedule_work(&mo->work);
+ queue_work(mn->wq, &mo->work);
list_add(&mo->link, &cancelled);
it = interval_tree_iter_next(it, start, end);
@@ -148,6 +180,8 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
list_for_each_entry(mo, &cancelled, link)
del_object(mo);
spin_unlock(&mn->lock);
+
+ flush_workqueue(mn->wq);
}
static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
@@ -167,10 +201,16 @@ i915_mmu_notifier_create(struct mm_struct *mm)
spin_lock_init(&mn->lock);
mn->mn.ops = &i915_gem_userptr_notifier;
mn->objects = RB_ROOT;
+ mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
+ if (mn->wq == NULL) {
+ kfree(mn);
+ return ERR_PTR(-ENOMEM);
+ }
/* Protected by mmap_sem (write-lock) */
ret = __mmu_notifier_register(&mn->mn, mm);
if (ret) {
+ destroy_workqueue(mn->wq);
kfree(mn);
return ERR_PTR(ret);
}
@@ -205,13 +245,13 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
return mn;
down_write(&mm->mm->mmap_sem);
- mutex_lock(&to_i915(mm->dev)->mm_lock);
+ mutex_lock(&mm->i915->mm_lock);
if ((mn = mm->mn) == NULL) {
mn = i915_mmu_notifier_create(mm->mm);
if (!IS_ERR(mn))
mm->mn = mn;
}
- mutex_unlock(&to_i915(mm->dev)->mm_lock);
+ mutex_unlock(&mm->i915->mm_lock);
up_write(&mm->mm->mmap_sem);
return mn;
@@ -256,6 +296,7 @@ i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
return;
mmu_notifier_unregister(&mn->mn, mm);
+ destroy_workqueue(mn->wq);
kfree(mn);
}
@@ -327,7 +368,7 @@ i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
}
kref_init(&mm->kref);
- mm->dev = obj->base.dev;
+ mm->i915 = to_i915(obj->base.dev);
mm->mm = current->mm;
atomic_inc(&current->mm->mm_count);
@@ -362,7 +403,7 @@ __i915_mm_struct_free(struct kref *kref)
/* Protected by dev_priv->mm_lock */
hash_del(&mm->node);
- mutex_unlock(&to_i915(mm->dev)->mm_lock);
+ mutex_unlock(&mm->i915->mm_lock);
INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
schedule_work(&mm->work);
@@ -494,10 +535,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
ret = -ENOMEM;
pinned = 0;
- pvec = kmalloc(npages*sizeof(struct page *),
- GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
- if (pvec == NULL)
- pvec = drm_malloc_ab(npages, sizeof(struct page *));
+ pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm;
@@ -639,14 +677,11 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
pvec = NULL;
pinned = 0;
if (obj->userptr.mm->mm == current->mm) {
- pvec = kmalloc(num_pages*sizeof(struct page *),
- GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+ pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
+ GFP_TEMPORARY);
if (pvec == NULL) {
- pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
- if (pvec == NULL) {
- __i915_gem_userptr_set_active(obj, false);
- return -ENOMEM;
- }
+ __i915_gem_userptr_set_active(obj, false);
+ return -ENOMEM;
}
pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
@@ -763,6 +798,13 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
int ret;
u32 handle;
+ if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) {
+ /* We cannot support coherent userptr objects on hw without
+ * LLC and with broken snooping.
+ */
+ return -ENODEV;
+ }
+
if (args->flags & ~(I915_USERPTR_READ_ONLY |
I915_USERPTR_UNSYNCHRONIZED))
return -EINVAL;
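
For illustration only (not this driver's code): the change above moves userptr cancellations onto a dedicated unbound workqueue that the mmu-notifier callback flushes before returning, so invalidate_range_start cannot complete while an affected object is still bound. A minimal sketch of that create/queue/flush/destroy lifecycle, with hypothetical names:

/* Illustrative sketch of the dedicated-workqueue pattern used above,
 * with hypothetical item and handler names.
 */
#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_item {
	struct work_struct work;
};

static struct workqueue_struct *my_wq;

static void my_cancel(struct work_struct *work)
{
	struct my_item *item = container_of(work, struct my_item, work);

	/* ... release the resource tracked by item ... */
	kfree(item);
}

static int my_setup(void)
{
	my_wq = alloc_workqueue("my-release", WQ_UNBOUND, 0);
	return my_wq ? 0 : -ENOMEM;
}

static void my_invalidate(struct my_item *item)
{
	INIT_WORK(&item->work, my_cancel);
	queue_work(my_wq, &item->work);

	/* Block until every queued cancellation has run, mirroring the
	 * flush_workqueue() at the end of invalidate_range_start above.
	 */
	flush_workqueue(my_wq);
}

static void my_teardown(void)
{
	destroy_workqueue(my_wq);
}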
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 831895b8c..89725c9ef 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -198,7 +198,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
err->size,
err->read_domains,
err->write_domain);
- for (i = 0; i < I915_NUM_RINGS; i++)
+ for (i = 0; i < I915_NUM_ENGINES; i++)
err_printf(m, "%02x ", err->rseqno[i]);
err_printf(m, "] %02x", err->wseqno);
@@ -230,8 +230,6 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
return "wait";
case HANGCHECK_ACTIVE:
return "active";
- case HANGCHECK_ACTIVE_LOOP:
- return "active (loop)";
case HANGCHECK_KICK:
return "kick";
case HANGCHECK_HUNG:
@@ -298,6 +296,7 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
}
}
err_printf(m, " seqno: 0x%08x\n", ring->seqno);
+ err_printf(m, " last_seqno: 0x%08x\n", ring->last_seqno);
err_printf(m, " waiting: %s\n", yesno(ring->waiting));
err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head);
err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail);
@@ -433,7 +432,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
obj = error->ring[i].batchbuffer;
if (obj) {
- err_puts(m, dev_priv->ring[i].name);
+ err_puts(m, dev_priv->engine[i].name);
if (error->ring[i].pid != -1)
err_printf(m, " (submitted by %s [%d])",
error->ring[i].comm,
@@ -447,14 +446,14 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
obj = error->ring[i].wa_batchbuffer;
if (obj) {
err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
- dev_priv->ring[i].name,
+ dev_priv->engine[i].name,
lower_32_bits(obj->gtt_offset));
print_error_obj(m, obj);
}
if (error->ring[i].num_requests) {
err_printf(m, "%s --- %d requests\n",
- dev_priv->ring[i].name,
+ dev_priv->engine[i].name,
error->ring[i].num_requests);
for (j = 0; j < error->ring[i].num_requests; j++) {
err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
@@ -466,7 +465,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if ((obj = error->ring[i].ringbuffer)) {
err_printf(m, "%s --- ringbuffer = 0x%08x\n",
- dev_priv->ring[i].name,
+ dev_priv->engine[i].name,
lower_32_bits(obj->gtt_offset));
print_error_obj(m, obj);
}
@@ -480,7 +479,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
hws_page = &obj->pages[LRC_PPHWSP_PN][0];
}
err_printf(m, "%s --- HW Status = 0x%08llx\n",
- dev_priv->ring[i].name, hws_offset);
+ dev_priv->engine[i].name, hws_offset);
offset = 0;
for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
err_printf(m, "[%04x] %08x %08x %08x %08x\n",
@@ -493,9 +492,31 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
+ obj = error->ring[i].wa_ctx;
+ if (obj) {
+ u64 wa_ctx_offset = obj->gtt_offset;
+ u32 *wa_ctx_page = &obj->pages[0][0];
+ struct intel_engine_cs *engine = &dev_priv->engine[RCS];
+ u32 wa_ctx_size = (engine->wa_ctx.indirect_ctx.size +
+ engine->wa_ctx.per_ctx.size);
+
+ err_printf(m, "%s --- WA ctx batch buffer = 0x%08llx\n",
+ dev_priv->engine[i].name, wa_ctx_offset);
+ offset = 0;
+ for (elt = 0; elt < wa_ctx_size; elt += 4) {
+ err_printf(m, "[%04x] %08x %08x %08x %08x\n",
+ offset,
+ wa_ctx_page[elt + 0],
+ wa_ctx_page[elt + 1],
+ wa_ctx_page[elt + 2],
+ wa_ctx_page[elt + 3]);
+ offset += 16;
+ }
+ }
+
if ((obj = error->ring[i].ctx)) {
err_printf(m, "%s --- HW Context = 0x%08x\n",
- dev_priv->ring[i].name,
+ dev_priv->engine[i].name,
lower_32_bits(obj->gtt_offset));
print_error_obj(m, obj);
}
@@ -585,6 +606,7 @@ static void i915_error_state_free(struct kref *error_ref)
i915_error_object_free(error->ring[i].hws_page);
i915_error_object_free(error->ring[i].ctx);
kfree(error->ring[i].requests);
+ i915_error_object_free(error->ring[i].wa_ctx);
}
i915_error_object_free(error->semaphore_obj);
@@ -606,6 +628,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *src,
struct i915_address_space *vm)
{
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_error_object *dst;
struct i915_vma *vma = NULL;
int num_pages;
@@ -632,7 +655,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
vma = i915_gem_obj_to_ggtt(src);
use_ggtt = (src->cache_level == I915_CACHE_NONE &&
vma && (vma->bound & GLOBAL_BIND) &&
- reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
+ reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);
/* Cannot access stolen address directly, try to use the aperture */
if (src->stolen) {
@@ -642,12 +665,13 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
goto unwind;
reloc_offset = i915_gem_obj_ggtt_offset(src);
- if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end)
+ if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
goto unwind;
}
/* Cannot access snooped pages through the aperture */
- if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev))
+ if (use_ggtt && src->cache_level != I915_CACHE_NONE &&
+ !HAS_LLC(dev_priv))
goto unwind;
dst->page_count = num_pages;
@@ -668,7 +692,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
* captures what the GPU read.
*/
- s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ s = io_mapping_map_atomic_wc(ggtt->mappable,
reloc_offset);
memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s);
@@ -701,7 +725,7 @@ unwind:
return NULL;
}
#define i915_error_ggtt_object_create(dev_priv, src) \
- i915_error_object_create((dev_priv), (src), &(dev_priv)->gtt.base)
+ i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base)
static void capture_bo(struct drm_i915_error_buffer *err,
struct i915_vma *vma)
@@ -711,7 +735,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->size = obj->base.size;
err->name = obj->base.name;
- for (i = 0; i < I915_NUM_RINGS; i++)
+ for (i = 0; i < I915_NUM_ENGINES; i++)
err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]);
err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
err->gtt_offset = vma->node.start;
@@ -726,7 +750,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->purgeable = obj->madv != I915_MADV_WILLNEED;
err->userptr = obj->userptr.mm != NULL;
err->ring = obj->last_write_req ?
- i915_gem_request_get_ring(obj->last_write_req)->id : -1;
+ i915_gem_request_get_engine(obj->last_write_req)->id : -1;
err->cache_level = obj->cache_level;
}
@@ -788,7 +812,7 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
* synchronization commands which almost always appear in the case
* strictly a client bug. Use instdone to differentiate those some.
*/
- for (i = 0; i < I915_NUM_RINGS; i++) {
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
if (ring_id)
*ring_id = i;
@@ -821,11 +845,11 @@ static void i915_gem_record_fences(struct drm_device *dev,
static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error,
- struct intel_engine_cs *ring,
+ struct intel_engine_cs *engine,
struct drm_i915_error_ring *ering)
{
struct intel_engine_cs *to;
- int i;
+ enum intel_engine_id id;
if (!i915_semaphore_is_enabled(dev_priv->dev))
return;
@@ -835,68 +859,69 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
i915_error_ggtt_object_create(dev_priv,
dev_priv->semaphore_obj);
- for_each_ring(to, dev_priv, i) {
+ for_each_engine_id(to, dev_priv, id) {
int idx;
u16 signal_offset;
u32 *tmp;
- if (ring == to)
+ if (engine == to)
continue;
- signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1))
+ signal_offset = (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1))
/ 4;
tmp = error->semaphore_obj->pages[0];
- idx = intel_ring_sync_index(ring, to);
+ idx = intel_ring_sync_index(engine, to);
ering->semaphore_mboxes[idx] = tmp[signal_offset];
- ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
+ ering->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
}
}
static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *ring,
+ struct intel_engine_cs *engine,
struct drm_i915_error_ring *ering)
{
- ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
- ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
- ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
- ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
+ ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
+ ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
+ ering->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
+ ering->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
- if (HAS_VEBOX(dev_priv->dev)) {
+ if (HAS_VEBOX(dev_priv)) {
ering->semaphore_mboxes[2] =
- I915_READ(RING_SYNC_2(ring->mmio_base));
- ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
+ I915_READ(RING_SYNC_2(engine->mmio_base));
+ ering->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
}
}
static void i915_record_ring_state(struct drm_device *dev,
struct drm_i915_error_state *error,
- struct intel_engine_cs *ring,
+ struct intel_engine_cs *engine,
struct drm_i915_error_ring *ering)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (INTEL_INFO(dev)->gen >= 6) {
- ering->rc_psmi = I915_READ(RING_PSMI_CTL(ring->mmio_base));
- ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
+ ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
+ ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
if (INTEL_INFO(dev)->gen >= 8)
- gen8_record_semaphore_state(dev_priv, error, ring, ering);
+ gen8_record_semaphore_state(dev_priv, error, engine,
+ ering);
else
- gen6_record_semaphore_state(dev_priv, ring, ering);
+ gen6_record_semaphore_state(dev_priv, engine, ering);
}
if (INTEL_INFO(dev)->gen >= 4) {
- ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
- ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
- ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
- ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
- ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
- ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
+ ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
+ ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
+ ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
+ ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
+ ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
+ ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
if (INTEL_INFO(dev)->gen >= 8) {
- ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
- ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
+ ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
+ ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
}
- ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
+ ering->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
} else {
ering->faddr = I915_READ(DMA_FADD_I8XX);
ering->ipeir = I915_READ(IPEIR);
@@ -904,20 +929,21 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->instdone = I915_READ(GEN2_INSTDONE);
}
- ering->waiting = waitqueue_active(&ring->irq_queue);
- ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
- ering->seqno = ring->get_seqno(ring, false);
- ering->acthd = intel_ring_get_active_head(ring);
- ering->start = I915_READ_START(ring);
- ering->head = I915_READ_HEAD(ring);
- ering->tail = I915_READ_TAIL(ring);
- ering->ctl = I915_READ_CTL(ring);
+ ering->waiting = waitqueue_active(&engine->irq_queue);
+ ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
+ ering->acthd = intel_ring_get_active_head(engine);
+ ering->seqno = engine->get_seqno(engine);
+ ering->last_seqno = engine->last_submitted_seqno;
+ ering->start = I915_READ_START(engine);
+ ering->head = I915_READ_HEAD(engine);
+ ering->tail = I915_READ_TAIL(engine);
+ ering->ctl = I915_READ_CTL(engine);
if (I915_NEED_GFX_HWS(dev)) {
i915_reg_t mmio;
if (IS_GEN7(dev)) {
- switch (ring->id) {
+ switch (engine->id) {
default:
case RCS:
mmio = RENDER_HWS_PGA_GEN7;
@@ -932,51 +958,51 @@ static void i915_record_ring_state(struct drm_device *dev,
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
- } else if (IS_GEN6(ring->dev)) {
- mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+ } else if (IS_GEN6(engine->dev)) {
+ mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
/* XXX: gen8 returns to sanity */
- mmio = RING_HWS_PGA(ring->mmio_base);
+ mmio = RING_HWS_PGA(engine->mmio_base);
}
ering->hws = I915_READ(mmio);
}
- ering->hangcheck_score = ring->hangcheck.score;
- ering->hangcheck_action = ring->hangcheck.action;
+ ering->hangcheck_score = engine->hangcheck.score;
+ ering->hangcheck_action = engine->hangcheck.action;
if (USES_PPGTT(dev)) {
int i;
- ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
+ ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
if (IS_GEN6(dev))
ering->vm_info.pp_dir_base =
- I915_READ(RING_PP_DIR_BASE_READ(ring));
+ I915_READ(RING_PP_DIR_BASE_READ(engine));
else if (IS_GEN7(dev))
ering->vm_info.pp_dir_base =
- I915_READ(RING_PP_DIR_BASE(ring));
+ I915_READ(RING_PP_DIR_BASE(engine));
else if (INTEL_INFO(dev)->gen >= 8)
for (i = 0; i < 4; i++) {
ering->vm_info.pdp[i] =
- I915_READ(GEN8_RING_PDP_UDW(ring, i));
+ I915_READ(GEN8_RING_PDP_UDW(engine, i));
ering->vm_info.pdp[i] <<= 32;
ering->vm_info.pdp[i] |=
- I915_READ(GEN8_RING_PDP_LDW(ring, i));
+ I915_READ(GEN8_RING_PDP_LDW(engine, i));
}
}
}
-static void i915_gem_record_active_context(struct intel_engine_cs *ring,
+static void i915_gem_record_active_context(struct intel_engine_cs *engine,
struct drm_i915_error_state *error,
struct drm_i915_error_ring *ering)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
struct drm_i915_gem_object *obj;
/* Currently render ring is the only HW context user */
- if (ring->id != RCS || !error->ccid)
+ if (engine->id != RCS || !error->ccid)
return;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
@@ -993,30 +1019,31 @@ static void i915_gem_record_active_context(struct intel_engine_cs *ring,
static void i915_gem_record_rings(struct drm_device *dev,
struct drm_i915_error_state *error)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_request *request;
int i, count;
- for (i = 0; i < I915_NUM_RINGS; i++) {
- struct intel_engine_cs *ring = &dev_priv->ring[i];
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
+ struct intel_engine_cs *engine = &dev_priv->engine[i];
struct intel_ringbuffer *rbuf;
error->ring[i].pid = -1;
- if (ring->dev == NULL)
+ if (engine->dev == NULL)
continue;
error->ring[i].valid = true;
- i915_record_ring_state(dev, error, ring, &error->ring[i]);
+ i915_record_ring_state(dev, error, engine, &error->ring[i]);
- request = i915_gem_find_active_request(ring);
+ request = i915_gem_find_active_request(engine);
if (request) {
struct i915_address_space *vm;
vm = request->ctx && request->ctx->ppgtt ?
&request->ctx->ppgtt->base :
- &dev_priv->gtt.base;
+ &ggtt->base;
/* We need to copy these to an anonymous buffer
* as the simplest method to avoid being overwritten
@@ -1027,10 +1054,10 @@ static void i915_gem_record_rings(struct drm_device *dev,
request->batch_obj,
vm);
- if (HAS_BROKEN_CS_TLB(dev_priv->dev))
+ if (HAS_BROKEN_CS_TLB(dev_priv))
error->ring[i].wa_batchbuffer =
i915_error_ggtt_object_create(dev_priv,
- ring->scratch.obj);
+ engine->scratch.obj);
if (request->pid) {
struct task_struct *task;
@@ -1052,11 +1079,11 @@ static void i915_gem_record_rings(struct drm_device *dev,
* executed).
*/
if (request)
- rbuf = request->ctx->engine[ring->id].ringbuf;
+ rbuf = request->ctx->engine[engine->id].ringbuf;
else
- rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf;
+ rbuf = dev_priv->kernel_context->engine[engine->id].ringbuf;
} else
- rbuf = ring->buffer;
+ rbuf = engine->buffer;
error->ring[i].cpu_ring_head = rbuf->head;
error->ring[i].cpu_ring_tail = rbuf->tail;
@@ -1065,12 +1092,19 @@ static void i915_gem_record_rings(struct drm_device *dev,
i915_error_ggtt_object_create(dev_priv, rbuf->obj);
error->ring[i].hws_page =
- i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
+ i915_error_ggtt_object_create(dev_priv,
+ engine->status_page.obj);
+
+ if (engine->wa_ctx.obj) {
+ error->ring[i].wa_ctx =
+ i915_error_ggtt_object_create(dev_priv,
+ engine->wa_ctx.obj);
+ }
- i915_gem_record_active_context(ring, error, &error->ring[i]);
+ i915_gem_record_active_context(engine, error, &error->ring[i]);
count = 0;
- list_for_each_entry(request, &ring->request_list, list)
+ list_for_each_entry(request, &engine->request_list, list)
count++;
error->ring[i].num_requests = count;
@@ -1083,7 +1117,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
}
count = 0;
- list_for_each_entry(request, &ring->request_list, list) {
+ list_for_each_entry(request, &engine->request_list, list) {
struct drm_i915_error_request *erq;
if (count >= error->ring[i].num_requests) {
@@ -1272,7 +1306,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
static void i915_error_capture_msg(struct drm_device *dev,
struct drm_i915_error_state *error,
- bool wedged,
+ u32 engine_mask,
const char *error_msg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1295,7 +1329,7 @@ static void i915_error_capture_msg(struct drm_device *dev,
scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
", reason: %s, action: %s",
error_msg,
- wedged ? "reset" : "continue");
+ engine_mask ? "reset" : "continue");
}
static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
@@ -1318,7 +1352,7 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
* out a structure which becomes available in debugfs for user level tools
* to pick up.
*/
-void i915_capture_error_state(struct drm_device *dev, bool wedged,
+void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
const char *error_msg)
{
static bool warned;
@@ -1346,7 +1380,7 @@ void i915_capture_error_state(struct drm_device *dev, bool wedged,
error->overlay = intel_overlay_capture_error_state(dev);
error->display = intel_display_capture_error_state(dev);
- i915_error_capture_msg(dev, error, wedged, error_msg);
+ i915_error_capture_msg(dev, error, engine_mask, error_msg);
DRM_INFO("%s\n", error->error_msg);
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
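
A hypothetical call site, purely to illustrate the bool-to-mask change above: any non-zero engine_mask makes the captured message read "action: reset", while zero keeps "action: continue".

/* Hypothetical call sites, not taken from the driver. */
static void example_capture(struct drm_device *dev)
{
	/* Flag the render engine for reset: message reads "action: reset". */
	i915_capture_error_state(dev, 1 << RCS, "render ring hung");

	/* No engines flagged: message reads "action: continue". */
	i915_capture_error_state(dev, 0, "missed breadcrumb, continuing");
}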
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
index e4ba58222..80786d9f9 100644
--- a/drivers/gpu/drm/i915/i915_guc_reg.h
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -27,9 +27,12 @@
/* Definitions of GuC H/W registers, bits, etc */
#define GUC_STATUS _MMIO(0xc000)
+#define GS_RESET_SHIFT 0
+#define GS_MIA_IN_RESET (0x01 << GS_RESET_SHIFT)
#define GS_BOOTROM_SHIFT 1
#define GS_BOOTROM_MASK (0x7F << GS_BOOTROM_SHIFT)
#define GS_BOOTROM_RSA_FAILED (0x50 << GS_BOOTROM_SHIFT)
+#define GS_BOOTROM_JUMP_PASSED (0x76 << GS_BOOTROM_SHIFT)
#define GS_UKERNEL_SHIFT 8
#define GS_UKERNEL_MASK (0xFF << GS_UKERNEL_SHIFT)
#define GS_UKERNEL_LAPIC_DONE (0x30 << GS_UKERNEL_SHIFT)
@@ -37,7 +40,13 @@
#define GS_UKERNEL_READY (0xF0 << GS_UKERNEL_SHIFT)
#define GS_MIA_SHIFT 16
#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT)
-#define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT)
+#define GS_MIA_CORE_STATE (0x01 << GS_MIA_SHIFT)
+#define GS_MIA_HALT_REQUESTED (0x02 << GS_MIA_SHIFT)
+#define GS_MIA_ISR_ENTRY (0x04 << GS_MIA_SHIFT)
+#define GS_AUTH_STATUS_SHIFT 30
+#define GS_AUTH_STATUS_MASK (0x03 << GS_AUTH_STATUS_SHIFT)
+#define GS_AUTH_STATUS_BAD (0x01 << GS_AUTH_STATUS_SHIFT)
+#define GS_AUTH_STATUS_GOOD (0x02 << GS_AUTH_STATUS_SHIFT)
#define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4)
#define SOFT_SCRATCH_COUNT 16
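
Since the new status fields above follow the usual shift/mask convention, a GUC_STATUS readout can be decoded by masking and comparing against the named values. Illustration only; the helper names are hypothetical and assume the definitions above are in scope:

#include <linux/types.h>

static inline bool guc_bootrom_done(u32 status)
{
	return (status & GS_BOOTROM_MASK) == GS_BOOTROM_JUMP_PASSED;
}

static inline bool guc_fw_authenticated(u32 status)
{
	return (status & GS_AUTH_STATUS_MASK) == GS_AUTH_STATUS_GOOD;
}

static inline bool guc_mia_in_reset(u32 status)
{
	return status & GS_MIA_IN_RESET;
}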
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index d7543efc8..d40c13fb6 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -179,15 +179,11 @@ static void guc_init_doorbell(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct guc_doorbell_info *doorbell;
- void *base;
- base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
- doorbell = base + client->doorbell_offset;
+ doorbell = client->client_base + client->doorbell_offset;
- doorbell->db_status = 1;
+ doorbell->db_status = GUC_DOORBELL_ENABLED;
doorbell->cookie = 0;
-
- kunmap_atomic(base);
}
static int guc_ring_doorbell(struct i915_guc_client *gc)
@@ -195,11 +191,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
struct guc_process_desc *desc;
union guc_doorbell_qw db_cmp, db_exc, db_ret;
union guc_doorbell_qw *db;
- void *base;
int attempt = 2, ret = -EAGAIN;
- base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
- desc = base + gc->proc_desc_offset;
+ desc = gc->client_base + gc->proc_desc_offset;
/* Update the tail so it is visible to GuC */
desc->tail = gc->wq_tail;
@@ -215,7 +209,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
db_exc.cookie = 1;
/* pointer of current doorbell cacheline */
- db = base + gc->doorbell_offset;
+ db = gc->client_base + gc->doorbell_offset;
while (attempt--) {
/* lets ring the doorbell */
@@ -244,10 +238,6 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
db_exc.cookie = 1;
}
- /* Finally, update the cached copy of the GuC's WQ head */
- gc->wq_head = desc->head;
-
- kunmap_atomic(base);
return ret;
}
@@ -256,16 +246,12 @@ static void guc_disable_doorbell(struct intel_guc *guc,
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct guc_doorbell_info *doorbell;
- void *base;
i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id);
int value;
- base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
- doorbell = base + client->doorbell_offset;
+ doorbell = client->client_base + client->doorbell_offset;
- doorbell->db_status = 0;
-
- kunmap_atomic(base);
+ doorbell->db_status = GUC_DOORBELL_DISABLED;
I915_WRITE(drbreg, I915_READ(drbreg) & ~GEN8_DRB_VALID);
@@ -341,10 +327,8 @@ static void guc_init_proc_desc(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct guc_process_desc *desc;
- void *base;
- base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
- desc = base + client->proc_desc_offset;
+ desc = client->client_base + client->proc_desc_offset;
memset(desc, 0, sizeof(*desc));
@@ -361,8 +345,6 @@ static void guc_init_proc_desc(struct intel_guc *guc,
desc->wq_size_bytes = client->wq_size;
desc->wq_status = WQ_STATUS_ACTIVE;
desc->priority = client->priority;
-
- kunmap_atomic(base);
}
/*
@@ -376,12 +358,14 @@ static void guc_init_proc_desc(struct intel_guc *guc,
static void guc_init_ctx_desc(struct intel_guc *guc,
struct i915_guc_client *client)
{
+ struct drm_i915_gem_object *client_obj = client->client_obj;
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct intel_context *ctx = client->owner;
struct guc_context_desc desc;
struct sg_table *sg;
- int i;
+ enum intel_engine_id id;
+ u32 gfx_addr;
memset(&desc, 0, sizeof(desc));
@@ -390,8 +374,8 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
desc.priority = client->priority;
desc.db_id = client->doorbell_id;
- for_each_ring(ring, dev_priv, i) {
- struct guc_execlist_context *lrc = &desc.lrc[ring->guc_id];
+ for_each_engine_id(engine, dev_priv, id) {
+ struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
struct drm_i915_gem_object *obj;
uint64_t ctx_desc;
@@ -402,48 +386,44 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
* for now who owns a GuC client. But for future owner of GuC
* client, need to make sure lrc is pinned prior to enter here.
*/
- obj = ctx->engine[i].state;
+ obj = ctx->engine[id].state;
if (!obj)
break; /* XXX: continue? */
- ctx_desc = intel_lr_context_descriptor(ctx, ring);
+ ctx_desc = intel_lr_context_descriptor(ctx, engine);
lrc->context_desc = (u32)ctx_desc;
/* The state page is after PPHWSP */
- lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
- LRC_STATE_PN * PAGE_SIZE;
+ gfx_addr = i915_gem_obj_ggtt_offset(obj);
+ lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
- (ring->guc_id << GUC_ELC_ENGINE_OFFSET);
+ (engine->guc_id << GUC_ELC_ENGINE_OFFSET);
- obj = ctx->engine[i].ringbuf->obj;
+ obj = ctx->engine[id].ringbuf->obj;
+ gfx_addr = i915_gem_obj_ggtt_offset(obj);
- lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
- lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
- lrc->ring_next_free_location = lrc->ring_begin;
+ lrc->ring_begin = gfx_addr;
+ lrc->ring_end = gfx_addr + obj->base.size - 1;
+ lrc->ring_next_free_location = gfx_addr;
lrc->ring_current_tail_pointer_value = 0;
- desc.engines_used |= (1 << ring->guc_id);
+ desc.engines_used |= (1 << engine->guc_id);
}
WARN_ON(desc.engines_used == 0);
/*
- * The CPU address is only needed at certain points, so kmap_atomic on
- * demand instead of storing it in the ctx descriptor.
- * XXX: May make debug easier to have it mapped
+ * The doorbell, process descriptor, and workqueue are all parts
+ * of the client object, which the GuC will reference via the GGTT
*/
- desc.db_trigger_cpu = 0;
- desc.db_trigger_uk = client->doorbell_offset +
- i915_gem_obj_ggtt_offset(client->client_obj);
- desc.db_trigger_phy = client->doorbell_offset +
- sg_dma_address(client->client_obj->pages->sgl);
-
- desc.process_desc = client->proc_desc_offset +
- i915_gem_obj_ggtt_offset(client->client_obj);
-
- desc.wq_addr = client->wq_offset +
- i915_gem_obj_ggtt_offset(client->client_obj);
-
+ gfx_addr = i915_gem_obj_ggtt_offset(client_obj);
+ desc.db_trigger_phy = sg_dma_address(client_obj->pages->sgl) +
+ client->doorbell_offset;
+ desc.db_trigger_cpu = (uintptr_t)client->client_base +
+ client->doorbell_offset;
+ desc.db_trigger_uk = gfx_addr + client->doorbell_offset;
+ desc.process_desc = gfx_addr + client->proc_desc_offset;
+ desc.wq_addr = gfx_addr + client->wq_offset;
desc.wq_size = client->wq_size;
/*
@@ -474,25 +454,16 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
int i915_guc_wq_check_space(struct i915_guc_client *gc)
{
struct guc_process_desc *desc;
- void *base;
u32 size = sizeof(struct guc_wq_item);
int ret = -ETIMEDOUT, timeout_counter = 200;
if (!gc)
return 0;
- /* Quickly return if wq space is available since last time we cache the
- * head position. */
- if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
- return 0;
-
- base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
- desc = base + gc->proc_desc_offset;
+ desc = gc->client_base + gc->proc_desc_offset;
while (timeout_counter-- > 0) {
- gc->wq_head = desc->head;
-
- if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
+ if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
ret = 0;
break;
}
@@ -501,19 +472,19 @@ int i915_guc_wq_check_space(struct i915_guc_client *gc)
usleep_range(1000, 2000);
};
- kunmap_atomic(base);
-
return ret;
}
static int guc_add_workqueue_item(struct i915_guc_client *gc,
struct drm_i915_gem_request *rq)
{
+ struct guc_process_desc *desc;
struct guc_wq_item *wqi;
void *base;
u32 tail, wq_len, wq_off, space;
- space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
+ desc = gc->client_base + gc->proc_desc_offset;
+ space = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
if (WARN_ON(space < sizeof(struct guc_wq_item)))
return -ENOSPC; /* shouldn't happen */
@@ -542,11 +513,12 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
wqi->header = WQ_TYPE_INORDER |
(wq_len << WQ_LEN_SHIFT) |
- (rq->ring->guc_id << WQ_TARGET_SHIFT) |
+ (rq->engine->guc_id << WQ_TARGET_SHIFT) |
WQ_NO_WCFLUSH_WAIT;
/* The GuC wants only the low-order word of the context descriptor */
- wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, rq->ring);
+ wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx,
+ rq->engine);
/* The GuC firmware wants the tail index in QWords, not bytes */
tail = rq->ringbuf->tail >> 3;
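
With the cached wq_head removed above, free space in the GuC work queue is recomputed on demand from the head the GuC publishes in its process descriptor, using the standard circular-buffer helper. A self-contained sketch of how CIRC_SPACE() is meant to be read here (illustration only, the wrapper name is hypothetical):

/* CIRC_SPACE(head, tail, size) from <linux/circ_buf.h> returns how many
 * bytes the producer may still write without catching the consumer; size
 * must be a power of two.  In the code above the driver is the producer
 * (gc->wq_tail) and the GuC is the consumer (desc->head).
 */
#include <linux/circ_buf.h>
#include <linux/types.h>

static bool wq_has_room(u32 producer_tail, u32 consumer_head, u32 size,
			u32 item_size)
{
	return CIRC_SPACE(producer_tail, consumer_head, size) >= item_size;
}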
@@ -569,7 +541,7 @@ int i915_guc_submit(struct i915_guc_client *client,
struct drm_i915_gem_request *rq)
{
struct intel_guc *guc = client->guc;
- unsigned int engine_id = rq->ring->guc_id;
+ unsigned int engine_id = rq->engine->guc_id;
int q_ret, b_ret;
q_ret = guc_add_workqueue_item(client, rq);
@@ -660,21 +632,28 @@ static void guc_client_free(struct drm_device *dev,
if (!client)
return;
- if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) {
- /*
- * First disable the doorbell, then tell the GuC we've
- * finished with it, finally deallocate it in our bitmap
- */
- guc_disable_doorbell(guc, client);
- host2guc_release_doorbell(guc, client);
- release_doorbell(guc, client->doorbell_id);
- }
-
/*
* XXX: wait for any outstanding submissions before freeing memory.
* Be sure to drop any locks
*/
+ if (client->client_base) {
+ /*
+ * If we got as far as setting up a doorbell, make sure
+ * we shut it down before unmapping & deallocating the
+ * memory. So first disable the doorbell, then tell the
+ * GuC that we've finished with it, finally deallocate
+ * it in our bitmap
+ */
+ if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) {
+ guc_disable_doorbell(guc, client);
+ host2guc_release_doorbell(guc, client);
+ release_doorbell(guc, client->doorbell_id);
+ }
+
+ kunmap(kmap_to_page(client->client_base));
+ }
+
gem_release_guc_obj(client->client_obj);
if (client->ctx_index != GUC_INVALID_CTX_ID) {
@@ -695,7 +674,7 @@ static void guc_client_free(struct drm_device *dev,
* @ctx: the context that owns the client (we use the default render
* context)
*
- * Return: An i915_guc_client object if success.
+ * Return: An i915_guc_client object if success, else NULL.
*/
static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
uint32_t priority,
@@ -727,7 +706,9 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
if (!obj)
goto err;
+ /* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
client->client_obj = obj;
+ client->client_base = kmap(i915_gem_object_get_page(obj, 0));
client->wq_offset = GUC_DB_SIZE;
client->wq_size = GUC_WQ_SIZE;
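
Keeping the first page of the client object kmap'd for its whole lifetime, instead of kmap_atomic on every doorbell or descriptor access, follows a common pattern; a minimal sketch under that assumption, not the driver code:

/* Minimal sketch of the keep-it-mapped pattern: map one page for the
 * object's lifetime, then recover the page from the mapping at teardown.
 */
#include <linux/highmem.h>

static void *map_first_page(struct page *page)
{
	return kmap(page);		/* persistent mapping, may sleep */
}

static void unmap_first_page(void *base)
{
	if (base)
		kunmap(kmap_to_page(base));
}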
@@ -839,9 +820,9 @@ static void guc_create_ads(struct intel_guc *guc)
struct guc_ads *ads;
struct guc_policies *policies;
struct guc_mmio_reg_state *reg_state;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct page *page;
- u32 size, i;
+ u32 size;
/* The ads obj includes the struct itself and buffers passed to GuC */
size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
@@ -867,11 +848,11 @@ static void guc_create_ads(struct intel_guc *guc)
* so its address won't change after we've told the GuC where
* to find it.
*/
- ring = &dev_priv->ring[RCS];
- ads->golden_context_lrca = ring->status_page.gfx_addr;
+ engine = &dev_priv->engine[RCS];
+ ads->golden_context_lrca = engine->status_page.gfx_addr;
- for_each_ring(ring, dev_priv, i)
- ads->eng_state_size[ring->guc_id] = intel_lr_context_size(ring);
+ for_each_engine(engine, dev_priv)
+ ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
/* GuC scheduling policies */
policies = (void *)ads + sizeof(struct guc_ads);
@@ -883,12 +864,12 @@ static void guc_create_ads(struct intel_guc *guc)
/* MMIO reg state */
reg_state = (void *)policies + sizeof(struct guc_policies);
- for_each_ring(ring, dev_priv, i) {
- reg_state->mmio_white_list[ring->guc_id].mmio_start =
- ring->mmio_base + GUC_MMIO_WHITE_LIST_START;
+ for_each_engine(engine, dev_priv) {
+ reg_state->mmio_white_list[engine->guc_id].mmio_start =
+ engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
/* Nothing to be saved or restored for now. */
- reg_state->mmio_white_list[ring->guc_id].count = 0;
+ reg_state->mmio_white_list[engine->guc_id].count = 0;
}
ads->reg_state_addr = ads->scheduler_policies +
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d1a46ef5a..aab47f7bb 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -994,14 +994,15 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
return;
}
-static void notify_ring(struct intel_engine_cs *ring)
+static void notify_ring(struct intel_engine_cs *engine)
{
- if (!intel_ring_initialized(ring))
+ if (!intel_engine_initialized(engine))
return;
- trace_i915_gem_request_notify(ring);
+ trace_i915_gem_request_notify(engine);
+ engine->user_interrupts++;
- wake_up_all(&ring->irq_queue);
+ wake_up_all(&engine->irq_queue);
}
static void vlv_c0_read(struct drm_i915_private *dev_priv,
@@ -1079,11 +1080,10 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
static bool any_waiters(struct drm_i915_private *dev_priv)
{
- struct intel_engine_cs *ring;
- int i;
+ struct intel_engine_cs *engine;
- for_each_ring(ring, dev_priv, i)
- if (ring->irq_refcount)
+ for_each_engine(engine, dev_priv)
+ if (engine->irq_refcount)
return true;
return false;
@@ -1219,7 +1219,7 @@ static void ivybridge_parity_work(struct work_struct *work)
i915_reg_t reg;
slice--;
- if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
+ if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
break;
dev_priv->l3_parity.which_slice &= ~(1<<slice);
@@ -1258,24 +1258,23 @@ static void ivybridge_parity_work(struct work_struct *work)
out:
WARN_ON(dev_priv->l3_parity.which_slice);
spin_lock_irq(&dev_priv->irq_lock);
- gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
+ gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
spin_unlock_irq(&dev_priv->irq_lock);
mutex_unlock(&dev_priv->dev->struct_mutex);
}
-static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
+static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
+ u32 iir)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!HAS_L3_DPF(dev))
+ if (!HAS_L3_DPF(dev_priv))
return;
spin_lock(&dev_priv->irq_lock);
- gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
+ gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
spin_unlock(&dev_priv->irq_lock);
- iir &= GT_PARITY_ERROR(dev);
+ iir &= GT_PARITY_ERROR(dev_priv);
if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
dev_priv->l3_parity.which_slice |= 1 << 1;
@@ -1285,102 +1284,85 @@ static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
-static void ilk_gt_irq_handler(struct drm_device *dev,
- struct drm_i915_private *dev_priv,
+static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir &
(GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
- notify_ring(&dev_priv->ring[RCS]);
+ notify_ring(&dev_priv->engine[RCS]);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
- notify_ring(&dev_priv->ring[VCS]);
+ notify_ring(&dev_priv->engine[VCS]);
}
-static void snb_gt_irq_handler(struct drm_device *dev,
- struct drm_i915_private *dev_priv,
+static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir &
(GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
- notify_ring(&dev_priv->ring[RCS]);
+ notify_ring(&dev_priv->engine[RCS]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
- notify_ring(&dev_priv->ring[VCS]);
+ notify_ring(&dev_priv->engine[VCS]);
if (gt_iir & GT_BLT_USER_INTERRUPT)
- notify_ring(&dev_priv->ring[BCS]);
+ notify_ring(&dev_priv->engine[BCS]);
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
- if (gt_iir & GT_PARITY_ERROR(dev))
- ivybridge_parity_error_irq_handler(dev, gt_iir);
+ if (gt_iir & GT_PARITY_ERROR(dev_priv))
+ ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}
static __always_inline void
-gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift)
+gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
- notify_ring(ring);
+ notify_ring(engine);
if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
- intel_lrc_irq_handler(ring);
+ tasklet_schedule(&engine->irq_tasklet);
}
-static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
- u32 master_ctl)
+static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
+ u32 master_ctl,
+ u32 gt_iir[4])
{
irqreturn_t ret = IRQ_NONE;
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
- u32 iir = I915_READ_FW(GEN8_GT_IIR(0));
- if (iir) {
- I915_WRITE_FW(GEN8_GT_IIR(0), iir);
+ gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
+ if (gt_iir[0]) {
+ I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
ret = IRQ_HANDLED;
-
- gen8_cs_irq_handler(&dev_priv->ring[RCS],
- iir, GEN8_RCS_IRQ_SHIFT);
-
- gen8_cs_irq_handler(&dev_priv->ring[BCS],
- iir, GEN8_BCS_IRQ_SHIFT);
} else
DRM_ERROR("The master control interrupt lied (GT0)!\n");
}
if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
- u32 iir = I915_READ_FW(GEN8_GT_IIR(1));
- if (iir) {
- I915_WRITE_FW(GEN8_GT_IIR(1), iir);
+ gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
+ if (gt_iir[1]) {
+ I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
ret = IRQ_HANDLED;
-
- gen8_cs_irq_handler(&dev_priv->ring[VCS],
- iir, GEN8_VCS1_IRQ_SHIFT);
-
- gen8_cs_irq_handler(&dev_priv->ring[VCS2],
- iir, GEN8_VCS2_IRQ_SHIFT);
} else
DRM_ERROR("The master control interrupt lied (GT1)!\n");
}
if (master_ctl & GEN8_GT_VECS_IRQ) {
- u32 iir = I915_READ_FW(GEN8_GT_IIR(3));
- if (iir) {
- I915_WRITE_FW(GEN8_GT_IIR(3), iir);
+ gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
+ if (gt_iir[3]) {
+ I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
ret = IRQ_HANDLED;
-
- gen8_cs_irq_handler(&dev_priv->ring[VECS],
- iir, GEN8_VECS_IRQ_SHIFT);
} else
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
if (master_ctl & GEN8_GT_PM_IRQ) {
- u32 iir = I915_READ_FW(GEN8_GT_IIR(2));
- if (iir & dev_priv->pm_rps_events) {
+ gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
+ if (gt_iir[2] & dev_priv->pm_rps_events) {
I915_WRITE_FW(GEN8_GT_IIR(2),
- iir & dev_priv->pm_rps_events);
+ gt_iir[2] & dev_priv->pm_rps_events);
ret = IRQ_HANDLED;
- gen6_rps_irq_handler(dev_priv, iir);
} else
DRM_ERROR("The master control interrupt lied (PM)!\n");
}
@@ -1388,6 +1370,31 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
return ret;
}
+static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
+ u32 gt_iir[4])
+{
+ if (gt_iir[0]) {
+ gen8_cs_irq_handler(&dev_priv->engine[RCS],
+ gt_iir[0], GEN8_RCS_IRQ_SHIFT);
+ gen8_cs_irq_handler(&dev_priv->engine[BCS],
+ gt_iir[0], GEN8_BCS_IRQ_SHIFT);
+ }
+
+ if (gt_iir[1]) {
+ gen8_cs_irq_handler(&dev_priv->engine[VCS],
+ gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
+ gen8_cs_irq_handler(&dev_priv->engine[VCS2],
+ gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
+ }
+
+ if (gt_iir[3])
+ gen8_cs_irq_handler(&dev_priv->engine[VECS],
+ gt_iir[3], GEN8_VECS_IRQ_SHIFT);
+
+ if (gt_iir[2] & dev_priv->pm_rps_events)
+ gen6_rps_irq_handler(dev_priv, gt_iir[2]);
+}
+
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
switch (port) {
@@ -1627,9 +1634,9 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
if (INTEL_INFO(dev_priv)->gen >= 8)
return;
- if (HAS_VEBOX(dev_priv->dev)) {
+ if (HAS_VEBOX(dev_priv)) {
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
- notify_ring(&dev_priv->ring[VECS]);
+ notify_ring(&dev_priv->engine[VECS]);
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
@@ -1644,10 +1651,10 @@ static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
return true;
}
-static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
+static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir,
+ u32 pipe_stats[I915_MAX_PIPES])
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pipe_stats[I915_MAX_PIPES] = { };
int pipe;
spin_lock(&dev_priv->irq_lock);
@@ -1701,6 +1708,13 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
I915_WRITE(reg, pipe_stats[pipe]);
}
spin_unlock(&dev_priv->irq_lock);
+}
+
+static void valleyview_pipestat_irq_handler(struct drm_device *dev,
+ u32 pipe_stats[I915_MAX_PIPES])
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
@@ -1723,21 +1737,20 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
gmbus_irq_handler(dev);
}
-static void i9xx_hpd_irq_handler(struct drm_device *dev)
+static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
- u32 pin_mask = 0, long_mask = 0;
- if (!hotplug_status)
- return;
+ if (hotplug_status)
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
- I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
- /*
- * Make sure hotplug status is cleared before we clear IIR, or else we
- * may miss hotplug events.
- */
- POSTING_READ(PORT_HOTPLUG_STAT);
+ return hotplug_status;
+}
+
+static void i9xx_hpd_irq_handler(struct drm_device *dev,
+ u32 hotplug_status)
+{
+ u32 pin_mask = 0, long_mask = 0;
if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
@@ -1768,7 +1781,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 iir, gt_iir, pm_iir;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -1777,40 +1789,72 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
- while (true) {
- /* Find, clear, then process each source of interrupt */
+ do {
+ u32 iir, gt_iir, pm_iir;
+ u32 pipe_stats[I915_MAX_PIPES] = {};
+ u32 hotplug_status = 0;
+ u32 ier = 0;
gt_iir = I915_READ(GTIIR);
- if (gt_iir)
- I915_WRITE(GTIIR, gt_iir);
-
pm_iir = I915_READ(GEN6_PMIIR);
- if (pm_iir)
- I915_WRITE(GEN6_PMIIR, pm_iir);
-
iir = I915_READ(VLV_IIR);
- if (iir) {
- /* Consume port before clearing IIR or we'll miss events */
- if (iir & I915_DISPLAY_PORT_INTERRUPT)
- i9xx_hpd_irq_handler(dev);
- I915_WRITE(VLV_IIR, iir);
- }
if (gt_iir == 0 && pm_iir == 0 && iir == 0)
- goto out;
+ break;
ret = IRQ_HANDLED;
+ /*
+ * Theory on interrupt generation, based on empirical evidence:
+ *
+ * x = ((VLV_IIR & VLV_IER) ||
+ * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
+ * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
+ *
+ * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
+ * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
+ * guarantee the CPU interrupt will be raised again even if we
+ * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
+ * bits this time around.
+ */
+ I915_WRITE(VLV_MASTER_IER, 0);
+ ier = I915_READ(VLV_IER);
+ I915_WRITE(VLV_IER, 0);
+
if (gt_iir)
- snb_gt_irq_handler(dev, dev_priv, gt_iir);
+ I915_WRITE(GTIIR, gt_iir);
if (pm_iir)
- gen6_rps_irq_handler(dev_priv, pm_iir);
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+
+ if (iir & I915_DISPLAY_PORT_INTERRUPT)
+ hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+
/* Call regardless, as some status bits might not be
* signalled in iir */
- valleyview_pipestat_irq_handler(dev, iir);
- }
+ valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
+
+ /*
+ * VLV_IIR is single buffered, and reflects the level
+ * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
+ */
+ if (iir)
+ I915_WRITE(VLV_IIR, iir);
+
+ I915_WRITE(VLV_IER, ier);
+ I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+ POSTING_READ(VLV_MASTER_IER);
+
+ if (gt_iir)
+ snb_gt_irq_handler(dev_priv, gt_iir);
+ if (pm_iir)
+ gen6_rps_irq_handler(dev_priv, pm_iir);
+
+ if (hotplug_status)
+ i9xx_hpd_irq_handler(dev, hotplug_status);
+
+ valleyview_pipestat_irq_handler(dev, pipe_stats);
+ } while (0);
-out:
enable_rpm_wakeref_asserts(dev_priv);
return ret;
@@ -1820,7 +1864,6 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 master_ctl, iir;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -1829,7 +1872,13 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
- for (;;) {
+ do {
+ u32 master_ctl, iir;
+ u32 gt_iir[4] = {};
+ u32 pipe_stats[I915_MAX_PIPES] = {};
+ u32 hotplug_status = 0;
+ u32 ier = 0;
+
master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
iir = I915_READ(VLV_IIR);
@@ -1838,26 +1887,50 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
+ /*
+ * Theory on interrupt generation, based on empirical evidence:
+ *
+ * x = ((VLV_IIR & VLV_IER) ||
+ * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
+ * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
+ *
+ * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
+ * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
+ * guarantee the CPU interrupt will be raised again even if we
+ * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
+ * bits this time around.
+ */
I915_WRITE(GEN8_MASTER_IRQ, 0);
+ ier = I915_READ(VLV_IER);
+ I915_WRITE(VLV_IER, 0);
- /* Find, clear, then process each source of interrupt */
-
- if (iir) {
- /* Consume port before clearing IIR or we'll miss events */
- if (iir & I915_DISPLAY_PORT_INTERRUPT)
- i9xx_hpd_irq_handler(dev);
- I915_WRITE(VLV_IIR, iir);
- }
+ gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
- gen8_gt_irq_handler(dev_priv, master_ctl);
+ if (iir & I915_DISPLAY_PORT_INTERRUPT)
+ hotplug_status = i9xx_hpd_irq_ack(dev_priv);
/* Call regardless, as some status bits might not be
* signalled in iir */
- valleyview_pipestat_irq_handler(dev, iir);
+ valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
- I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
+ /*
+ * VLV_IIR is single buffered, and reflects the level
+ * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
+ */
+ if (iir)
+ I915_WRITE(VLV_IIR, iir);
+
+ I915_WRITE(VLV_IER, ier);
+ I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
- }
+
+ gen8_gt_irq_handler(dev_priv, gt_iir);
+
+ if (hotplug_status)
+ i9xx_hpd_irq_handler(dev, hotplug_status);
+
+ valleyview_pipestat_irq_handler(dev, pipe_stats);
+ } while (0);
enable_rpm_wakeref_asserts(dev_priv);
@@ -2217,9 +2290,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
I915_WRITE(GTIIR, gt_iir);
ret = IRQ_HANDLED;
if (INTEL_INFO(dev)->gen >= 6)
- snb_gt_irq_handler(dev, dev_priv, gt_iir);
+ snb_gt_irq_handler(dev_priv, gt_iir);
else
- ilk_gt_irq_handler(dev, dev_priv, gt_iir);
+ ilk_gt_irq_handler(dev_priv, gt_iir);
}
de_iir = I915_READ(DEIIR);
@@ -2398,7 +2471,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
- if (HAS_PCH_SPT(dev_priv))
+ if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
spt_irq_handler(dev, iir);
else
cpt_irq_handler(dev, iir);
@@ -2419,6 +2492,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 master_ctl;
+ u32 gt_iir[4] = {};
irqreturn_t ret;
if (!intel_irqs_enabled(dev_priv))
@@ -2435,7 +2509,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
disable_rpm_wakeref_asserts(dev_priv);
/* Find, clear, then process each source of interrupt */
- ret = gen8_gt_irq_handler(dev_priv, master_ctl);
+ ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
+ gen8_gt_irq_handler(dev_priv, gt_iir);
ret |= gen8_de_irq_handler(dev_priv, master_ctl);
I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
@@ -2449,8 +2524,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
bool reset_completed)
{
- struct intel_engine_cs *ring;
- int i;
+ struct intel_engine_cs *engine;
/*
* Notify all waiters for GPU completion events that reset state has
@@ -2460,8 +2534,8 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
*/
/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
- for_each_ring(ring, dev_priv, i)
- wake_up_all(&ring->irq_queue);
+ for_each_engine(engine, dev_priv)
+ wake_up_all(&engine->irq_queue);
/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
wake_up_all(&dev_priv->pending_flip_queue);
@@ -2484,7 +2558,6 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
static void i915_reset_and_wakeup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_gpu_error *error = &dev_priv->gpu_error;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
@@ -2502,7 +2575,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
* the reset in-progress bit is only ever set by code outside of this
* work we don't need to worry about any other races.
*/
- if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
+ if (i915_reset_in_progress(&dev_priv->gpu_error)) {
DRM_DEBUG_DRIVER("resetting chip\n");
kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
reset_event);
@@ -2530,25 +2603,9 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
intel_runtime_pm_put(dev_priv);
- if (ret == 0) {
- /*
- * After all the gem state is reset, increment the reset
- * counter and wake up everyone waiting for the reset to
- * complete.
- *
- * Since unlock operations are a one-sided barrier only,
- * we need to insert a barrier here to order any seqno
- * updates before
- * the counter increment.
- */
- smp_mb__before_atomic();
- atomic_inc(&dev_priv->gpu_error.reset_counter);
-
+ if (ret == 0)
kobject_uevent_env(&dev->primary->kdev->kobj,
KOBJ_CHANGE, reset_done_event);
- } else {
- atomic_or(I915_WEDGED, &error->reset_counter);
- }
/*
* Note: The wake_up also serves as a memory barrier so that
@@ -2653,14 +2710,14 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
/**
* i915_handle_error - handle a gpu error
* @dev: drm device
- *
+ * @engine_mask: mask representing engines that are hung
* Do some basic checking of register state at error time and
* dump it to the syslog. Also call i915_capture_error_state() to make
* sure we get a record and make it available in debugfs. Fire a uevent
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
*/
-void i915_handle_error(struct drm_device *dev, bool wedged,
+void i915_handle_error(struct drm_device *dev, u32 engine_mask,
const char *fmt, ...)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2671,10 +2728,10 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
vscnprintf(error_msg, sizeof(error_msg), fmt, args);
va_end(args);
- i915_capture_error_state(dev, wedged, error_msg);
+ i915_capture_error_state(dev, engine_mask, error_msg);
i915_report_and_clear_eir(dev);
- if (wedged) {
+ if (engine_mask) {
atomic_or(I915_RESET_IN_PROGRESS_FLAG,
&dev_priv->gpu_error.reset_counter);
@@ -2805,10 +2862,10 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
}
static bool
-ring_idle(struct intel_engine_cs *ring, u32 seqno)
+ring_idle(struct intel_engine_cs *engine, u32 seqno)
{
- return (list_empty(&ring->request_list) ||
- i915_seqno_passed(seqno, ring->last_submitted_seqno));
+ return i915_seqno_passed(seqno,
+ READ_ONCE(engine->last_submitted_seqno));
}
static bool
@@ -2824,42 +2881,42 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
}
static struct intel_engine_cs *
-semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
+semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
+ u64 offset)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
struct intel_engine_cs *signaller;
- int i;
- if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
- for_each_ring(signaller, dev_priv, i) {
- if (ring == signaller)
+ if (INTEL_INFO(dev_priv)->gen >= 8) {
+ for_each_engine(signaller, dev_priv) {
+ if (engine == signaller)
continue;
- if (offset == signaller->semaphore.signal_ggtt[ring->id])
+ if (offset == signaller->semaphore.signal_ggtt[engine->id])
return signaller;
}
} else {
u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
- for_each_ring(signaller, dev_priv, i) {
- if(ring == signaller)
+ for_each_engine(signaller, dev_priv) {
+ if(engine == signaller)
continue;
- if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
+ if (sync_bits == signaller->semaphore.mbox.wait[engine->id])
return signaller;
}
}
DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
- ring->id, ipehr, offset);
+ engine->id, ipehr, offset);
return NULL;
}
static struct intel_engine_cs *
-semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
+semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
u32 cmd, ipehr, head;
u64 offset = 0;
int i, backwards;
@@ -2881,11 +2938,11 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
* Therefore, this function does not support execlist mode in its
* current form. Just return NULL and move on.
*/
- if (ring->buffer == NULL)
+ if (engine->buffer == NULL)
return NULL;
- ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
- if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
+ ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
+ if (!ipehr_is_semaphore_wait(engine->dev, ipehr))
return NULL;
/*
@@ -2896,8 +2953,8 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
* point at a batch, and semaphores are always emitted into the
* ringbuffer itself.
*/
- head = I915_READ_HEAD(ring) & HEAD_ADDR;
- backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
+ head = I915_READ_HEAD(engine) & HEAD_ADDR;
+ backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4;
for (i = backwards; i; --i) {
/*
@@ -2905,10 +2962,10 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
* our ring is smaller than what the hardware (and hence
* HEAD_ADDR) allows. Also handles wrap-around.
*/
- head &= ring->buffer->size - 1;
+ head &= engine->buffer->size - 1;
/* This here seems to blow up */
- cmd = ioread32(ring->buffer->virtual_start + head);
+ cmd = ioread32(engine->buffer->virtual_start + head);
if (cmd == ipehr)
break;
@@ -2918,32 +2975,32 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
if (!i)
return NULL;
- *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
- if (INTEL_INFO(ring->dev)->gen >= 8) {
- offset = ioread32(ring->buffer->virtual_start + head + 12);
+ *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
+ if (INTEL_INFO(engine->dev)->gen >= 8) {
+ offset = ioread32(engine->buffer->virtual_start + head + 12);
offset <<= 32;
- offset = ioread32(ring->buffer->virtual_start + head + 8);
+ offset = ioread32(engine->buffer->virtual_start + head + 8);
}
- return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
+ return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
}
-static int semaphore_passed(struct intel_engine_cs *ring)
+static int semaphore_passed(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
struct intel_engine_cs *signaller;
u32 seqno;
- ring->hangcheck.deadlock++;
+ engine->hangcheck.deadlock++;
- signaller = semaphore_waits_for(ring, &seqno);
+ signaller = semaphore_waits_for(engine, &seqno);
if (signaller == NULL)
return -1;
/* Prevent pathological recursion due to driver bugs */
- if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
+ if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
return -1;
- if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
+ if (i915_seqno_passed(signaller->get_seqno(signaller), seqno))
return 1;
/* cursory check for an unkickable deadlock */
@@ -2956,23 +3013,22 @@ static int semaphore_passed(struct intel_engine_cs *ring)
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
- struct intel_engine_cs *ring;
- int i;
+ struct intel_engine_cs *engine;
- for_each_ring(ring, dev_priv, i)
- ring->hangcheck.deadlock = 0;
+ for_each_engine(engine, dev_priv)
+ engine->hangcheck.deadlock = 0;
}
-static bool subunits_stuck(struct intel_engine_cs *ring)
+static bool subunits_stuck(struct intel_engine_cs *engine)
{
u32 instdone[I915_NUM_INSTDONE_REG];
bool stuck;
int i;
- if (ring->id != RCS)
+ if (engine->id != RCS)
return true;
- i915_get_extra_instdone(ring->dev, instdone);
+ i915_get_extra_instdone(engine->dev, instdone);
/* There might be unstable subunit states even when
* actual head is not moving. Filter out the unstable ones by
@@ -2981,49 +3037,44 @@ static bool subunits_stuck(struct intel_engine_cs *ring)
*/
stuck = true;
for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
- const u32 tmp = instdone[i] | ring->hangcheck.instdone[i];
+ const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];
- if (tmp != ring->hangcheck.instdone[i])
+ if (tmp != engine->hangcheck.instdone[i])
stuck = false;
- ring->hangcheck.instdone[i] |= tmp;
+ engine->hangcheck.instdone[i] |= tmp;
}
return stuck;
}
static enum intel_ring_hangcheck_action
-head_stuck(struct intel_engine_cs *ring, u64 acthd)
+head_stuck(struct intel_engine_cs *engine, u64 acthd)
{
- if (acthd != ring->hangcheck.acthd) {
+ if (acthd != engine->hangcheck.acthd) {
/* Clear subunit states on head movement */
- memset(ring->hangcheck.instdone, 0,
- sizeof(ring->hangcheck.instdone));
-
- if (acthd > ring->hangcheck.max_acthd) {
- ring->hangcheck.max_acthd = acthd;
- return HANGCHECK_ACTIVE;
- }
+ memset(engine->hangcheck.instdone, 0,
+ sizeof(engine->hangcheck.instdone));
- return HANGCHECK_ACTIVE_LOOP;
+ return HANGCHECK_ACTIVE;
}
- if (!subunits_stuck(ring))
+ if (!subunits_stuck(engine))
return HANGCHECK_ACTIVE;
return HANGCHECK_HUNG;
}
static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_engine_cs *ring, u64 acthd)
+ring_stuck(struct intel_engine_cs *engine, u64 acthd)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_ring_hangcheck_action ha;
u32 tmp;
- ha = head_stuck(ring, acthd);
+ ha = head_stuck(engine, acthd);
if (ha != HANGCHECK_HUNG)
return ha;
@@ -3035,24 +3086,24 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
* and break the hang. This should work on
* all but the second generation chipsets.
*/
- tmp = I915_READ_CTL(ring);
+ tmp = I915_READ_CTL(engine);
if (tmp & RING_WAIT) {
- i915_handle_error(dev, false,
+ i915_handle_error(dev, 0,
"Kicking stuck wait on %s",
- ring->name);
- I915_WRITE_CTL(ring, tmp);
+ engine->name);
+ I915_WRITE_CTL(engine, tmp);
return HANGCHECK_KICK;
}
if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
- switch (semaphore_passed(ring)) {
+ switch (semaphore_passed(engine)) {
default:
return HANGCHECK_HUNG;
case 1:
- i915_handle_error(dev, false,
+ i915_handle_error(dev, 0,
"Kicking stuck semaphore on %s",
- ring->name);
- I915_WRITE_CTL(ring, tmp);
+ engine->name);
+ I915_WRITE_CTL(engine, tmp);
return HANGCHECK_KICK;
case 0:
return HANGCHECK_WAIT;
@@ -3062,6 +3113,24 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
return HANGCHECK_HUNG;
}
+static unsigned kick_waiters(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = to_i915(engine->dev);
+ unsigned user_interrupts = READ_ONCE(engine->user_interrupts);
+
+ if (engine->hangcheck.user_interrupts == user_interrupts &&
+ !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
+ if (!(i915->gpu_error.test_irq_rings & intel_engine_flag(engine)))
+ DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
+ engine->name);
+ else
+ DRM_INFO("Fake missed irq on %s\n",
+ engine->name);
+ wake_up_all(&engine->irq_queue);
+ }
+
+ return user_interrupts;
+}
/*
* This is called when the chip hasn't reported back with completed
* batchbuffers in a long time. We keep track of per-ring seqno progress and
@@ -3076,13 +3145,14 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
container_of(work, typeof(*dev_priv),
gpu_error.hangcheck_work.work);
struct drm_device *dev = dev_priv->dev;
- struct intel_engine_cs *ring;
- int i;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int busy_count = 0, rings_hung = 0;
- bool stuck[I915_NUM_RINGS] = { 0 };
+ bool stuck[I915_NUM_ENGINES] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20
+#define ACTIVE_DECAY 15
if (!i915.enable_hangcheck)
return;
@@ -3100,33 +3170,37 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
- for_each_ring(ring, dev_priv, i) {
+ for_each_engine_id(engine, dev_priv, id) {
u64 acthd;
u32 seqno;
+ unsigned user_interrupts;
bool busy = true;
semaphore_clear_deadlocks(dev_priv);
- seqno = ring->get_seqno(ring, false);
- acthd = intel_ring_get_active_head(ring);
-
- if (ring->hangcheck.seqno == seqno) {
- if (ring_idle(ring, seqno)) {
- ring->hangcheck.action = HANGCHECK_IDLE;
-
- if (waitqueue_active(&ring->irq_queue)) {
- /* Issue a wake-up to catch stuck h/w. */
- if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
- if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
- DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
- ring->name);
- else
- DRM_INFO("Fake missed irq on %s\n",
- ring->name);
- wake_up_all(&ring->irq_queue);
- }
+ /* We don't strictly need an irq-barrier here, as we are not
+ * serving an interrupt request, be paranoid in case the
+ * barrier has side-effects (such as preventing a broken
+ * cacheline snoop) and so be sure that we can see the seqno
+ * advance. If the seqno should stick, due to a stale
+ * cacheline, we would erroneously declare the GPU hung.
+ */
+ if (engine->irq_seqno_barrier)
+ engine->irq_seqno_barrier(engine);
+
+ acthd = intel_ring_get_active_head(engine);
+ seqno = engine->get_seqno(engine);
+
+ /* Reset stuck interrupts between batch advances */
+ user_interrupts = 0;
+
+ if (engine->hangcheck.seqno == seqno) {
+ if (ring_idle(engine, seqno)) {
+ engine->hangcheck.action = HANGCHECK_IDLE;
+ if (waitqueue_active(&engine->irq_queue)) {
/* Safeguard against driver failure */
- ring->hangcheck.score += BUSY;
+ user_interrupts = kick_waiters(engine);
+ engine->hangcheck.score += BUSY;
} else
busy = false;
} else {
@@ -3145,58 +3219,60 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
* being repeatedly kicked and so responsible
* for stalling the machine.
*/
- ring->hangcheck.action = ring_stuck(ring,
- acthd);
+ engine->hangcheck.action = ring_stuck(engine,
+ acthd);
- switch (ring->hangcheck.action) {
+ switch (engine->hangcheck.action) {
case HANGCHECK_IDLE:
case HANGCHECK_WAIT:
- case HANGCHECK_ACTIVE:
break;
- case HANGCHECK_ACTIVE_LOOP:
- ring->hangcheck.score += BUSY;
+ case HANGCHECK_ACTIVE:
+ engine->hangcheck.score += BUSY;
break;
case HANGCHECK_KICK:
- ring->hangcheck.score += KICK;
+ engine->hangcheck.score += KICK;
break;
case HANGCHECK_HUNG:
- ring->hangcheck.score += HUNG;
- stuck[i] = true;
+ engine->hangcheck.score += HUNG;
+ stuck[id] = true;
break;
}
}
} else {
- ring->hangcheck.action = HANGCHECK_ACTIVE;
+ engine->hangcheck.action = HANGCHECK_ACTIVE;
/* Gradually reduce the count so that we catch DoS
* attempts across multiple batches.
*/
- if (ring->hangcheck.score > 0)
- ring->hangcheck.score--;
+ if (engine->hangcheck.score > 0)
+ engine->hangcheck.score -= ACTIVE_DECAY;
+ if (engine->hangcheck.score < 0)
+ engine->hangcheck.score = 0;
/* Clear head and subunit states on seqno movement */
- ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
+ acthd = 0;
- memset(ring->hangcheck.instdone, 0,
- sizeof(ring->hangcheck.instdone));
+ memset(engine->hangcheck.instdone, 0,
+ sizeof(engine->hangcheck.instdone));
}
- ring->hangcheck.seqno = seqno;
- ring->hangcheck.acthd = acthd;
+ engine->hangcheck.seqno = seqno;
+ engine->hangcheck.acthd = acthd;
+ engine->hangcheck.user_interrupts = user_interrupts;
busy_count += busy;
}
- for_each_ring(ring, dev_priv, i) {
- if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
+ for_each_engine_id(engine, dev_priv, id) {
+ if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
DRM_INFO("%s on %s\n",
- stuck[i] ? "stuck" : "no progress",
- ring->name);
- rings_hung++;
+ stuck[id] ? "stuck" : "no progress",
+ engine->name);
+ rings_hung |= intel_engine_flag(engine);
}
}
if (rings_hung) {
- i915_handle_error(dev, true, "Ring hung");
+ i915_handle_error(dev, rings_hung, "Engine(s) hung");
goto out;
}
@@ -3267,6 +3343,55 @@ static void gen5_gt_irq_reset(struct drm_device *dev)
GEN5_IRQ_RESET(GEN6_PM);
}
+static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+{
+ enum pipe pipe;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
+ else
+ I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
+
+ i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+ for_each_pipe(dev_priv, pipe) {
+ I915_WRITE(PIPESTAT(pipe),
+ PIPE_FIFO_UNDERRUN_STATUS |
+ PIPESTAT_INT_STATUS_MASK);
+ dev_priv->pipestat_irq_mask[pipe] = 0;
+ }
+
+ GEN5_IRQ_RESET(VLV_);
+ dev_priv->irq_mask = ~0;
+}
+
+static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+ u32 pipestat_mask;
+ u32 enable_mask;
+ enum pipe pipe;
+
+ pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
+ PIPE_CRC_DONE_INTERRUPT_STATUS;
+
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ for_each_pipe(dev_priv, pipe)
+ i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
+
+ enable_mask = I915_DISPLAY_PORT_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ if (IS_CHERRYVIEW(dev_priv))
+ enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+
+ WARN_ON(dev_priv->irq_mask != ~0);
+
+ dev_priv->irq_mask = ~enable_mask;
+
+ GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
+}
+
/* drm_dma.h hooks
*/
static void ironlake_irq_reset(struct drm_device *dev)
@@ -3284,34 +3409,19 @@ static void ironlake_irq_reset(struct drm_device *dev)
ibx_irq_reset(dev);
}
-static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
-{
- enum pipe pipe;
-
- i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
-
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0xffff);
-
- GEN5_IRQ_RESET(VLV_);
-}
-
static void valleyview_irq_preinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- /* VLV magic */
- I915_WRITE(VLV_IMR, 0);
- I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
- I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
- I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
+ I915_WRITE(VLV_MASTER_IER, 0);
+ POSTING_READ(VLV_MASTER_IER);
gen5_gt_irq_reset(dev);
- I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
-
- vlv_display_irq_reset(dev_priv);
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display_irqs_enabled)
+ vlv_display_irq_reset(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
@@ -3384,9 +3494,10 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
GEN5_IRQ_RESET(GEN8_PCU_);
- I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
-
- vlv_display_irq_reset(dev_priv);
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display_irqs_enabled)
+ vlv_display_irq_reset(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
@@ -3506,6 +3617,26 @@ static void bxt_hpd_irq_setup(struct drm_device *dev)
hotplug = I915_READ(PCH_PORT_HOTPLUG);
hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
PORTA_HOTPLUG_ENABLE;
+
+ DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
+ hotplug, enabled_irqs);
+ hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
+
+ /*
+ * For BXT, the invert bit has to be set based on the AOB design
+ * for the HPD detection logic; update it based on VBT fields.
+ */
+
+ if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
+ intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
+ hotplug |= BXT_DDIA_HPD_INVERT;
+ if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
+ intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
+ hotplug |= BXT_DDIB_HPD_INVERT;
+ if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
+ intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
+ hotplug |= BXT_DDIC_HPD_INVERT;
+
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
@@ -3613,74 +3744,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
return 0;
}
-static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
-{
- u32 pipestat_mask;
- u32 iir_mask;
- enum pipe pipe;
-
- pipestat_mask = PIPESTAT_INT_STATUS_MASK |
- PIPE_FIFO_UNDERRUN_STATUS;
-
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), pipestat_mask);
- POSTING_READ(PIPESTAT(PIPE_A));
-
- pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
- PIPE_CRC_DONE_INTERRUPT_STATUS;
-
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
- for_each_pipe(dev_priv, pipe)
- i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
-
- iir_mask = I915_DISPLAY_PORT_INTERRUPT |
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
- if (IS_CHERRYVIEW(dev_priv))
- iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
- dev_priv->irq_mask &= ~iir_mask;
-
- I915_WRITE(VLV_IIR, iir_mask);
- I915_WRITE(VLV_IIR, iir_mask);
- I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
- I915_WRITE(VLV_IMR, dev_priv->irq_mask);
- POSTING_READ(VLV_IMR);
-}
-
-static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
-{
- u32 pipestat_mask;
- u32 iir_mask;
- enum pipe pipe;
-
- iir_mask = I915_DISPLAY_PORT_INTERRUPT |
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
- if (IS_CHERRYVIEW(dev_priv))
- iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
-
- dev_priv->irq_mask |= iir_mask;
- I915_WRITE(VLV_IMR, dev_priv->irq_mask);
- I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
- I915_WRITE(VLV_IIR, iir_mask);
- I915_WRITE(VLV_IIR, iir_mask);
- POSTING_READ(VLV_IIR);
-
- pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
- PIPE_CRC_DONE_INTERRUPT_STATUS;
-
- i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
- for_each_pipe(dev_priv, pipe)
- i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
-
- pipestat_mask = PIPESTAT_INT_STATUS_MASK |
- PIPE_FIFO_UNDERRUN_STATUS;
-
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), pipestat_mask);
- POSTING_READ(PIPESTAT(PIPE_A));
-}
-
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
assert_spin_locked(&dev_priv->irq_lock);
@@ -3690,8 +3753,10 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
dev_priv->display_irqs_enabled = true;
- if (intel_irqs_enabled(dev_priv))
- valleyview_display_irqs_install(dev_priv);
+ if (intel_irqs_enabled(dev_priv)) {
+ vlv_display_irq_reset(dev_priv);
+ vlv_display_irq_postinstall(dev_priv);
+ }
}
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
@@ -3704,45 +3769,23 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
dev_priv->display_irqs_enabled = false;
if (intel_irqs_enabled(dev_priv))
- valleyview_display_irqs_uninstall(dev_priv);
+ vlv_display_irq_reset(dev_priv);
}
-static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
-{
- dev_priv->irq_mask = ~0;
-
- i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
- POSTING_READ(PORT_HOTPLUG_EN);
-
- I915_WRITE(VLV_IIR, 0xffffffff);
- I915_WRITE(VLV_IIR, 0xffffffff);
- I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
- I915_WRITE(VLV_IMR, dev_priv->irq_mask);
- POSTING_READ(VLV_IMR);
-
- /* Interrupt setup is already guaranteed to be single-threaded, this is
- * just to make the assert_spin_locked check happy. */
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display_irqs_enabled)
- valleyview_display_irqs_install(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-}
static int valleyview_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- vlv_display_irq_postinstall(dev_priv);
-
gen5_gt_irq_postinstall(dev);
- /* ack & enable invalid PTE error interrupts */
-#if 0 /* FIXME: add support to irq handler for checking these bits */
- I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
- I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
-#endif
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display_irqs_enabled)
+ vlv_display_irq_postinstall(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+ POSTING_READ(VLV_MASTER_IER);
return 0;
}
@@ -3753,7 +3796,6 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
uint32_t gt_interrupts[] = {
GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
@@ -3765,6 +3807,9 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
};
+ if (HAS_L3_DPF(dev_priv))
+ gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+
dev_priv->pm_irq_mask = 0xffffffff;
GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
@@ -3832,7 +3877,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev))
ibx_irq_postinstall(dev);
- I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
+ I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
return 0;
@@ -3842,11 +3887,14 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- vlv_display_irq_postinstall(dev_priv);
-
gen8_gt_irq_postinstall(dev_priv);
- I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display_irqs_enabled)
+ vlv_display_irq_postinstall(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
return 0;
@@ -3862,20 +3910,6 @@ static void gen8_irq_uninstall(struct drm_device *dev)
gen8_irq_reset(dev);
}
-static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
-{
- /* Interrupt setup is already guaranteed to be single-threaded, this is
- * just to make the assert_spin_locked check happy. */
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display_irqs_enabled)
- valleyview_display_irqs_uninstall(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- vlv_display_irq_reset(dev_priv);
-
- dev_priv->irq_mask = ~0;
-}
-
static void valleyview_irq_uninstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3884,12 +3918,16 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
return;
I915_WRITE(VLV_MASTER_IER, 0);
+ POSTING_READ(VLV_MASTER_IER);
gen5_gt_irq_reset(dev);
I915_WRITE(HWSTAM, 0xffffffff);
- vlv_display_irq_uninstall(dev_priv);
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display_irqs_enabled)
+ vlv_display_irq_reset(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
static void cherryview_irq_uninstall(struct drm_device *dev)
@@ -3906,7 +3944,10 @@ static void cherryview_irq_uninstall(struct drm_device *dev)
GEN5_IRQ_RESET(GEN8_PCU_);
- vlv_display_irq_uninstall(dev_priv);
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display_irqs_enabled)
+ vlv_display_irq_reset(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
static void ironlake_irq_uninstall(struct drm_device *dev)
@@ -4044,7 +4085,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
new_iir = I915_READ16(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(&dev_priv->ring[RCS]);
+ notify_ring(&dev_priv->engine[RCS]);
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
@@ -4233,14 +4274,17 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
/* Consume port. Then clear IIR or we'll miss events */
if (I915_HAS_HOTPLUG(dev) &&
- iir & I915_DISPLAY_PORT_INTERRUPT)
- i9xx_hpd_irq_handler(dev);
+ iir & I915_DISPLAY_PORT_INTERRUPT) {
+ u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ if (hotplug_status)
+ i9xx_hpd_irq_handler(dev, hotplug_status);
+ }
I915_WRITE(IIR, iir & ~flip_mask);
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(&dev_priv->ring[RCS]);
+ notify_ring(&dev_priv->engine[RCS]);
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
@@ -4463,16 +4507,19 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
/* Consume port. Then clear IIR or we'll miss events */
- if (iir & I915_DISPLAY_PORT_INTERRUPT)
- i9xx_hpd_irq_handler(dev);
+ if (iir & I915_DISPLAY_PORT_INTERRUPT) {
+ u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ if (hotplug_status)
+ i9xx_hpd_irq_handler(dev, hotplug_status);
+ }
I915_WRITE(IIR, iir & ~flip_mask);
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(&dev_priv->ring[RCS]);
+ notify_ring(&dev_priv->engine[RCS]);
if (iir & I915_BSD_USER_INTERRUPT)
- notify_ring(&dev_priv->ring[VCS]);
+ notify_ring(&dev_priv->engine[VCS]);
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
@@ -4567,8 +4614,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
i915_hangcheck_elapsed);
- pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
-
if (IS_GEN2(dev_priv)) {
dev->max_vblank_count = 0;
dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
@@ -4616,7 +4661,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->disable_vblank = gen8_disable_vblank;
if (IS_BROXTON(dev))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
- else if (HAS_PCH_SPT(dev))
+ else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
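As a hedged illustration of the ordering described by the "theory on interrupt generation" comments in the valleyview/cherryview handlers above (mask VLV_MASTER_IER/VLV_IER, ack IIR, re-enable, then process), here is a small self-contained userspace sketch; every name in it is invented for illustration and none of it is i915 code:

/* Hedged sketch only: models the mask -> ack -> re-arm -> handle ordering.
 * The fake latch and enable variables stand in for IIR/IER/master enable. */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_iir = 0x5;	/* pretend sources 0 and 2 are latched */
static uint32_t fake_ier = ~0u;
static int master_enabled = 1;

static void handle_source(int bit)
{
	printf("handling source %d with interrupts re-armed\n", bit);
}

static int example_irq(void)
{
	uint32_t iir = fake_iir;
	uint32_t saved_ier;
	int bit;

	if (!iir)
		return 0;

	/* Drop master + IER first so that acking IIR can produce a fresh
	 * 0->1 edge if new sources arrive while we are still in here. */
	master_enabled = 0;
	saved_ier = fake_ier;
	fake_ier = 0;

	fake_iir &= ~iir;	/* ack the latched bits */

	fake_ier = saved_ier;	/* re-arm before the potentially slow work */
	master_enabled = 1;

	for (bit = 0; bit < 32; bit++)
		if (iir & (1u << bit))
			handle_source(bit);

	return 1;
}

int main(void)
{
	return example_irq() ? 0 : 1;
}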
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 278c9c40c..1779f02e6 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -56,6 +56,8 @@ struct i915_params i915 __read_mostly = {
.edp_vswing = 0,
.enable_guc_submission = false,
.guc_log_level = -1,
+ .enable_dp_mst = true,
+ .inject_load_failure = 0,
};
module_param_named(modeset, i915.modeset, int, 0400);
@@ -201,3 +203,10 @@ MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default:false)")
module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
MODULE_PARM_DESC(guc_log_level,
"GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
+
+module_param_named_unsafe(enable_dp_mst, i915.enable_dp_mst, bool, 0600);
+MODULE_PARM_DESC(enable_dp_mst,
+ "Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
+module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400);
+MODULE_PARM_DESC(inject_load_failure,
+ "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index bd5026b15..02bc27804 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -49,6 +49,7 @@ struct i915_params {
int use_mmio_flip;
int mmio_debug;
int edp_vswing;
+ unsigned int inject_load_failure;
/* leave bools at the end to not create holes */
bool enable_hangcheck;
bool fastboot;
@@ -59,6 +60,7 @@ struct i915_params {
bool enable_guc_submission;
bool verbose_state_checks;
bool nuclear_pageflip;
+ bool enable_dp_mst;
};
extern struct i915_params i915 __read_mostly;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3d13d0e55..720aab9a5 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -79,6 +79,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/* PCI config space */
+#define MCHBAR_I915 0x44
+#define MCHBAR_I965 0x48
+#define MCHBAR_SIZE (4 * 4096)
+
+#define DEVEN 0x54
+#define DEVEN_MCHBAR_EN (1 << 28)
+
+#define BSM 0x5c
+#define BSM_MASK (0xFFFF << 20)
+
#define HPLLCC 0xc0 /* 85x only */
#define GC_CLOCK_CONTROL_MASK (0x7 << 0)
#define GC_CLOCK_133_200 (0 << 0)
@@ -90,6 +100,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GC_CLOCK_166_266 (6 << 0)
#define GC_CLOCK_166_250 (7 << 0)
+#define I915_GDRST 0xc0 /* PCI config register */
+#define GRDOM_FULL (0 << 2)
+#define GRDOM_RENDER (1 << 2)
+#define GRDOM_MEDIA (3 << 2)
+#define GRDOM_MASK (3 << 2)
+#define GRDOM_RESET_STATUS (1 << 1)
+#define GRDOM_RESET_ENABLE (1 << 0)
+
+#define GCDGMBUS 0xcc
+
#define GCFGC2 0xda
#define GCFGC 0xf0 /* 915+ only */
#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
@@ -121,18 +141,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
-#define GCDGMBUS 0xcc
-#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
+#define ASLE 0xe4
+#define ASLS 0xfc
+
+#define SWSCI 0xe8
+#define SWSCI_SCISEL (1 << 15)
+#define SWSCI_GSSCIE (1 << 0)
+
+#define LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
-/* Graphics reset regs */
-#define I915_GDRST 0xc0 /* PCI config register */
-#define GRDOM_FULL (0<<2)
-#define GRDOM_RENDER (1<<2)
-#define GRDOM_MEDIA (3<<2)
-#define GRDOM_MASK (3<<2)
-#define GRDOM_RESET_STATUS (1<<1)
-#define GRDOM_RESET_ENABLE (1<<0)
#define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4)
#define ILK_GRDOM_FULL (0<<1)
@@ -164,6 +182,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN6_GRDOM_RENDER (1 << 1)
#define GEN6_GRDOM_MEDIA (1 << 2)
#define GEN6_GRDOM_BLT (1 << 3)
+#define GEN6_GRDOM_VECS (1 << 4)
+#define GEN9_GRDOM_GUC (1 << 5)
+#define GEN8_GRDOM_MEDIA2 (1 << 7)
#define RING_PP_DIR_BASE(ring) _MMIO((ring)->mmio_base+0x228)
#define RING_PP_DIR_BASE_READ(ring) _MMIO((ring)->mmio_base+0x518)
@@ -199,6 +220,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ECOCHK_PPGTT_WT_HSW (0x2<<3)
#define ECOCHK_PPGTT_WB_HSW (0x3<<3)
+#define GEN8_CONFIG0 _MMIO(0xD00)
+#define GEN9_DEFAULT_FIXES (1 << 3 | 1 << 2 | 1 << 1)
+
#define GAC_ECO_BITS _MMIO(0x14090)
#define ECOBITS_SNB_BIT (1<<13)
#define ECOBITS_PPGTT_CACHE64B (3<<8)
@@ -586,6 +610,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN7_GPGPU_DISPATCHDIMY _MMIO(0x2504)
#define GEN7_GPGPU_DISPATCHDIMZ _MMIO(0x2508)
+/* These are the 16 64-bit CS General Purpose Registers */
+#define HSW_CS_GPR(n) _MMIO(0x2600 + (n) * 8)
+#define HSW_CS_GPR_UDW(n) _MMIO(0x2600 + (n) * 8 + 4)
+
#define OACONTROL _MMIO(0x2360)
#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
@@ -621,6 +649,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define IOSF_PORT_GPIO_SC 0x48
#define IOSF_PORT_GPIO_SUS 0xa8
#define IOSF_PORT_CCU 0xa9
+#define CHV_IOSF_PORT_GPIO_N 0x13
+#define CHV_IOSF_PORT_GPIO_SE 0x48
+#define CHV_IOSF_PORT_GPIO_E 0xa8
+#define CHV_IOSF_PORT_GPIO_SW 0xb2
#define VLV_IOSF_DATA _MMIO(VLV_DISPLAY_BASE + 0x2104)
#define VLV_IOSF_ADDR _MMIO(VLV_DISPLAY_BASE + 0x2108)
@@ -785,7 +817,9 @@ enum skl_disp_power_wells {
#define DSI_PLL_M1_DIV_SHIFT 0
#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
#define CCK_CZ_CLOCK_CONTROL 0x62
+#define CCK_GPLL_CLOCK_CONTROL 0x67
#define CCK_DISPLAY_CLOCK_CONTROL 0x6b
+#define CCK_DISPLAY_REF_CLOCK_CONTROL 0x6c
#define CCK_TRUNK_FORCE_ON (1 << 17)
#define CCK_TRUNK_FORCE_OFF (1 << 16)
#define CCK_FREQUENCY_STATUS (0x1f << 8)
@@ -1317,6 +1351,7 @@ enum skl_disp_power_wells {
#define _PORT_CL1CM_DW0_A 0x162000
#define _PORT_CL1CM_DW0_BC 0x6C000
#define PHY_POWER_GOOD (1 << 16)
+#define PHY_RESERVED (1 << 7)
#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC, \
_PORT_CL1CM_DW0_A)
@@ -1361,14 +1396,10 @@ enum skl_disp_power_wells {
#define _PORT_REF_DW6_A 0x162198
#define _PORT_REF_DW6_BC 0x6C198
-/*
- * FIXME: BSpec/CHV ConfigDB disagrees on the following two fields, fix them
- * after testing.
- */
-#define GRC_CODE_SHIFT 23
-#define GRC_CODE_MASK (0x1FF << GRC_CODE_SHIFT)
+#define GRC_CODE_SHIFT 24
+#define GRC_CODE_MASK (0xFF << GRC_CODE_SHIFT)
#define GRC_CODE_FAST_SHIFT 16
-#define GRC_CODE_FAST_MASK (0x7F << GRC_CODE_FAST_SHIFT)
+#define GRC_CODE_FAST_MASK (0xFF << GRC_CODE_FAST_SHIFT)
#define GRC_CODE_SLOW_SHIFT 8
#define GRC_CODE_SLOW_MASK (0xFF << GRC_CODE_SLOW_SHIFT)
#define GRC_CODE_NOM_MASK 0xFF
@@ -1641,6 +1672,9 @@ enum skl_disp_power_wells {
#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
+#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
+#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28)
+
#if 0
#define PRB0_TAIL _MMIO(0x2030)
#define PRB0_HEAD _MMIO(0x2034)
@@ -1776,6 +1810,22 @@ enum skl_disp_power_wells {
#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2))
#define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2))
+/* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */
+#define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20D4)
+#define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2)
+
+/* WaClearTdlStateAckDirtyBits */
+#define GEN8_STATE_ACK _MMIO(0x20F0)
+#define GEN9_STATE_ACK_SLICE1 _MMIO(0x20F8)
+#define GEN9_STATE_ACK_SLICE2 _MMIO(0x2100)
+#define GEN9_STATE_ACK_TDL0 (1 << 12)
+#define GEN9_STATE_ACK_TDL1 (1 << 13)
+#define GEN9_STATE_ACK_TDL2 (1 << 14)
+#define GEN9_STATE_ACK_TDL3 (1 << 15)
+#define GEN9_SUBSLICE_TDL_ACK_BITS \
+ (GEN9_STATE_ACK_TDL3 | GEN9_STATE_ACK_TDL2 | \
+ GEN9_STATE_ACK_TDL1 | GEN9_STATE_ACK_TDL0)
+
#define GFX_MODE _MMIO(0x2520)
#define GFX_MODE_GEN7 _MMIO(0x229c)
#define RING_MODE_GEN7(ring) _MMIO((ring)->mmio_base+0x29c)
@@ -1795,6 +1845,7 @@ enum skl_disp_power_wells {
#define VLV_DISPLAY_BASE 0x180000
#define VLV_MIPI_BASE VLV_DISPLAY_BASE
+#define BXT_MIPI_BASE 0x60000
#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030)
#define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034)
@@ -2159,6 +2210,8 @@ enum skl_disp_power_wells {
#define ILK_DPFC_STATUS _MMIO(0x43210)
#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218)
#define ILK_DPFC_CHICKEN _MMIO(0x43224)
+#define ILK_DPFC_DISABLE_DUMMY0 (1<<8)
+#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1<<23)
#define ILK_FBC_RT_BASE _MMIO(0x2128)
#define ILK_FBC_RT_VALID (1<<0)
#define SNB_FBC_FRONT_BUFFER (1<<1)
@@ -2923,6 +2976,15 @@ enum skl_disp_power_wells {
INTERVAL_1_33_US(us)) : \
INTERVAL_1_28_US(us))
+#define INTERVAL_1_28_TO_US(interval) (((interval) << 7) / 100)
+#define INTERVAL_1_33_TO_US(interval) (((interval) << 2) / 3)
+#define INTERVAL_0_833_TO_US(interval) (((interval) * 5) / 6)
+#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (IS_GEN9(dev_priv) ? \
+ (IS_BROXTON(dev_priv) ? \
+ INTERVAL_0_833_TO_US(interval) : \
+ INTERVAL_1_33_TO_US(interval)) : \
+ INTERVAL_1_28_TO_US(interval))
+
/*
* Logical Context regs
*/
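The GT_PM_INTERVAL_TO_US additions in the hunk above are plain fixed-point scalings of the per-platform tick (1.28 us, 1.33 us, 0.833 us). A minimal, self-contained sketch of the same arithmetic, with sample inputs chosen purely for illustration:

#include <stdio.h>

/* Same arithmetic as the GT_PM_INTERVAL_TO_US additions above: a tick of
 * 1.28 us maps to (interval << 7) / 100, 1.33 us to (interval << 2) / 3 and
 * 0.833 us to interval * 5 / 6. The input of 100 ticks is just an example. */
static unsigned int interval_1_28_to_us(unsigned int interval)
{
	return (interval << 7) / 100;
}

static unsigned int interval_1_33_to_us(unsigned int interval)
{
	return (interval << 2) / 3;
}

static unsigned int interval_0_833_to_us(unsigned int interval)
{
	return interval * 5 / 6;
}

int main(void)
{
	/* 100 ticks -> ~128 us, ~133 us and ~83 us respectively */
	printf("%u %u %u\n",
	       interval_1_28_to_us(100),
	       interval_1_33_to_us(100),
	       interval_0_833_to_us(100));
	return 0;
}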
@@ -4784,6 +4846,10 @@ enum skl_disp_power_wells {
#define CBR_PND_DEADLINE_DISABLE (1<<31)
#define CBR_PWM_CLOCK_MUX_SELECT (1<<30)
+#define CBR4_VLV _MMIO(VLV_DISPLAY_BASE + 0x70450)
+#define CBR_DPLLBMD_PIPE_C (1<<29)
+#define CBR_DPLLBMD_PIPE_B (1<<18)
+
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
#define I915_FIFO_LINE_SIZE 64
@@ -5977,6 +6043,7 @@ enum skl_disp_power_wells {
#define CHICKEN_PAR1_1 _MMIO(0x42080)
#define DPA_MASK_VBLANK_SRD (1 << 15)
#define FORCE_ARB_IDLE_PLANES (1 << 14)
+#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3)
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
@@ -5985,6 +6052,7 @@ enum skl_disp_power_wells {
#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
#define DISP_ARB_CTL _MMIO(0x45000)
+#define DISP_FBC_MEMORY_WAKE (1<<31)
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
#define DISP_FBC_WM_DIS (1<<15)
#define DISP_ARB_CTL2 _MMIO(0x45004)
@@ -5998,6 +6066,9 @@ enum skl_disp_power_wells {
#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
+#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
+#define MASK_WAKEMEM (1<<13)
+
#define SKL_DFSM _MMIO(0x51000)
#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
@@ -6015,6 +6086,7 @@ enum skl_disp_power_wells {
#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
+#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
/* GEN7 chicken */
@@ -6022,6 +6094,7 @@ enum skl_disp_power_wells {
# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
+# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
#define HIZ_CHICKEN _MMIO(0x7018)
@@ -6184,6 +6257,7 @@ enum skl_disp_power_wells {
/* digital port hotplug */
#define PCH_PORT_HOTPLUG _MMIO(0xc4030) /* SHOTPLUG_CTL */
#define PORTA_HOTPLUG_ENABLE (1 << 28) /* LPT:LP+ & BXT */
+#define BXT_DDIA_HPD_INVERT (1 << 27)
#define PORTA_HOTPLUG_STATUS_MASK (3 << 24) /* SPT+ & BXT */
#define PORTA_HOTPLUG_NO_DETECT (0 << 24) /* SPT+ & BXT */
#define PORTA_HOTPLUG_SHORT_DETECT (1 << 24) /* SPT+ & BXT */
@@ -6199,6 +6273,7 @@ enum skl_disp_power_wells {
#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
#define PORTD_HOTPLUG_LONG_DETECT (2 << 16)
#define PORTC_HOTPLUG_ENABLE (1 << 12)
+#define BXT_DDIC_HPD_INVERT (1 << 11)
#define PORTC_PULSE_DURATION_2ms (0 << 10) /* pre-LPT */
#define PORTC_PULSE_DURATION_4_5ms (1 << 10) /* pre-LPT */
#define PORTC_PULSE_DURATION_6ms (2 << 10) /* pre-LPT */
@@ -6209,6 +6284,7 @@ enum skl_disp_power_wells {
#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
#define PORTC_HOTPLUG_LONG_DETECT (2 << 8)
#define PORTB_HOTPLUG_ENABLE (1 << 4)
+#define BXT_DDIB_HPD_INVERT (1 << 3)
#define PORTB_PULSE_DURATION_2ms (0 << 2) /* pre-LPT */
#define PORTB_PULSE_DURATION_4_5ms (1 << 2) /* pre-LPT */
#define PORTB_PULSE_DURATION_6ms (2 << 2) /* pre-LPT */
@@ -6218,6 +6294,9 @@ enum skl_disp_power_wells {
#define PORTB_HOTPLUG_NO_DETECT (0 << 0)
#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
+#define BXT_DDI_HPD_INVERT_MASK (BXT_DDIA_HPD_INVERT | \
+ BXT_DDIB_HPD_INVERT | \
+ BXT_DDIC_HPD_INVERT)
#define PCH_PORT_HOTPLUG2 _MMIO(0xc403C) /* SHOTPLUG_CTL2 SPT+ */
#define PORTE_HOTPLUG_ENABLE (1 << 4)
@@ -6836,6 +6915,8 @@ enum skl_disp_power_wells {
#define VLV_SPAREG2H _MMIO(0xA194)
#define GTFIFODBG _MMIO(0x120000)
+#define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20)
+#define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13)
#define GT_FIFO_SBDROPERR (1<<6)
#define GT_FIFO_BLOBDROPERR (1<<5)
#define GT_FIFO_SB_READ_ABORTERR (1<<4)
@@ -6852,10 +6933,14 @@ enum skl_disp_power_wells {
#define HSW_IDICR _MMIO(0x9008)
#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
-#define HSW_EDRAM_PRESENT _MMIO(0x120010)
+#define HSW_EDRAM_CAP _MMIO(0x120010)
#define EDRAM_ENABLED 0x1
+#define EDRAM_NUM_BANKS(cap) (((cap) >> 1) & 0xf)
+#define EDRAM_WAYS_IDX(cap) (((cap) >> 5) & 0x7)
+#define EDRAM_SETS_IDX(cap) (((cap) >> 8) & 0x3)
#define GEN6_UCGCTL1 _MMIO(0x9400)
+# define GEN6_GAMUNIT_CLOCK_GATE_DISABLE (1 << 22)
# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
@@ -6872,6 +6957,7 @@ enum skl_disp_power_wells {
#define GEN7_UCGCTL4 _MMIO(0x940c)
#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
+#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1<<14)
#define GEN6_RCGCTL1 _MMIO(0x9410)
#define GEN6_RCGCTL2 _MMIO(0x9414)
@@ -7114,6 +7200,7 @@ enum skl_disp_power_wells {
#define GEN9_CCS_TLB_PREFETCH_ENABLE (1<<3)
#define GEN8_ROW_CHICKEN _MMIO(0xe4f0)
+#define FLOW_CONTROL_ENABLE (1<<15)
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
#define STALL_DOP_GATING_DISABLE (1<<5)
@@ -7135,6 +7222,7 @@ enum skl_disp_power_wells {
#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194)
#define GEN9_ENABLE_YV12_BUGFIX (1<<4)
+#define GEN9_ENABLE_GPGPU_PREEMPTION (1<<2)
/* Audio */
#define G4X_AUD_VID_DID _MMIO(dev_priv->info.display_mmio_offset + 0x62020)
@@ -7374,9 +7462,11 @@ enum skl_disp_power_wells {
/* SBI offsets */
#define SBI_SSCDIVINTPHASE 0x0200
#define SBI_SSCDIVINTPHASE6 0x0600
-#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1)
+#define SBI_SSCDIVINTPHASE_DIVSEL_SHIFT 1
+#define SBI_SSCDIVINTPHASE_DIVSEL_MASK (0x7f<<1)
#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1)
-#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8)
+#define SBI_SSCDIVINTPHASE_INCVAL_SHIFT 8
+#define SBI_SSCDIVINTPHASE_INCVAL_MASK (0x7f<<8)
#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8)
#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
@@ -7386,6 +7476,8 @@ enum skl_disp_power_wells {
#define SBI_SSCCTL_PATHALT (1<<3)
#define SBI_SSCCTL_DISABLE (1<<0)
#define SBI_SSCAUXDIV6 0x0610
+#define SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT 4
+#define SBI_SSCAUXDIV_FINALDIV2SEL_MASK (1<<4)
#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
#define SBI_DBUFF0 0x2a00
#define SBI_GEN0 0x1f00
@@ -7665,6 +7757,59 @@ enum skl_disp_power_wells {
#define PIPE_CSC_POSTOFF_ME(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
#define PIPE_CSC_POSTOFF_LO(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
+/* pipe degamma/gamma LUTs on IVB+ */
+#define _PAL_PREC_INDEX_A 0x4A400
+#define _PAL_PREC_INDEX_B 0x4AC00
+#define _PAL_PREC_INDEX_C 0x4B400
+#define PAL_PREC_10_12_BIT (0 << 31)
+#define PAL_PREC_SPLIT_MODE (1 << 31)
+#define PAL_PREC_AUTO_INCREMENT (1 << 15)
+#define _PAL_PREC_DATA_A 0x4A404
+#define _PAL_PREC_DATA_B 0x4AC04
+#define _PAL_PREC_DATA_C 0x4B404
+#define _PAL_PREC_GC_MAX_A 0x4A410
+#define _PAL_PREC_GC_MAX_B 0x4AC10
+#define _PAL_PREC_GC_MAX_C 0x4B410
+#define _PAL_PREC_EXT_GC_MAX_A 0x4A420
+#define _PAL_PREC_EXT_GC_MAX_B 0x4AC20
+#define _PAL_PREC_EXT_GC_MAX_C 0x4B420
+
+#define PREC_PAL_INDEX(pipe) _MMIO_PIPE(pipe, _PAL_PREC_INDEX_A, _PAL_PREC_INDEX_B)
+#define PREC_PAL_DATA(pipe) _MMIO_PIPE(pipe, _PAL_PREC_DATA_A, _PAL_PREC_DATA_B)
+#define PREC_PAL_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_GC_MAX_A, _PAL_PREC_GC_MAX_B) + (i) * 4)
+#define PREC_PAL_EXT_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_EXT_GC_MAX_A, _PAL_PREC_EXT_GC_MAX_B) + (i) * 4)
+
+/* pipe CSC & degamma/gamma LUTs on CHV */
+#define _CGM_PIPE_A_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x67900)
+#define _CGM_PIPE_A_CSC_COEFF23 (VLV_DISPLAY_BASE + 0x67904)
+#define _CGM_PIPE_A_CSC_COEFF45 (VLV_DISPLAY_BASE + 0x67908)
+#define _CGM_PIPE_A_CSC_COEFF67 (VLV_DISPLAY_BASE + 0x6790C)
+#define _CGM_PIPE_A_CSC_COEFF8 (VLV_DISPLAY_BASE + 0x67910)
+#define _CGM_PIPE_A_DEGAMMA (VLV_DISPLAY_BASE + 0x66000)
+#define _CGM_PIPE_A_GAMMA (VLV_DISPLAY_BASE + 0x67000)
+#define _CGM_PIPE_A_MODE (VLV_DISPLAY_BASE + 0x67A00)
+#define CGM_PIPE_MODE_GAMMA (1 << 2)
+#define CGM_PIPE_MODE_CSC (1 << 1)
+#define CGM_PIPE_MODE_DEGAMMA (1 << 0)
+
+#define _CGM_PIPE_B_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x69900)
+#define _CGM_PIPE_B_CSC_COEFF23 (VLV_DISPLAY_BASE + 0x69904)
+#define _CGM_PIPE_B_CSC_COEFF45 (VLV_DISPLAY_BASE + 0x69908)
+#define _CGM_PIPE_B_CSC_COEFF67 (VLV_DISPLAY_BASE + 0x6990C)
+#define _CGM_PIPE_B_CSC_COEFF8 (VLV_DISPLAY_BASE + 0x69910)
+#define _CGM_PIPE_B_DEGAMMA (VLV_DISPLAY_BASE + 0x68000)
+#define _CGM_PIPE_B_GAMMA (VLV_DISPLAY_BASE + 0x69000)
+#define _CGM_PIPE_B_MODE (VLV_DISPLAY_BASE + 0x69A00)
+
+#define CGM_PIPE_CSC_COEFF01(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF01, _CGM_PIPE_B_CSC_COEFF01)
+#define CGM_PIPE_CSC_COEFF23(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF23, _CGM_PIPE_B_CSC_COEFF23)
+#define CGM_PIPE_CSC_COEFF45(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF45, _CGM_PIPE_B_CSC_COEFF45)
+#define CGM_PIPE_CSC_COEFF67(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF67, _CGM_PIPE_B_CSC_COEFF67)
+#define CGM_PIPE_CSC_COEFF8(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF8, _CGM_PIPE_B_CSC_COEFF8)
+#define CGM_PIPE_DEGAMMA(pipe, i, w) _MMIO(_PIPE(pipe, _CGM_PIPE_A_DEGAMMA, _CGM_PIPE_B_DEGAMMA) + (i) * 8 + (w) * 4)
+#define CGM_PIPE_GAMMA(pipe, i, w) _MMIO(_PIPE(pipe, _CGM_PIPE_A_GAMMA, _CGM_PIPE_B_GAMMA) + (i) * 8 + (w) * 4)
+#define CGM_PIPE_MODE(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_MODE, _CGM_PIPE_B_MODE)
+
/* MIPI DSI registers */
#define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c) /* ports A and C only */
@@ -7679,58 +7824,62 @@ enum skl_disp_power_wells {
#define BXT_MIPI_DIV_SHIFT(port) \
_MIPI_PORT(port, BXT_MIPI1_DIV_SHIFT, \
BXT_MIPI2_DIV_SHIFT)
-/* Var clock divider to generate TX source. Result must be < 39.5 M */
-#define BXT_MIPI1_ESCLK_VAR_DIV_MASK (0x3F << 26)
-#define BXT_MIPI2_ESCLK_VAR_DIV_MASK (0x3F << 10)
-#define BXT_MIPI_ESCLK_VAR_DIV_MASK(port) \
- _MIPI_PORT(port, BXT_MIPI1_ESCLK_VAR_DIV_MASK, \
- BXT_MIPI2_ESCLK_VAR_DIV_MASK)
-
-#define BXT_MIPI_ESCLK_VAR_DIV(port, val) \
- (val << BXT_MIPI_DIV_SHIFT(port))
+
/* TX control divider to select actual TX clock output from (8x/var) */
-#define BXT_MIPI1_TX_ESCLK_SHIFT 21
-#define BXT_MIPI2_TX_ESCLK_SHIFT 5
+#define BXT_MIPI1_TX_ESCLK_SHIFT 26
+#define BXT_MIPI2_TX_ESCLK_SHIFT 10
#define BXT_MIPI_TX_ESCLK_SHIFT(port) \
_MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_SHIFT, \
BXT_MIPI2_TX_ESCLK_SHIFT)
-#define BXT_MIPI1_TX_ESCLK_FIXDIV_MASK (3 << 21)
-#define BXT_MIPI2_TX_ESCLK_FIXDIV_MASK (3 << 5)
+#define BXT_MIPI1_TX_ESCLK_FIXDIV_MASK (0x3F << 26)
+#define BXT_MIPI2_TX_ESCLK_FIXDIV_MASK (0x3F << 10)
#define BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port) \
_MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_FIXDIV_MASK, \
- BXT_MIPI2_TX_ESCLK_FIXDIV_MASK)
-#define BXT_MIPI_TX_ESCLK_8XDIV_BY2(port) \
- (0x0 << BXT_MIPI_TX_ESCLK_SHIFT(port))
-#define BXT_MIPI_TX_ESCLK_8XDIV_BY4(port) \
- (0x1 << BXT_MIPI_TX_ESCLK_SHIFT(port))
-#define BXT_MIPI_TX_ESCLK_8XDIV_BY8(port) \
- (0x2 << BXT_MIPI_TX_ESCLK_SHIFT(port))
-/* RX control divider to select actual RX clock output from 8x*/
-#define BXT_MIPI1_RX_ESCLK_SHIFT 19
-#define BXT_MIPI2_RX_ESCLK_SHIFT 3
-#define BXT_MIPI_RX_ESCLK_SHIFT(port) \
- _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_SHIFT, \
- BXT_MIPI2_RX_ESCLK_SHIFT)
-#define BXT_MIPI1_RX_ESCLK_FIXDIV_MASK (3 << 19)
-#define BXT_MIPI2_RX_ESCLK_FIXDIV_MASK (3 << 3)
-#define BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port) \
- (3 << BXT_MIPI_RX_ESCLK_SHIFT(port))
-#define BXT_MIPI_RX_ESCLK_8X_BY2(port) \
- (1 << BXT_MIPI_RX_ESCLK_SHIFT(port))
-#define BXT_MIPI_RX_ESCLK_8X_BY3(port) \
- (2 << BXT_MIPI_RX_ESCLK_SHIFT(port))
-#define BXT_MIPI_RX_ESCLK_8X_BY4(port) \
- (3 << BXT_MIPI_RX_ESCLK_SHIFT(port))
-/* BXT-A WA: Always prog DPHY dividers to 00 */
-#define BXT_MIPI1_DPHY_DIV_SHIFT 16
-#define BXT_MIPI2_DPHY_DIV_SHIFT 0
-#define BXT_MIPI_DPHY_DIV_SHIFT(port) \
- _MIPI_PORT(port, BXT_MIPI1_DPHY_DIV_SHIFT, \
- BXT_MIPI2_DPHY_DIV_SHIFT)
-#define BXT_MIPI_1_DPHY_DIVIDER_MASK (3 << 16)
-#define BXT_MIPI_2_DPHY_DIVIDER_MASK (3 << 0)
-#define BXT_MIPI_DPHY_DIVIDER_MASK(port) \
- (3 << BXT_MIPI_DPHY_DIV_SHIFT(port))
+ BXT_MIPI2_TX_ESCLK_FIXDIV_MASK)
+#define BXT_MIPI_TX_ESCLK_DIVIDER(port, val) \
+ ((val & 0x3F) << BXT_MIPI_TX_ESCLK_SHIFT(port))
+/* RX upper control divider to select actual RX clock output from 8x */
+#define BXT_MIPI1_RX_ESCLK_UPPER_SHIFT 21
+#define BXT_MIPI2_RX_ESCLK_UPPER_SHIFT 5
+#define BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port) \
+ _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_SHIFT, \
+ BXT_MIPI2_RX_ESCLK_UPPER_SHIFT)
+#define BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK (3 << 21)
+#define BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK (3 << 5)
+#define BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port) \
+ _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK, \
+ BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK)
+#define BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, val) \
+ ((val & 3) << BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port))
+/* 8/3X divider to select the actual 8/3X clock output from 8x */
+#define BXT_MIPI1_8X_BY3_SHIFT 19
+#define BXT_MIPI2_8X_BY3_SHIFT 3
+#define BXT_MIPI_8X_BY3_SHIFT(port) \
+ _MIPI_PORT(port, BXT_MIPI1_8X_BY3_SHIFT, \
+ BXT_MIPI2_8X_BY3_SHIFT)
+#define BXT_MIPI1_8X_BY3_DIVIDER_MASK (3 << 19)
+#define BXT_MIPI2_8X_BY3_DIVIDER_MASK (3 << 3)
+#define BXT_MIPI_8X_BY3_DIVIDER_MASK(port) \
+ _MIPI_PORT(port, BXT_MIPI1_8X_BY3_DIVIDER_MASK, \
+ BXT_MIPI2_8X_BY3_DIVIDER_MASK)
+#define BXT_MIPI_8X_BY3_DIVIDER(port, val) \
+ ((val & 3) << BXT_MIPI_8X_BY3_SHIFT(port))
+/* RX lower control divider to select actual RX clock output from 8x */
+#define BXT_MIPI1_RX_ESCLK_LOWER_SHIFT 16
+#define BXT_MIPI2_RX_ESCLK_LOWER_SHIFT 0
+#define BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port) \
+ _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_SHIFT, \
+ BXT_MIPI2_RX_ESCLK_LOWER_SHIFT)
+#define BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK (3 << 16)
+#define BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK (3 << 0)
+#define BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port) \
+ _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK, \
+ BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK)
+#define BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, val) \
+ ((val & 3) << BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port))
+
+#define RX_DIVIDER_BIT_1_2 0x3
+#define RX_DIVIDER_BIT_3_4 0xC
/* BXT MIPI mode configure */
#define _BXT_MIPIA_TRANS_HACTIVE 0x6B0F8
@@ -7755,9 +7904,11 @@ enum skl_disp_power_wells {
#define BXT_DSIC_16X_BY2 (1 << 10)
#define BXT_DSIC_16X_BY3 (2 << 10)
#define BXT_DSIC_16X_BY4 (3 << 10)
+#define BXT_DSIC_16X_MASK (3 << 10)
#define BXT_DSIA_16X_BY2 (1 << 8)
#define BXT_DSIA_16X_BY3 (2 << 8)
#define BXT_DSIA_16X_BY4 (3 << 8)
+#define BXT_DSIA_16X_MASK (3 << 8)
#define BXT_DSI_FREQ_SEL_SHIFT 8
#define BXT_DSI_FREQ_SEL_MASK (0xF << BXT_DSI_FREQ_SEL_SHIFT)
@@ -7892,8 +8043,8 @@ enum skl_disp_power_wells {
#define VID_MODE_FORMAT_MASK (0xf << 7)
#define VID_MODE_NOT_SUPPORTED (0 << 7)
#define VID_MODE_FORMAT_RGB565 (1 << 7)
-#define VID_MODE_FORMAT_RGB666 (2 << 7)
-#define VID_MODE_FORMAT_RGB666_LOOSE (3 << 7)
+#define VID_MODE_FORMAT_RGB666_PACKED (2 << 7)
+#define VID_MODE_FORMAT_RGB666 (3 << 7)
#define VID_MODE_FORMAT_RGB888 (4 << 7)
#define CMD_MODE_CHANNEL_NUMBER_SHIFT 5
#define CMD_MODE_CHANNEL_NUMBER_MASK (3 << 5)
@@ -8149,6 +8300,7 @@ enum skl_disp_power_wells {
#define READ_REQUEST_PRIORITY_HIGH (3 << 3)
#define RGB_FLIP_TO_BGR (1 << 2)
+#define BXT_PIPE_SELECT_SHIFT 7
#define BXT_PIPE_SELECT_MASK (7 << 7)
#define BXT_PIPE_SELECT(pipe) ((pipe) << 7)
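
The new LUT registers above are all derived from a pipe A/B base plus a small per-entry stride. As a rough standalone sketch (not the kernel code: _PIPE()/_MMIO() are replaced by a trivial stand-in and VLV_DISPLAY_BASE is assumed to be 0x180000), the address arithmetic works out like this:

#include <stdio.h>

#define PIPE_A 0
#define PIPE_B 1

/* simplified stand-in for the kernel's _PIPE()/_MMIO_PIPE() helpers */
#define MY_PIPE(pipe, a, b)	((pipe) == PIPE_A ? (a) : (b))

#define PAL_PREC_GC_MAX_A	0x4A410
#define PAL_PREC_GC_MAX_B	0x4AC10
#define PREC_PAL_GC_MAX(pipe, i) \
	(MY_PIPE(pipe, PAL_PREC_GC_MAX_A, PAL_PREC_GC_MAX_B) + (i) * 4)

#define VLV_DISPLAY_BASE	0x180000	/* assumed base address */
#define CGM_PIPE_A_GAMMA	(VLV_DISPLAY_BASE + 0x67000)
#define CGM_PIPE_B_GAMMA	(VLV_DISPLAY_BASE + 0x69000)
/* CHV CGM gamma entries are two dwords wide: (i) selects the entry, (w) the word */
#define CGM_PIPE_GAMMA(pipe, i, w) \
	(MY_PIPE(pipe, CGM_PIPE_A_GAMMA, CGM_PIPE_B_GAMMA) + (i) * 8 + (w) * 4)

int main(void)
{
	/* first word of CHV gamma entry 5 on pipe B: 0x1e9028 */
	printf("CGM_PIPE_GAMMA(B, 5, 0) = 0x%x\n", CGM_PIPE_GAMMA(PIPE_B, 5, 0));
	/* per-channel GC_MAX register 2 on pipe A: 0x4a418 */
	printf("PREC_PAL_GC_MAX(A, 2)   = 0x%x\n", PREC_PAL_GC_MAX(PIPE_A, 2));
	return 0;
}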
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index c6188dddb..2d576b7ff 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -370,6 +370,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+ intel_runtime_pm_get(dev_priv);
+
mutex_lock(&dev_priv->rps.hw_lock);
val = intel_freq_opcode(dev_priv, val);
@@ -378,6 +380,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
val > dev_priv->rps.max_freq ||
val < dev_priv->rps.min_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock);
+ intel_runtime_pm_put(dev_priv);
return -EINVAL;
}
@@ -398,6 +401,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
mutex_unlock(&dev_priv->rps.hw_lock);
+ intel_runtime_pm_put(dev_priv);
+
return count;
}
@@ -433,6 +438,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+ intel_runtime_pm_get(dev_priv);
+
mutex_lock(&dev_priv->rps.hw_lock);
val = intel_freq_opcode(dev_priv, val);
@@ -441,6 +448,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
val > dev_priv->rps.max_freq ||
val > dev_priv->rps.max_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock);
+ intel_runtime_pm_put(dev_priv);
return -EINVAL;
}
@@ -457,6 +465,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
mutex_unlock(&dev_priv->rps.hw_lock);
+ intel_runtime_pm_put(dev_priv);
+
return count;
}
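
The sysfs change above brackets the rps.hw_lock critical section with a runtime-PM reference so the register writes cannot race with the device autosuspending; note that the -EINVAL early return also drops the reference. A minimal sketch of the same bracketing pattern, with made-up rpm_get()/rpm_put() stand-ins instead of intel_runtime_pm_get()/intel_runtime_pm_put():

#include <stdio.h>

static int rpm_refcount;

static void rpm_get(void) { rpm_refcount++; }
static void rpm_put(void) { rpm_refcount--; }

static int store_max_freq(int val, int min_soft, int max_hw)
{
	rpm_get();			/* keep the device awake for the hw access below */

	if (val > max_hw || val < min_soft) {
		rpm_put();		/* every error path must balance the get */
		return -1;
	}

	/* ... program the new limit while the device is powered ... */

	rpm_put();
	return 0;
}

int main(void)
{
	store_max_freq(1200, 300, 1100);	/* rejected, still balanced */
	store_max_freq(900, 300, 1100);
	printf("refcount after both calls: %d\n", rpm_refcount);	/* 0 */
	return 0;
}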
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index fa09e5581..dc0def210 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -464,7 +464,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
TP_fast_assign(
__entry->dev = from->dev->primary->index;
__entry->sync_from = from->id;
- __entry->sync_to = to_req->ring->id;
+ __entry->sync_to = to_req->engine->id;
__entry->seqno = i915_gem_request_get_seqno(req);
),
@@ -486,13 +486,13 @@ TRACE_EVENT(i915_gem_ring_dispatch,
),
TP_fast_assign(
- struct intel_engine_cs *ring =
- i915_gem_request_get_ring(req);
- __entry->dev = ring->dev->primary->index;
- __entry->ring = ring->id;
+ struct intel_engine_cs *engine =
+ i915_gem_request_get_engine(req);
+ __entry->dev = engine->dev->primary->index;
+ __entry->ring = engine->id;
__entry->seqno = i915_gem_request_get_seqno(req);
__entry->flags = flags;
- i915_trace_irq_get(ring, req);
+ i915_trace_irq_get(engine, req);
),
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -511,8 +511,8 @@ TRACE_EVENT(i915_gem_ring_flush,
),
TP_fast_assign(
- __entry->dev = req->ring->dev->primary->index;
- __entry->ring = req->ring->id;
+ __entry->dev = req->engine->dev->primary->index;
+ __entry->ring = req->engine->id;
__entry->invalidate = invalidate;
__entry->flush = flush;
),
@@ -533,10 +533,10 @@ DECLARE_EVENT_CLASS(i915_gem_request,
),
TP_fast_assign(
- struct intel_engine_cs *ring =
- i915_gem_request_get_ring(req);
- __entry->dev = ring->dev->primary->index;
- __entry->ring = ring->id;
+ struct intel_engine_cs *engine =
+ i915_gem_request_get_engine(req);
+ __entry->dev = engine->dev->primary->index;
+ __entry->ring = engine->id;
__entry->seqno = i915_gem_request_get_seqno(req);
),
@@ -550,8 +550,8 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
);
TRACE_EVENT(i915_gem_request_notify,
- TP_PROTO(struct intel_engine_cs *ring),
- TP_ARGS(ring),
+ TP_PROTO(struct intel_engine_cs *engine),
+ TP_ARGS(engine),
TP_STRUCT__entry(
__field(u32, dev)
@@ -560,9 +560,9 @@ TRACE_EVENT(i915_gem_request_notify,
),
TP_fast_assign(
- __entry->dev = ring->dev->primary->index;
- __entry->ring = ring->id;
- __entry->seqno = ring->get_seqno(ring, false);
+ __entry->dev = engine->dev->primary->index;
+ __entry->ring = engine->id;
+ __entry->seqno = engine->get_seqno(engine);
),
TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -597,13 +597,13 @@ TRACE_EVENT(i915_gem_request_wait_begin,
* less desirable.
*/
TP_fast_assign(
- struct intel_engine_cs *ring =
- i915_gem_request_get_ring(req);
- __entry->dev = ring->dev->primary->index;
- __entry->ring = ring->id;
+ struct intel_engine_cs *engine =
+ i915_gem_request_get_engine(req);
+ __entry->dev = engine->dev->primary->index;
+ __entry->ring = engine->id;
__entry->seqno = i915_gem_request_get_seqno(req);
__entry->blocking =
- mutex_is_locked(&ring->dev->struct_mutex);
+ mutex_is_locked(&engine->dev->struct_mutex);
),
TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
@@ -777,9 +777,9 @@ DEFINE_EVENT(i915_context, i915_context_free,
* called only if full ppgtt is enabled.
*/
TRACE_EVENT(switch_mm,
- TP_PROTO(struct intel_engine_cs *ring, struct intel_context *to),
+ TP_PROTO(struct intel_engine_cs *engine, struct intel_context *to),
- TP_ARGS(ring, to),
+ TP_ARGS(engine, to),
TP_STRUCT__entry(
__field(u32, ring)
@@ -789,10 +789,10 @@ TRACE_EVENT(switch_mm,
),
TP_fast_assign(
- __entry->ring = ring->id;
+ __entry->ring = engine->id;
__entry->to = to;
__entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
- __entry->dev = ring->dev->primary->index;
+ __entry->dev = engine->dev->primary->index;
),
TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index dea7429be..d02efb8ca 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -181,8 +181,8 @@ static int vgt_balloon_space(struct drm_mm *mm,
int intel_vgt_balloon(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
- unsigned long ggtt_vm_end = ggtt_vm->start + ggtt_vm->total;
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ unsigned long ggtt_end = ggtt->base.start + ggtt->base.total;
unsigned long mappable_base, mappable_size, mappable_end;
unsigned long unmappable_base, unmappable_size, unmappable_end;
@@ -202,19 +202,19 @@ int intel_vgt_balloon(struct drm_device *dev)
DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n",
unmappable_base, unmappable_size / 1024);
- if (mappable_base < ggtt_vm->start ||
- mappable_end > dev_priv->gtt.mappable_end ||
- unmappable_base < dev_priv->gtt.mappable_end ||
- unmappable_end > ggtt_vm_end) {
+ if (mappable_base < ggtt->base.start ||
+ mappable_end > ggtt->mappable_end ||
+ unmappable_base < ggtt->mappable_end ||
+ unmappable_end > ggtt_end) {
DRM_ERROR("Invalid ballooning configuration!\n");
return -EINVAL;
}
/* Unmappable graphic memory ballooning */
- if (unmappable_base > dev_priv->gtt.mappable_end) {
- ret = vgt_balloon_space(&ggtt_vm->mm,
+ if (unmappable_base > ggtt->mappable_end) {
+ ret = vgt_balloon_space(&ggtt->base.mm,
&bl_info.space[2],
- dev_priv->gtt.mappable_end,
+ ggtt->mappable_end,
unmappable_base);
if (ret)
@@ -225,30 +225,30 @@ int intel_vgt_balloon(struct drm_device *dev)
* No need to partition out the last physical page,
* because it is reserved to the guard page.
*/
- if (unmappable_end < ggtt_vm_end - PAGE_SIZE) {
- ret = vgt_balloon_space(&ggtt_vm->mm,
+ if (unmappable_end < ggtt_end - PAGE_SIZE) {
+ ret = vgt_balloon_space(&ggtt->base.mm,
&bl_info.space[3],
unmappable_end,
- ggtt_vm_end - PAGE_SIZE);
+ ggtt_end - PAGE_SIZE);
if (ret)
goto err;
}
/* Mappable graphic memory ballooning */
- if (mappable_base > ggtt_vm->start) {
- ret = vgt_balloon_space(&ggtt_vm->mm,
+ if (mappable_base > ggtt->base.start) {
+ ret = vgt_balloon_space(&ggtt->base.mm,
&bl_info.space[0],
- ggtt_vm->start, mappable_base);
+ ggtt->base.start, mappable_base);
if (ret)
goto err;
}
- if (mappable_end < dev_priv->gtt.mappable_end) {
- ret = vgt_balloon_space(&ggtt_vm->mm,
+ if (mappable_end < ggtt->mappable_end) {
+ ret = vgt_balloon_space(&ggtt->base.mm,
&bl_info.space[1],
mappable_end,
- dev_priv->gtt.mappable_end);
+ ggtt->mappable_end);
if (ret)
goto err;
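
The ballooning code above now reads its ranges from the new i915_ggtt structure, but the sanity check is unchanged: the guest's mappable range must sit at the bottom of the GGTT, the unmappable range above mappable_end, and both must stay inside the GGTT. A small self-contained sketch of that layout check, with invented example addresses:

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long base, size; };

static bool balloon_layout_ok(unsigned long ggtt_start, unsigned long ggtt_end,
			      unsigned long mappable_end,
			      struct range mappable, struct range unmappable)
{
	unsigned long m_end = mappable.base + mappable.size;
	unsigned long u_end = unmappable.base + unmappable.size;

	return mappable.base >= ggtt_start &&
	       m_end <= mappable_end &&
	       unmappable.base >= mappable_end &&
	       u_end <= ggtt_end;
}

int main(void)
{
	struct range m = { 0x00100000,  64UL << 20 };	/* hypothetical guest ranges */
	struct range u = { 0x10000000, 128UL << 20 };

	printf("layout ok: %d\n",
	       balloon_layout_ok(0, 2UL << 30, 0x10000000, m, u));
	return 0;
}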
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index e7c1686e4..50ff90aea 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -99,6 +99,8 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->update_wm_pre = false;
crtc_state->update_wm_post = false;
crtc_state->fb_changed = false;
+ crtc_state->wm.need_postvbl_update = false;
+ crtc_state->fb_bits = 0;
return &crtc_state->base;
}
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 7d281b400..02a7527ce 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -372,7 +372,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
if (WARN_ON(port == PORT_A))
return;
- if (HAS_PCH_IBX(dev_priv->dev)) {
+ if (HAS_PCH_IBX(dev_priv)) {
aud_config = IBX_AUD_CFG(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -561,23 +561,21 @@ void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
}
/**
- * intel_init_audio - Set up chip specific audio functions
- * @dev: drm device
+ * intel_init_audio_hooks - Set up chip specific audio hooks
+ * @dev_priv: device private
*/
-void intel_init_audio(struct drm_device *dev)
+void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_G4X(dev)) {
+ if (IS_G4X(dev_priv)) {
dev_priv->display.audio_codec_enable = g4x_audio_codec_enable;
dev_priv->display.audio_codec_disable = g4x_audio_codec_disable;
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
- } else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) {
+ } else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8) {
dev_priv->display.audio_codec_enable = hsw_audio_codec_enable;
dev_priv->display.audio_codec_disable = hsw_audio_codec_disable;
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
}
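
intel_init_audio_hooks() now takes dev_priv directly and fills in per-platform function pointers once at init time; callers then dispatch through the hook table. A toy sketch of the same pattern (the platform checks and hook names below are simplified stand-ins, not the i915 ones):

#include <stdio.h>

struct audio_hooks {
	void (*codec_enable)(void);
	void (*codec_disable)(void);
};

static void hsw_enable(void)  { printf("hsw enable\n"); }
static void hsw_disable(void) { printf("hsw disable\n"); }
static void ilk_enable(void)  { printf("ilk enable\n"); }
static void ilk_disable(void) { printf("ilk disable\n"); }

static void init_audio_hooks(struct audio_hooks *hooks, int gen, int has_pch_split)
{
	if (gen >= 8) {			/* newest platforms get the HSW-style hooks */
		hooks->codec_enable = hsw_enable;
		hooks->codec_disable = hsw_disable;
	} else if (has_pch_split) {	/* older PCH platforms fall back to ILK-style */
		hooks->codec_enable = ilk_enable;
		hooks->codec_disable = ilk_disable;
	}
}

int main(void)
{
	struct audio_hooks hooks = { 0 };

	init_audio_hooks(&hooks, 9, 1);
	hooks.codec_enable();		/* indirect call through the hook table */
	return 0;
}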
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index bf62a19c8..b9022fa05 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -29,7 +29,9 @@
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "intel_bios.h"
+
+#define _INTEL_BIOS_PRIVATE
+#include "intel_vbt_defs.h"
/**
* DOC: Video BIOS Table (VBT)
@@ -56,8 +58,6 @@
#define SLAVE_ADDR1 0x70
#define SLAVE_ADDR2 0x72
-static int panel_type;
-
/* Get BDB block size given a pointer to Block ID. */
static u32 _get_blocksize(const u8 *block_base)
{
@@ -139,6 +139,11 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
else
panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+ panel_fixed_mode->width_mm = (dvo_timing->himage_hi << 8) |
+ dvo_timing->himage_lo;
+ panel_fixed_mode->height_mm = (dvo_timing->vimage_hi << 8) |
+ dvo_timing->vimage_lo;
+
/* Some VBTs have bogus h/vtotal values */
if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
@@ -203,17 +208,32 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
const struct lvds_dvo_timing *panel_dvo_timing;
const struct lvds_fp_timing *fp_timing;
struct drm_display_mode *panel_fixed_mode;
+ int panel_type;
int drrs_mode;
+ int ret;
lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
if (!lvds_options)
return;
dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
- if (lvds_options->panel_type == 0xff)
- return;
- panel_type = lvds_options->panel_type;
+ ret = intel_opregion_get_panel_type(dev_priv->dev);
+ if (ret >= 0) {
+ WARN_ON(ret > 0xf);
+ panel_type = ret;
+ DRM_DEBUG_KMS("Panel type: %d (OpRegion)\n", panel_type);
+ } else {
+ if (lvds_options->panel_type > 0xf) {
+ DRM_DEBUG_KMS("Invalid VBT panel type 0x%x\n",
+ lvds_options->panel_type);
+ return;
+ }
+ panel_type = lvds_options->panel_type;
+ DRM_DEBUG_KMS("Panel type: %d (VBT)\n", panel_type);
+ }
+
+ dev_priv->vbt.panel_type = panel_type;
drrs_mode = (lvds_options->dps_panel_type_bits
>> (panel_type * 2)) & MODE_MASK;
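
The hunk above changes how the panel type index is chosen: a value supplied by the ACPI OpRegion takes precedence, and the VBT LVDS options block is only used as a fallback after range-checking it against the 16-entry tables. A rough sketch of that selection order, where opregion_panel_type() is a made-up stand-in for intel_opregion_get_panel_type():

#include <stdio.h>

static int opregion_panel_type(void)
{
	return -1;		/* pretend the OpRegion does not provide the value */
}

static int pick_panel_type(int vbt_panel_type)
{
	int ret = opregion_panel_type();

	if (ret >= 0)
		return ret;			/* OpRegion wins (the kernel only warns if > 0xf) */

	if (vbt_panel_type > 0xf)
		return -1;			/* bogus VBT value, bail out */

	return vbt_panel_type;
}

int main(void)
{
	printf("panel type: %d\n", pick_panel_type(2));
	return 0;
}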
@@ -249,7 +269,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
lvds_lfp_data_ptrs,
- lvds_options->panel_type);
+ panel_type);
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
if (!panel_fixed_mode)
@@ -264,7 +284,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
lvds_lfp_data_ptrs,
- lvds_options->panel_type);
+ panel_type);
if (fp_timing) {
/* check the resolution, just to be sure */
if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
@@ -282,6 +302,7 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
{
const struct bdb_lfp_backlight_data *backlight_data;
const struct bdb_lfp_backlight_data_entry *entry;
+ int panel_type = dev_priv->vbt.panel_type;
backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
if (!backlight_data)
@@ -480,7 +501,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
child->slave_addr,
(child->dvo_port == DEVICE_PORT_DVOB) ?
"SDVOB" : "SDVOC");
- p_mapping = &(dev_priv->sdvo_mappings[child->dvo_port - 1]);
+ p_mapping = &dev_priv->vbt.sdvo_mappings[child->dvo_port - 1];
if (!p_mapping->initialized) {
p_mapping->dvo_port = child->dvo_port;
p_mapping->slave_addr = child->slave_addr;
@@ -525,10 +546,7 @@ parse_driver_features(struct drm_i915_private *dev_priv,
return;
if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
- dev_priv->vbt.edp_support = 1;
-
- if (driver->dual_frequency)
- dev_priv->render_reclock_avail = true;
+ dev_priv->vbt.edp.support = 1;
DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
/*
@@ -547,23 +565,24 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
const struct bdb_edp *edp;
const struct edp_power_seq *edp_pps;
const struct edp_link_params *edp_link_params;
+ int panel_type = dev_priv->vbt.panel_type;
edp = find_section(bdb, BDB_EDP);
if (!edp) {
- if (dev_priv->vbt.edp_support)
+ if (dev_priv->vbt.edp.support)
DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
return;
}
switch ((edp->color_depth >> (panel_type * 2)) & 3) {
case EDP_18BPP:
- dev_priv->vbt.edp_bpp = 18;
+ dev_priv->vbt.edp.bpp = 18;
break;
case EDP_24BPP:
- dev_priv->vbt.edp_bpp = 24;
+ dev_priv->vbt.edp.bpp = 24;
break;
case EDP_30BPP:
- dev_priv->vbt.edp_bpp = 30;
+ dev_priv->vbt.edp.bpp = 30;
break;
}
@@ -571,14 +590,14 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
edp_pps = &edp->power_seqs[panel_type];
edp_link_params = &edp->link_params[panel_type];
- dev_priv->vbt.edp_pps = *edp_pps;
+ dev_priv->vbt.edp.pps = *edp_pps;
switch (edp_link_params->rate) {
case EDP_RATE_1_62:
- dev_priv->vbt.edp_rate = DP_LINK_BW_1_62;
+ dev_priv->vbt.edp.rate = DP_LINK_BW_1_62;
break;
case EDP_RATE_2_7:
- dev_priv->vbt.edp_rate = DP_LINK_BW_2_7;
+ dev_priv->vbt.edp.rate = DP_LINK_BW_2_7;
break;
default:
DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n",
@@ -588,13 +607,13 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
switch (edp_link_params->lanes) {
case EDP_LANE_1:
- dev_priv->vbt.edp_lanes = 1;
+ dev_priv->vbt.edp.lanes = 1;
break;
case EDP_LANE_2:
- dev_priv->vbt.edp_lanes = 2;
+ dev_priv->vbt.edp.lanes = 2;
break;
case EDP_LANE_4:
- dev_priv->vbt.edp_lanes = 4;
+ dev_priv->vbt.edp.lanes = 4;
break;
default:
DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n",
@@ -604,16 +623,16 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
switch (edp_link_params->preemphasis) {
case EDP_PREEMPHASIS_NONE:
- dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
+ dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
break;
case EDP_PREEMPHASIS_3_5dB:
- dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
+ dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
break;
case EDP_PREEMPHASIS_6dB:
- dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
+ dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
break;
case EDP_PREEMPHASIS_9_5dB:
- dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
+ dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
break;
default:
DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
@@ -623,16 +642,16 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
switch (edp_link_params->vswing) {
case EDP_VSWING_0_4V:
- dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+ dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
break;
case EDP_VSWING_0_6V:
- dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
+ dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
break;
case EDP_VSWING_0_8V:
- dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+ dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
break;
case EDP_VSWING_1_2V:
- dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+ dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
break;
default:
DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
@@ -645,10 +664,10 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
/* Don't read from VBT if module parameter has valid value*/
if (i915.edp_vswing) {
- dev_priv->edp_low_vswing = i915.edp_vswing == 1;
+ dev_priv->vbt.edp.low_vswing = i915.edp_vswing == 1;
} else {
vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
- dev_priv->edp_low_vswing = vswing == 0;
+ dev_priv->vbt.edp.low_vswing = vswing == 0;
}
}
}
@@ -658,6 +677,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
{
const struct bdb_psr *psr;
const struct psr_table *psr_table;
+ int panel_type = dev_priv->vbt.panel_type;
psr = find_section(bdb, BDB_PSR);
if (!psr) {
@@ -704,9 +724,10 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
const struct bdb_mipi_config *start;
const struct mipi_config *config;
const struct mipi_pps_data *pps;
+ int panel_type = dev_priv->vbt.panel_type;
/* parse MIPI blocks only if LFP type is MIPI */
- if (!dev_priv->vbt.has_mipi)
+ if (!intel_bios_is_dsi_present(dev_priv, NULL))
return;
/* Initialize this to undefined indicating no generic MIPI support */
@@ -911,6 +932,7 @@ static void
parse_mipi_sequence(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
+ int panel_type = dev_priv->vbt.panel_type;
const struct bdb_mipi_sequence *sequence;
const u8 *seq_data;
u32 seq_size;
@@ -1124,7 +1146,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
}
/* Parse the I_boost config for SKL and above */
- if (bdb->version >= 196 && (child->common.flags_1 & IBOOST_ENABLE)) {
+ if (bdb->version >= 196 && child->common.iboost) {
info->dp_boost_level = translate_iboost(child->common.iboost_level & 0xF);
DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
port_name(port), info->dp_boost_level);
@@ -1170,7 +1192,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
}
if (bdb->version < 106) {
expected_size = 22;
- } else if (bdb->version < 109) {
+ } else if (bdb->version < 111) {
expected_size = 27;
} else if (bdb->version < 195) {
BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33);
@@ -1232,14 +1254,6 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
continue;
}
- if (p_child->common.dvo_port >= DVO_PORT_MIPIA
- && p_child->common.dvo_port <= DVO_PORT_MIPID
- &&p_child->common.device_type & DEVICE_TYPE_MIPI_OUTPUT) {
- DRM_DEBUG_KMS("Found MIPI as LFP\n");
- dev_priv->vbt.has_mipi = 1;
- dev_priv->vbt.dsi.port = p_child->common.dvo_port;
- }
-
child_dev_ptr = dev_priv->vbt.child_dev + count;
count++;
@@ -1250,6 +1264,19 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
*/
memcpy(child_dev_ptr, p_child,
min_t(size_t, p_defs->child_dev_size, sizeof(*p_child)));
+
+ /*
+	 * Copied the full block; now initialize fields that are not
+	 * available in the current BDB version.
+ */
+ if (bdb->version < 196) {
+ /* Set default values for bits added from v196 */
+ child_dev_ptr->common.iboost = 0;
+ child_dev_ptr->common.hpd_invert = 0;
+ }
+
+ if (bdb->version < 192)
+ child_dev_ptr->common.lspcon = 0;
}
return;
}
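
The copy above is deliberately bounded by both the VBT's advertised child_dev_size and our own struct size, and fields that only exist from newer BDB versions are forced to a known default afterwards. A compact sketch of that version-compat copy, using a hypothetical two-field struct:

#include <string.h>
#include <stdio.h>
#include <stdint.h>

struct child_cfg {
	uint16_t handle;
	uint8_t  iboost;	/* only meaningful from BDB version 196 */
	uint8_t  hpd_invert;	/* only meaningful from BDB version 196 */
};

static void copy_child(struct child_cfg *dst, const void *src,
		       size_t vbt_child_size, int bdb_version)
{
	size_t n = vbt_child_size < sizeof(*dst) ? vbt_child_size : sizeof(*dst);

	memset(dst, 0, sizeof(*dst));
	memcpy(dst, src, n);		/* never read past either struct */

	if (bdb_version < 196) {	/* fields not defined before v196 */
		dst->iboost = 0;
		dst->hpd_invert = 0;
	}
}

int main(void)
{
	struct child_cfg raw = { .handle = 1, .iboost = 0x5a, .hpd_invert = 1 };
	struct child_cfg dst;

	copy_child(&dst, &raw, sizeof(raw), 195);
	printf("iboost=%u hpd_invert=%u\n", dst.iboost, dst.hpd_invert);	/* both 0 */
	return 0;
}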
@@ -1431,3 +1458,285 @@ intel_bios_init(struct drm_i915_private *dev_priv)
return 0;
}
+
+/**
+ * intel_bios_is_tv_present - is integrated TV present in VBT
+ * @dev_priv: i915 device instance
+ *
+ * Return true if TV is present. If no child devices were parsed from VBT,
+ * assume TV is present.
+ */
+bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
+{
+ union child_device_config *p_child;
+ int i;
+
+ if (!dev_priv->vbt.int_tv_support)
+ return false;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return true;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ p_child = dev_priv->vbt.child_dev + i;
+ /*
+ * If the device type is not TV, continue.
+ */
+ switch (p_child->old.device_type) {
+ case DEVICE_TYPE_INT_TV:
+ case DEVICE_TYPE_TV:
+ case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
+ break;
+ default:
+ continue;
+ }
+		/* Only devices with a non-zero addin_offset are regarded
+		 * as present.
+ */
+ if (p_child->old.addin_offset)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * intel_bios_is_lvds_present - is LVDS present in VBT
+ * @dev_priv: i915 device instance
+ * @i2c_pin: i2c pin for LVDS if present
+ *
+ * Return true if LVDS is present. If no child devices were parsed from VBT,
+ * assume LVDS is present.
+ */
+bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
+{
+ int i;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return true;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ union child_device_config *uchild = dev_priv->vbt.child_dev + i;
+ struct old_child_dev_config *child = &uchild->old;
+
+ /* If the device type is not LFP, continue.
+ * We have to check both the new identifiers as well as the
+ * old for compatibility with some BIOSes.
+ */
+ if (child->device_type != DEVICE_TYPE_INT_LFP &&
+ child->device_type != DEVICE_TYPE_LFP)
+ continue;
+
+ if (intel_gmbus_is_valid_pin(dev_priv, child->i2c_pin))
+ *i2c_pin = child->i2c_pin;
+
+ /* However, we cannot trust the BIOS writers to populate
+ * the VBT correctly. Since LVDS requires additional
+ * information from AIM blocks, a non-zero addin offset is
+ * a good indicator that the LVDS is actually present.
+ */
+ if (child->addin_offset)
+ return true;
+
+ /* But even then some BIOS writers perform some black magic
+ * and instantiate the device without reference to any
+ * additional data. Trust that if the VBT was written into
+ * the OpRegion then they have validated the LVDS's existence.
+ */
+ if (dev_priv->opregion.vbt)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * intel_bios_is_port_present - is the specified digital port present
+ * @dev_priv: i915 device instance
+ * @port: port to check
+ *
+ * Return true if the device in %port is present.
+ */
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
+{
+ static const struct {
+ u16 dp, hdmi;
+ } port_mapping[] = {
+ [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
+ [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
+ [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
+ [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
+ };
+ int i;
+
+ /* FIXME maybe deal with port A as well? */
+ if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
+ return false;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return false;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ const union child_device_config *p_child =
+ &dev_priv->vbt.child_dev[i];
+ if ((p_child->common.dvo_port == port_mapping[port].dp ||
+ p_child->common.dvo_port == port_mapping[port].hdmi) &&
+ (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
+ DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * intel_bios_is_port_edp - is the device in the given port eDP
+ * @dev_priv: i915 device instance
+ * @port: port to check
+ *
+ * Return true if the device in %port is eDP.
+ */
+bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
+{
+ union child_device_config *p_child;
+ static const short port_mapping[] = {
+ [PORT_B] = DVO_PORT_DPB,
+ [PORT_C] = DVO_PORT_DPC,
+ [PORT_D] = DVO_PORT_DPD,
+ [PORT_E] = DVO_PORT_DPE,
+ };
+ int i;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return false;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ p_child = dev_priv->vbt.child_dev + i;
+
+ if (p_child->common.dvo_port == port_mapping[port] &&
+ (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
+ (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
+ return true;
+ }
+
+ return false;
+}
+
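
intel_bios_is_port_edp() relies on a masked comparison of device_type because BIOSes disagree on the remaining bits. A standalone sketch of that check; DEVICE_TYPE_eDP (0x78C6) matches the VBT definition, but the mask here is reduced to two bits purely for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define DEVICE_TYPE_INTERNAL_CONNECTOR	(1 << 12)
#define DEVICE_TYPE_DISPLAYPORT_OUTPUT	(1 << 2)
#define DEVICE_TYPE_eDP			0x78C6

/* only the bits we trust; the others may or may not be set by the BIOS */
#define DEVICE_TYPE_eDP_BITS \
	(DEVICE_TYPE_INTERNAL_CONNECTOR | DEVICE_TYPE_DISPLAYPORT_OUTPUT)

static bool is_edp_device(uint16_t device_type)
{
	return (device_type & DEVICE_TYPE_eDP_BITS) ==
	       (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS);
}

int main(void)
{
	/* eDP matches, external DP (0x68C6) does not */
	printf("%d %d\n", is_edp_device(0x78C6), is_edp_device(0x68C6));
	return 0;
}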
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port)
+{
+ static const struct {
+ u16 dp, hdmi;
+ } port_mapping[] = {
+ /*
+ * Buggy VBTs may declare DP ports as having
+ * HDMI type dvo_port :( So let's check both.
+ */
+ [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
+ [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
+ [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
+ [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
+ };
+ int i;
+
+ if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
+ return false;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return false;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ const union child_device_config *p_child =
+ &dev_priv->vbt.child_dev[i];
+
+ if ((p_child->common.dvo_port == port_mapping[port].dp ||
+ p_child->common.dvo_port == port_mapping[port].hdmi) &&
+ (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) ==
+ (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * intel_bios_is_dsi_present - is DSI present in VBT
+ * @dev_priv: i915 device instance
+ * @port: port for DSI if present
+ *
+ * Return true if DSI is present, and return the port in %port.
+ */
+bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
+ enum port *port)
+{
+ union child_device_config *p_child;
+ u8 dvo_port;
+ int i;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ p_child = dev_priv->vbt.child_dev + i;
+
+ if (!(p_child->common.device_type & DEVICE_TYPE_MIPI_OUTPUT))
+ continue;
+
+ dvo_port = p_child->common.dvo_port;
+
+ switch (dvo_port) {
+ case DVO_PORT_MIPIA:
+ case DVO_PORT_MIPIC:
+ if (port)
+ *port = dvo_port - DVO_PORT_MIPIA;
+ return true;
+ case DVO_PORT_MIPIB:
+ case DVO_PORT_MIPID:
+ DRM_DEBUG_KMS("VBT has unsupported DSI port %c\n",
+ port_name(dvo_port - DVO_PORT_MIPIA));
+ break;
+ }
+ }
+
+ return false;
+}
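
The DSI lookup above converts the VBT's DVO port numbering into the driver's enum port simply by subtracting the MIPI base, rejecting ports B and D, which are not supported. A tiny sketch of that translation:

#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D };

#define DVO_PORT_MIPIA 21
#define DVO_PORT_MIPIB 22
#define DVO_PORT_MIPIC 23
#define DVO_PORT_MIPID 24

int main(void)
{
	int dvo_port = DVO_PORT_MIPIC;			/* as read from a VBT child device */
	enum port port = dvo_port - DVO_PORT_MIPIA;	/* MIPIA -> PORT_A, MIPIC -> PORT_C */

	printf("DSI on port %c\n", 'A' + port);
	return 0;
}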
+
+/**
+ * intel_bios_is_port_hpd_inverted - is HPD inverted for %port
+ * @dev_priv: i915 device instance
+ * @port: port to check
+ *
+ * Return true if HPD should be inverted for %port.
+ */
+bool
+intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ int i;
+
+ if (WARN_ON_ONCE(!IS_BROXTON(dev_priv)))
+ return false;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ if (!dev_priv->vbt.child_dev[i].common.hpd_invert)
+ continue;
+
+ switch (dev_priv->vbt.child_dev[i].common.dvo_port) {
+ case DVO_PORT_DPA:
+ case DVO_PORT_HDMIA:
+ if (port == PORT_A)
+ return true;
+ break;
+ case DVO_PORT_DPB:
+ case DVO_PORT_HDMIB:
+ if (port == PORT_B)
+ return true;
+ break;
+ case DVO_PORT_DPC:
+ case DVO_PORT_HDMIC:
+ if (port == PORT_C)
+ return true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 350d4e0f7..ab0ea315e 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -1,5 +1,5 @@
/*
- * Copyright © 2006 Intel Corporation
+ * Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,543 +19,16 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
- *
- */
-
-#ifndef _INTEL_BIOS_H_
-#define _INTEL_BIOS_H_
-
-/**
- * struct vbt_header - VBT Header structure
- * @signature: VBT signature, always starts with "$VBT"
- * @version: Version of this structure
- * @header_size: Size of this structure
- * @vbt_size: Size of VBT (VBT Header, BDB Header and data blocks)
- * @vbt_checksum: Checksum
- * @reserved0: Reserved
- * @bdb_offset: Offset of &struct bdb_header from beginning of VBT
- * @aim_offset: Offsets of add-in data blocks from beginning of VBT
- */
-struct vbt_header {
- u8 signature[20];
- u16 version;
- u16 header_size;
- u16 vbt_size;
- u8 vbt_checksum;
- u8 reserved0;
- u32 bdb_offset;
- u32 aim_offset[4];
-} __packed;
-
-/**
- * struct bdb_header - BDB Header structure
- * @signature: BDB signature "BIOS_DATA_BLOCK"
- * @version: Version of the data block definitions
- * @header_size: Size of this structure
- * @bdb_size: Size of BDB (BDB Header and data blocks)
- */
-struct bdb_header {
- u8 signature[16];
- u16 version;
- u16 header_size;
- u16 bdb_size;
-} __packed;
-
-/* strictly speaking, this is a "skip" block, but it has interesting info */
-struct vbios_data {
- u8 type; /* 0 == desktop, 1 == mobile */
- u8 relstage;
- u8 chipset;
- u8 lvds_present:1;
- u8 tv_present:1;
- u8 rsvd2:6; /* finish byte */
- u8 rsvd3[4];
- u8 signon[155];
- u8 copyright[61];
- u16 code_segment;
- u8 dos_boot_mode;
- u8 bandwidth_percent;
- u8 rsvd4; /* popup memory size */
- u8 resize_pci_bios;
- u8 rsvd5; /* is crt already on ddc2 */
-} __packed;
-
-/*
- * There are several types of BIOS data blocks (BDBs), each block has
- * an ID and size in the first 3 bytes (ID in first, size in next 2).
- * Known types are listed below.
*/
-#define BDB_GENERAL_FEATURES 1
-#define BDB_GENERAL_DEFINITIONS 2
-#define BDB_OLD_TOGGLE_LIST 3
-#define BDB_MODE_SUPPORT_LIST 4
-#define BDB_GENERIC_MODE_TABLE 5
-#define BDB_EXT_MMIO_REGS 6
-#define BDB_SWF_IO 7
-#define BDB_SWF_MMIO 8
-#define BDB_PSR 9
-#define BDB_MODE_REMOVAL_TABLE 10
-#define BDB_CHILD_DEVICE_TABLE 11
-#define BDB_DRIVER_FEATURES 12
-#define BDB_DRIVER_PERSISTENCE 13
-#define BDB_EXT_TABLE_PTRS 14
-#define BDB_DOT_CLOCK_OVERRIDE 15
-#define BDB_DISPLAY_SELECT 16
-/* 17 rsvd */
-#define BDB_DRIVER_ROTATION 18
-#define BDB_DISPLAY_REMOVE 19
-#define BDB_OEM_CUSTOM 20
-#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
-#define BDB_SDVO_LVDS_OPTIONS 22
-#define BDB_SDVO_PANEL_DTDS 23
-#define BDB_SDVO_LVDS_PNP_IDS 24
-#define BDB_SDVO_LVDS_POWER_SEQ 25
-#define BDB_TV_OPTIONS 26
-#define BDB_EDP 27
-#define BDB_LVDS_OPTIONS 40
-#define BDB_LVDS_LFP_DATA_PTRS 41
-#define BDB_LVDS_LFP_DATA 42
-#define BDB_LVDS_BACKLIGHT 43
-#define BDB_LVDS_POWER 44
-#define BDB_MIPI_CONFIG 52
-#define BDB_MIPI_SEQUENCE 53
-#define BDB_SKIP 254 /* VBIOS private block, ignore */
-
-struct bdb_general_features {
- /* bits 1 */
- u8 panel_fitting:2;
- u8 flexaim:1;
- u8 msg_enable:1;
- u8 clear_screen:3;
- u8 color_flip:1;
-
- /* bits 2 */
- u8 download_ext_vbt:1;
- u8 enable_ssc:1;
- u8 ssc_freq:1;
- u8 enable_lfp_on_override:1;
- u8 disable_ssc_ddt:1;
- u8 rsvd7:1;
- u8 display_clock_mode:1;
- u8 rsvd8:1; /* finish byte */
-
- /* bits 3 */
- u8 disable_smooth_vision:1;
- u8 single_dvi:1;
- u8 rsvd9:1;
- u8 fdi_rx_polarity_inverted:1;
- u8 rsvd10:4; /* finish byte */
-
- /* bits 4 */
- u8 legacy_monitor_detect;
-
- /* bits 5 */
- u8 int_crt_support:1;
- u8 int_tv_support:1;
- u8 int_efp_support:1;
- u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
- u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
- u8 rsvd11:3; /* finish byte */
-} __packed;
-
-/* pre-915 */
-#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
-#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */
-#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */
-#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */
-
-/* Pre 915 */
-#define DEVICE_TYPE_NONE 0x00
-#define DEVICE_TYPE_CRT 0x01
-#define DEVICE_TYPE_TV 0x09
-#define DEVICE_TYPE_EFP 0x12
-#define DEVICE_TYPE_LFP 0x22
-/* On 915+ */
-#define DEVICE_TYPE_CRT_DPMS 0x6001
-#define DEVICE_TYPE_CRT_DPMS_HOTPLUG 0x4001
-#define DEVICE_TYPE_TV_COMPOSITE 0x0209
-#define DEVICE_TYPE_TV_MACROVISION 0x0289
-#define DEVICE_TYPE_TV_RF_COMPOSITE 0x020c
-#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE 0x0609
-#define DEVICE_TYPE_TV_SCART 0x0209
-#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
-#define DEVICE_TYPE_EFP_HOTPLUG_PWR 0x6012
-#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR 0x6052
-#define DEVICE_TYPE_EFP_DVI_I 0x6053
-#define DEVICE_TYPE_EFP_DVI_D_DUAL 0x6152
-#define DEVICE_TYPE_EFP_DVI_D_HDCP 0x60d2
-#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR 0x6062
-#define DEVICE_TYPE_OPENLDI_DUALPIX 0x6162
-#define DEVICE_TYPE_LFP_PANELLINK 0x5012
-#define DEVICE_TYPE_LFP_CMOS_PWR 0x5042
-#define DEVICE_TYPE_LFP_LVDS_PWR 0x5062
-#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162
-#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
-
-#define DEVICE_CFG_NONE 0x00
-#define DEVICE_CFG_12BIT_DVOB 0x01
-#define DEVICE_CFG_12BIT_DVOC 0x02
-#define DEVICE_CFG_24BIT_DVOBC 0x09
-#define DEVICE_CFG_24BIT_DVOCB 0x0a
-#define DEVICE_CFG_DUAL_DVOB 0x11
-#define DEVICE_CFG_DUAL_DVOC 0x12
-#define DEVICE_CFG_DUAL_DVOBC 0x13
-#define DEVICE_CFG_DUAL_LINK_DVOBC 0x19
-#define DEVICE_CFG_DUAL_LINK_DVOCB 0x1a
-
-#define DEVICE_WIRE_NONE 0x00
-#define DEVICE_WIRE_DVOB 0x01
-#define DEVICE_WIRE_DVOC 0x02
-#define DEVICE_WIRE_DVOBC 0x03
-#define DEVICE_WIRE_DVOBB 0x05
-#define DEVICE_WIRE_DVOCC 0x06
-#define DEVICE_WIRE_DVOB_MASTER 0x0d
-#define DEVICE_WIRE_DVOC_MASTER 0x0e
-
-#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */
-#define DEVICE_PORT_DVOB 0x01
-#define DEVICE_PORT_DVOC 0x02
/*
- * We used to keep this struct but without any version control. We should avoid
- * using it in the future, but it should be safe to keep using it in the old
- * code. Do not change; we rely on its size.
+ * Please use intel_vbt_defs.h for VBT private data, to hide and abstract away
+ * the VBT from the rest of the driver. Add the parsed, clean data to struct
+ * intel_vbt_data within struct drm_i915_private.
*/
-struct old_child_dev_config {
- u16 handle;
- u16 device_type;
- u8 device_id[10]; /* ascii string */
- u16 addin_offset;
- u8 dvo_port; /* See Device_PORT_* above */
- u8 i2c_pin;
- u8 slave_addr;
- u8 ddc_pin;
- u16 edid_ptr;
- u8 dvo_cfg; /* See DEVICE_CFG_* above */
- u8 dvo2_port;
- u8 i2c2_pin;
- u8 slave2_addr;
- u8 ddc2_pin;
- u8 capabilities;
- u8 dvo_wiring;/* See DEVICE_WIRE_* above */
- u8 dvo2_wiring;
- u16 extended_type;
- u8 dvo_function;
-} __packed;
-
-/* This one contains field offsets that are known to be common for all BDB
- * versions. Notice that the meaning of the contents contents may still change,
- * but at least the offsets are consistent. */
-
-/* Definitions for flags_1 */
-#define IBOOST_ENABLE (1<<3)
-
-struct common_child_dev_config {
- u16 handle;
- u16 device_type;
- u8 not_common1[12];
- u8 dvo_port;
- u8 not_common2[2];
- u8 ddc_pin;
- u16 edid_ptr;
- u8 obsolete;
- u8 flags_1;
- u8 not_common3[13];
- u8 iboost_level;
-} __packed;
-
-
-/* This field changes depending on the BDB version, so the most reliable way to
- * read it is by checking the BDB version and reading the raw pointer. */
-union child_device_config {
- /* This one is safe to be used anywhere, but the code should still check
- * the BDB version. */
- u8 raw[33];
- /* This one should only be kept for legacy code. */
- struct old_child_dev_config old;
- /* This one should also be safe to use anywhere, even without version
- * checks. */
- struct common_child_dev_config common;
-} __packed;
-
-struct bdb_general_definitions {
- /* DDC GPIO */
- u8 crt_ddc_gmbus_pin;
-
- /* DPMS bits */
- u8 dpms_acpi:1;
- u8 skip_boot_crt_detect:1;
- u8 dpms_aim:1;
- u8 rsvd1:5; /* finish byte */
-
- /* boot device bits */
- u8 boot_display[2];
- u8 child_dev_size;
-
- /*
- * Device info:
- * If TV is present, it'll be at devices[0].
- * LVDS will be next, either devices[0] or [1], if present.
- * On some platforms the number of device is 6. But could be as few as
- * 4 if both TV and LVDS are missing.
- * And the device num is related with the size of general definition
- * block. It is obtained by using the following formula:
- * number = (block_size - sizeof(bdb_general_definitions))/
- * defs->child_dev_size;
- */
- uint8_t devices[0];
-} __packed;
-
-/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
-#define MODE_MASK 0x3
-
-struct bdb_lvds_options {
- u8 panel_type;
- u8 rsvd1;
- /* LVDS capabilities, stored in a dword */
- u8 pfit_mode:2;
- u8 pfit_text_mode_enhanced:1;
- u8 pfit_gfx_mode_enhanced:1;
- u8 pfit_ratio_auto:1;
- u8 pixel_dither:1;
- u8 lvds_edid:1;
- u8 rsvd2:1;
- u8 rsvd4;
- /* LVDS Panel channel bits stored here */
- u32 lvds_panel_channel_bits;
- /* LVDS SSC (Spread Spectrum Clock) bits stored here. */
- u16 ssc_bits;
- u16 ssc_freq;
- u16 ssc_ddt;
- /* Panel color depth defined here */
- u16 panel_color_depth;
- /* LVDS panel type bits stored here */
- u32 dps_panel_type_bits;
- /* LVDS backlight control type bits stored here */
- u32 blt_control_type_bits;
-} __packed;
-
-/* LFP pointer table contains entries to the struct below */
-struct bdb_lvds_lfp_data_ptr {
- u16 fp_timing_offset; /* offsets are from start of bdb */
- u8 fp_table_size;
- u16 dvo_timing_offset;
- u8 dvo_table_size;
- u16 panel_pnp_id_offset;
- u8 pnp_table_size;
-} __packed;
-
-struct bdb_lvds_lfp_data_ptrs {
- u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
- struct bdb_lvds_lfp_data_ptr ptr[16];
-} __packed;
-
-/* LFP data has 3 blocks per entry */
-struct lvds_fp_timing {
- u16 x_res;
- u16 y_res;
- u32 lvds_reg;
- u32 lvds_reg_val;
- u32 pp_on_reg;
- u32 pp_on_reg_val;
- u32 pp_off_reg;
- u32 pp_off_reg_val;
- u32 pp_cycle_reg;
- u32 pp_cycle_reg_val;
- u32 pfit_reg;
- u32 pfit_reg_val;
- u16 terminator;
-} __packed;
-
-struct lvds_dvo_timing {
- u16 clock; /**< In 10khz */
- u8 hactive_lo;
- u8 hblank_lo;
- u8 hblank_hi:4;
- u8 hactive_hi:4;
- u8 vactive_lo;
- u8 vblank_lo;
- u8 vblank_hi:4;
- u8 vactive_hi:4;
- u8 hsync_off_lo;
- u8 hsync_pulse_width;
- u8 vsync_pulse_width:4;
- u8 vsync_off:4;
- u8 rsvd0:6;
- u8 hsync_off_hi:2;
- u8 h_image;
- u8 v_image;
- u8 max_hv;
- u8 h_border;
- u8 v_border;
- u8 rsvd1:3;
- u8 digital:2;
- u8 vsync_positive:1;
- u8 hsync_positive:1;
- u8 rsvd2:1;
-} __packed;
-
-struct lvds_pnp_id {
- u16 mfg_name;
- u16 product_code;
- u32 serial;
- u8 mfg_week;
- u8 mfg_year;
-} __packed;
-
-struct bdb_lvds_lfp_data_entry {
- struct lvds_fp_timing fp_timing;
- struct lvds_dvo_timing dvo_timing;
- struct lvds_pnp_id pnp_id;
-} __packed;
-
-struct bdb_lvds_lfp_data {
- struct bdb_lvds_lfp_data_entry data[16];
-} __packed;
-
-#define BDB_BACKLIGHT_TYPE_NONE 0
-#define BDB_BACKLIGHT_TYPE_PWM 2
-
-struct bdb_lfp_backlight_data_entry {
- u8 type:2;
- u8 active_low_pwm:1;
- u8 obsolete1:5;
- u16 pwm_freq_hz;
- u8 min_brightness;
- u8 obsolete2;
- u8 obsolete3;
-} __packed;
-
-struct bdb_lfp_backlight_data {
- u8 entry_size;
- struct bdb_lfp_backlight_data_entry data[16];
- u8 level[16];
-} __packed;
-
-struct aimdb_header {
- char signature[16];
- char oem_device[20];
- u16 aimdb_version;
- u16 aimdb_header_size;
- u16 aimdb_size;
-} __packed;
-
-struct aimdb_block {
- u8 aimdb_id;
- u16 aimdb_size;
-} __packed;
-struct vch_panel_data {
- u16 fp_timing_offset;
- u8 fp_timing_size;
- u16 dvo_timing_offset;
- u8 dvo_timing_size;
- u16 text_fitting_offset;
- u8 text_fitting_size;
- u16 graphics_fitting_offset;
- u8 graphics_fitting_size;
-} __packed;
-
-struct vch_bdb_22 {
- struct aimdb_block aimdb_block;
- struct vch_panel_data panels[16];
-} __packed;
-
-struct bdb_sdvo_lvds_options {
- u8 panel_backlight;
- u8 h40_set_panel_type;
- u8 panel_type;
- u8 ssc_clk_freq;
- u16 als_low_trip;
- u16 als_high_trip;
- u8 sclalarcoeff_tab_row_num;
- u8 sclalarcoeff_tab_row_size;
- u8 coefficient[8];
- u8 panel_misc_bits_1;
- u8 panel_misc_bits_2;
- u8 panel_misc_bits_3;
- u8 panel_misc_bits_4;
-} __packed;
-
-
-#define BDB_DRIVER_FEATURE_NO_LVDS 0
-#define BDB_DRIVER_FEATURE_INT_LVDS 1
-#define BDB_DRIVER_FEATURE_SDVO_LVDS 2
-#define BDB_DRIVER_FEATURE_EDP 3
-
-struct bdb_driver_features {
- u8 boot_dev_algorithm:1;
- u8 block_display_switch:1;
- u8 allow_display_switch:1;
- u8 hotplug_dvo:1;
- u8 dual_view_zoom:1;
- u8 int15h_hook:1;
- u8 sprite_in_clone:1;
- u8 primary_lfp_id:1;
-
- u16 boot_mode_x;
- u16 boot_mode_y;
- u8 boot_mode_bpp;
- u8 boot_mode_refresh;
-
- u16 enable_lfp_primary:1;
- u16 selective_mode_pruning:1;
- u16 dual_frequency:1;
- u16 render_clock_freq:1; /* 0: high freq; 1: low freq */
- u16 nt_clone_support:1;
- u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */
- u16 sprite_display_assign:1; /* 0: secondary; 1: primary */
- u16 cui_aspect_scaling:1;
- u16 preserve_aspect_ratio:1;
- u16 sdvo_device_power_down:1;
- u16 crt_hotplug:1;
- u16 lvds_config:2;
- u16 tv_hotplug:1;
- u16 hdmi_config:2;
-
- u8 static_display:1;
- u8 reserved2:7;
- u16 legacy_crt_max_x;
- u16 legacy_crt_max_y;
- u8 legacy_crt_max_refresh;
-
- u8 hdmi_termination;
- u8 custom_vbt_version;
- /* Driver features data block */
- u16 rmpm_enabled:1;
- u16 s2ddt_enabled:1;
- u16 dpst_enabled:1;
- u16 bltclt_enabled:1;
- u16 adb_enabled:1;
- u16 drrs_enabled:1;
- u16 grs_enabled:1;
- u16 gpmt_enabled:1;
- u16 tbt_enabled:1;
- u16 psr_enabled:1;
- u16 ips_enabled:1;
- u16 reserved3:4;
- u16 pc_feature_valid:1;
-} __packed;
-
-#define EDP_18BPP 0
-#define EDP_24BPP 1
-#define EDP_30BPP 2
-#define EDP_RATE_1_62 0
-#define EDP_RATE_2_7 1
-#define EDP_LANE_1 0
-#define EDP_LANE_2 1
-#define EDP_LANE_4 3
-#define EDP_PREEMPHASIS_NONE 0
-#define EDP_PREEMPHASIS_3_5dB 1
-#define EDP_PREEMPHASIS_6dB 2
-#define EDP_PREEMPHASIS_9_5dB 3
-#define EDP_VSWING_0_4V 0
-#define EDP_VSWING_0_6V 1
-#define EDP_VSWING_0_8V 2
-#define EDP_VSWING_1_2V 3
+#ifndef _INTEL_BIOS_H_
+#define _INTEL_BIOS_H_
struct edp_power_seq {
u16 t1_t3;
@@ -565,245 +38,37 @@ struct edp_power_seq {
u16 t11_t12;
} __packed;
-struct edp_link_params {
- u8 rate:4;
- u8 lanes:4;
- u8 preemphasis:4;
- u8 vswing:4;
-} __packed;
-
-struct bdb_edp {
- struct edp_power_seq power_seqs[16];
- u32 color_depth;
- struct edp_link_params link_params[16];
- u32 sdrrs_msa_timing_delay;
-
- /* ith bit indicates enabled/disabled for (i+1)th panel */
- u16 edp_s3d_feature;
- u16 edp_t3_optimization;
- u64 edp_vswing_preemph; /* v173 */
-} __packed;
-
-struct psr_table {
- /* Feature bits */
- u8 full_link:1;
- u8 require_aux_to_wakeup:1;
- u8 feature_bits_rsvd:6;
-
- /* Wait times */
- u8 idle_frames:4;
- u8 lines_to_wait:3;
- u8 wait_times_rsvd:1;
-
- /* TP wake up time in multiple of 100 */
- u16 tp1_wakeup_time;
- u16 tp2_tp3_wakeup_time;
-} __packed;
-
-struct bdb_psr {
- struct psr_table psr_table[16];
-} __packed;
-
-/*
- * Driver<->VBIOS interaction occurs through scratch bits in
- * GR18 & SWF*.
- */
-
-/* GR18 bits are set on display switch and hotkey events */
-#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
-#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
-#define GR18_HK_NONE (0x0<<3)
-#define GR18_HK_LFP_STRETCH (0x1<<3)
-#define GR18_HK_TOGGLE_DISP (0x2<<3)
-#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
-#define GR18_HK_POPUP_DISABLED (0x6<<3)
-#define GR18_HK_POPUP_ENABLED (0x7<<3)
-#define GR18_HK_PFIT (0x8<<3)
-#define GR18_HK_APM_CHANGE (0xa<<3)
-#define GR18_HK_MULTIPLE (0xc<<3)
-#define GR18_USER_INT_EN (1<<2)
-#define GR18_A0000_FLUSH_EN (1<<1)
-#define GR18_SMM_EN (1<<0)
-
-/* Set by driver, cleared by VBIOS */
-#define SWF00_YRES_SHIFT 16
-#define SWF00_XRES_SHIFT 0
-#define SWF00_RES_MASK 0xffff
-
-/* Set by VBIOS at boot time and driver at runtime */
-#define SWF01_TV2_FORMAT_SHIFT 8
-#define SWF01_TV1_FORMAT_SHIFT 0
-#define SWF01_TV_FORMAT_MASK 0xffff
-
-#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
-#define SWF10_GTT_OVERRIDE_EN (1<<28)
-#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
-#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
-#define SWF10_OLD_TOGGLE 0x0
-#define SWF10_TOGGLE_LIST_1 0x1
-#define SWF10_TOGGLE_LIST_2 0x2
-#define SWF10_TOGGLE_LIST_3 0x3
-#define SWF10_TOGGLE_LIST_4 0x4
-#define SWF10_PANNING_EN (1<<23)
-#define SWF10_DRIVER_LOADED (1<<22)
-#define SWF10_EXTENDED_DESKTOP (1<<21)
-#define SWF10_EXCLUSIVE_MODE (1<<20)
-#define SWF10_OVERLAY_EN (1<<19)
-#define SWF10_PLANEB_HOLDOFF (1<<18)
-#define SWF10_PLANEA_HOLDOFF (1<<17)
-#define SWF10_VGA_HOLDOFF (1<<16)
-#define SWF10_ACTIVE_DISP_MASK 0xffff
-#define SWF10_PIPEB_LFP2 (1<<15)
-#define SWF10_PIPEB_EFP2 (1<<14)
-#define SWF10_PIPEB_TV2 (1<<13)
-#define SWF10_PIPEB_CRT2 (1<<12)
-#define SWF10_PIPEB_LFP (1<<11)
-#define SWF10_PIPEB_EFP (1<<10)
-#define SWF10_PIPEB_TV (1<<9)
-#define SWF10_PIPEB_CRT (1<<8)
-#define SWF10_PIPEA_LFP2 (1<<7)
-#define SWF10_PIPEA_EFP2 (1<<6)
-#define SWF10_PIPEA_TV2 (1<<5)
-#define SWF10_PIPEA_CRT2 (1<<4)
-#define SWF10_PIPEA_LFP (1<<3)
-#define SWF10_PIPEA_EFP (1<<2)
-#define SWF10_PIPEA_TV (1<<1)
-#define SWF10_PIPEA_CRT (1<<0)
-
-#define SWF11_MEMORY_SIZE_SHIFT 16
-#define SWF11_SV_TEST_EN (1<<15)
-#define SWF11_IS_AGP (1<<14)
-#define SWF11_DISPLAY_HOLDOFF (1<<13)
-#define SWF11_DPMS_REDUCED (1<<12)
-#define SWF11_IS_VBE_MODE (1<<11)
-#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
-#define SWF11_DPMS_MASK 0x07
-#define SWF11_DPMS_OFF (1<<2)
-#define SWF11_DPMS_SUSPEND (1<<1)
-#define SWF11_DPMS_STANDBY (1<<0)
-#define SWF11_DPMS_ON 0
-
-#define SWF14_GFX_PFIT_EN (1<<31)
-#define SWF14_TEXT_PFIT_EN (1<<30)
-#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
-#define SWF14_POPUP_EN (1<<28)
-#define SWF14_DISPLAY_HOLDOFF (1<<27)
-#define SWF14_DISP_DETECT_EN (1<<26)
-#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
-#define SWF14_DRIVER_STATUS (1<<24)
-#define SWF14_OS_TYPE_WIN9X (1<<23)
-#define SWF14_OS_TYPE_WINNT (1<<22)
-/* 21:19 rsvd */
-#define SWF14_PM_TYPE_MASK 0x00070000
-#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
-#define SWF14_PM_ACPI (0x3 << 16)
-#define SWF14_PM_APM_12 (0x2 << 16)
-#define SWF14_PM_APM_11 (0x1 << 16)
-#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
- /* if GR18 indicates a display switch */
-#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
-#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
-#define SWF14_DS_PIPEB_TV2_EN (1<<13)
-#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
-#define SWF14_DS_PIPEB_LFP_EN (1<<11)
-#define SWF14_DS_PIPEB_EFP_EN (1<<10)
-#define SWF14_DS_PIPEB_TV_EN (1<<9)
-#define SWF14_DS_PIPEB_CRT_EN (1<<8)
-#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
-#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
-#define SWF14_DS_PIPEA_TV2_EN (1<<5)
-#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
-#define SWF14_DS_PIPEA_LFP_EN (1<<3)
-#define SWF14_DS_PIPEA_EFP_EN (1<<2)
-#define SWF14_DS_PIPEA_TV_EN (1<<1)
-#define SWF14_DS_PIPEA_CRT_EN (1<<0)
- /* if GR18 indicates a panel fitting request */
-#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
- /* if GR18 indicates an APM change request */
-#define SWF14_APM_HIBERNATE 0x4
-#define SWF14_APM_SUSPEND 0x3
-#define SWF14_APM_STANDBY 0x1
-#define SWF14_APM_RESTORE 0x0
-
-/* Add the device class for LFP, TV, HDMI */
-#define DEVICE_TYPE_INT_LFP 0x1022
-#define DEVICE_TYPE_INT_TV 0x1009
-#define DEVICE_TYPE_HDMI 0x60D2
-#define DEVICE_TYPE_DP 0x68C6
-#define DEVICE_TYPE_eDP 0x78C6
-
-#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
-#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14)
-#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13)
-#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12)
-#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11)
-#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10)
-#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9)
-#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8)
-#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6)
-#define DEVICE_TYPE_LVDS_SINGALING (1 << 5)
-#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
-#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3)
-#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
-#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1)
-#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0)
-
-/*
- * Bits we care about when checking for DEVICE_TYPE_eDP
- * Depending on the system, the other bits may or may not
- * be set for eDP outputs.
- */
-#define DEVICE_TYPE_eDP_BITS \
- (DEVICE_TYPE_INTERNAL_CONNECTOR | \
- DEVICE_TYPE_MIPI_OUTPUT | \
- DEVICE_TYPE_COMPOSITE_OUTPUT | \
- DEVICE_TYPE_DUAL_CHANNEL | \
- DEVICE_TYPE_LVDS_SINGALING | \
- DEVICE_TYPE_TMDS_DVI_SIGNALING | \
- DEVICE_TYPE_VIDEO_SIGNALING | \
- DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
- DEVICE_TYPE_ANALOG_OUTPUT)
-
-/* define the DVO port for HDMI output type */
-#define DVO_B 1
-#define DVO_C 2
-#define DVO_D 3
-
-/* Possible values for the "DVO Port" field for versions >= 155: */
-#define DVO_PORT_HDMIA 0
-#define DVO_PORT_HDMIB 1
-#define DVO_PORT_HDMIC 2
-#define DVO_PORT_HDMID 3
-#define DVO_PORT_LVDS 4
-#define DVO_PORT_TV 5
-#define DVO_PORT_CRT 6
-#define DVO_PORT_DPB 7
-#define DVO_PORT_DPC 8
-#define DVO_PORT_DPD 9
-#define DVO_PORT_DPA 10
-#define DVO_PORT_DPE 11
-#define DVO_PORT_HDMIE 12
-#define DVO_PORT_MIPIA 21
-#define DVO_PORT_MIPIB 22
-#define DVO_PORT_MIPIC 23
-#define DVO_PORT_MIPID 24
+/* MIPI Sequence Block definitions */
+enum mipi_seq {
+ MIPI_SEQ_END = 0,
+ MIPI_SEQ_ASSERT_RESET,
+ MIPI_SEQ_INIT_OTP,
+ MIPI_SEQ_DISPLAY_ON,
+ MIPI_SEQ_DISPLAY_OFF,
+ MIPI_SEQ_DEASSERT_RESET,
+ MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */
+ MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */
+ MIPI_SEQ_TEAR_ON, /* sequence block v2+ */
+ MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */
+ MIPI_SEQ_POWER_ON, /* sequence block v3+ */
+ MIPI_SEQ_POWER_OFF, /* sequence block v3+ */
+ MIPI_SEQ_MAX
+};
-/* Block 52 contains MIPI Panel info
- * 6 such enteries will there. Index into correct
- * entery is based on the panel_index in #40 LFP
- */
-#define MAX_MIPI_CONFIGURATIONS 6
+enum mipi_seq_element {
+ MIPI_SEQ_ELEM_END = 0,
+ MIPI_SEQ_ELEM_SEND_PKT,
+ MIPI_SEQ_ELEM_DELAY,
+ MIPI_SEQ_ELEM_GPIO,
+ MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */
+ MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */
+ MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */
+ MIPI_SEQ_ELEM_MAX
+};
#define MIPI_DSI_UNDEFINED_PANEL_ID 0
#define MIPI_DSI_GENERIC_PANEL_ID 1
-/*
- * PMIC vs SoC Backlight support specified in pwm_blc
- * field in mipi_config block below.
-*/
-#define PPS_BLC_PMIC 0
-#define PPS_BLC_SOC 1
-
struct mipi_config {
u16 panel_id;
@@ -821,6 +86,8 @@ struct mipi_config {
u32 video_transfer_mode:2;
u32 cabc_supported:1;
+#define PPS_BLC_PMIC 0
+#define PPS_BLC_SOC 1
u32 pwm_blc:1;
/* Bit 13:10 */
@@ -924,12 +191,7 @@ struct mipi_config {
} __packed;
-/* Block 52 contains MIPI configuration block
- * 6 * bdb_mipi_config, followed by 6 pps data
- * block below
- *
- * all delays has a unit of 100us
- */
+/* all delays have a unit of 100us */
struct mipi_pps_data {
u16 panel_on_delay;
u16 bl_enable_delay;
@@ -938,57 +200,4 @@ struct mipi_pps_data {
u16 panel_power_cycle_delay;
} __packed;
-struct bdb_mipi_config {
- struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
- struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
-} __packed;
-
-/* Block 53 contains MIPI sequences as needed by the panel
- * for enabling it. This block can be variable in size and
- * can be maximum of 6 blocks
- */
-struct bdb_mipi_sequence {
- u8 version;
- u8 data[0];
-} __packed;
-
-/* MIPI Sequnece Block definitions */
-enum mipi_seq {
- MIPI_SEQ_END = 0,
- MIPI_SEQ_ASSERT_RESET,
- MIPI_SEQ_INIT_OTP,
- MIPI_SEQ_DISPLAY_ON,
- MIPI_SEQ_DISPLAY_OFF,
- MIPI_SEQ_DEASSERT_RESET,
- MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */
- MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */
- MIPI_SEQ_TEAR_ON, /* sequence block v2+ */
- MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */
- MIPI_SEQ_POWER_ON, /* sequence block v3+ */
- MIPI_SEQ_POWER_OFF, /* sequence block v3+ */
- MIPI_SEQ_MAX
-};
-
-enum mipi_seq_element {
- MIPI_SEQ_ELEM_END = 0,
- MIPI_SEQ_ELEM_SEND_PKT,
- MIPI_SEQ_ELEM_DELAY,
- MIPI_SEQ_ELEM_GPIO,
- MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */
- MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */
- MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */
- MIPI_SEQ_ELEM_MAX
-};
-
-enum mipi_gpio_pin_index {
- MIPI_GPIO_UNDEFINED = 0,
- MIPI_GPIO_PANEL_ENABLE,
- MIPI_GPIO_BL_ENABLE,
- MIPI_GPIO_PWM_ENABLE,
- MIPI_GPIO_RESET_N,
- MIPI_GPIO_PWR_DOWN_R,
- MIPI_GPIO_STDBY_RST_N,
- MIPI_GPIO_MAX
-};
-
#endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
new file mode 100644
index 000000000..1b3f97449
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -0,0 +1,553 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "intel_drv.h"
+
+#define CTM_COEFF_SIGN (1ULL << 63)
+
+#define CTM_COEFF_1_0 (1ULL << 32)
+#define CTM_COEFF_2_0 (CTM_COEFF_1_0 << 1)
+#define CTM_COEFF_4_0 (CTM_COEFF_2_0 << 1)
+#define CTM_COEFF_8_0 (CTM_COEFF_4_0 << 1)
+#define CTM_COEFF_0_5 (CTM_COEFF_1_0 >> 1)
+#define CTM_COEFF_0_25 (CTM_COEFF_0_5 >> 1)
+#define CTM_COEFF_0_125 (CTM_COEFF_0_25 >> 1)
+
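+/* RGB limited range scale factor, (235 - 16) / 255, in U32.32 format. */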
+#define CTM_COEFF_LIMITED_RANGE ((235ULL - 16ULL) * CTM_COEFF_1_0 / 255)
+
+#define CTM_COEFF_NEGATIVE(coeff) (((coeff) & CTM_COEFF_SIGN) != 0)
+#define CTM_COEFF_ABS(coeff) ((coeff) & (CTM_COEFF_SIGN - 1))
+
+#define LEGACY_LUT_LENGTH (sizeof(struct drm_color_lut) * 256)
+
+/*
+ * Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point
+ * format). This macro takes the coefficient we want transformed and the
+ * number of fractional bits.
+ *
+ * We only have a 9-bit precision window which slides depending on the value
+ * of the CTM coefficient, and we write the value starting at bit 3. We also
+ * round the value.
+ */
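+/*
+ * For example, I9XX_CSC_COEFF_FP(CTM_COEFF_0_5, 9) computes
+ * ((1ULL << 31) >> 20) + 4 = 0x804, rounded and masked to 0x800: a 9-bit
+ * mantissa of 256 written from bit 3, i.e. 256 / 512 = 0.5.
+ */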
+#define I9XX_CSC_COEFF_FP(coeff, fbits) \
+ (clamp_val(((coeff) >> (32 - (fbits) - 3)) + 4, 0, 0xfff) & 0xff8)
+
+#define I9XX_CSC_COEFF_LIMITED_RANGE \
+ I9XX_CSC_COEFF_FP(CTM_COEFF_LIMITED_RANGE, 9)
+#define I9XX_CSC_COEFF_1_0 \
+ ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
+
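+/*
+ * A "legacy" state carries only a 256-entry gamma LUT, with no degamma LUT
+ * and no CTM, which is the shape produced by the legacy gamma ramp interface.
+ */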
+static bool crtc_state_is_legacy(struct drm_crtc_state *state)
+{
+ return !state->degamma_lut &&
+ !state->ctm &&
+ state->gamma_lut &&
+ state->gamma_lut->length == LEGACY_LUT_LENGTH;
+}
+
+/*
+ * When using limited range, multiply the matrix given by userspace by
+ * the matrix that we would use for the limited range. We do the
+ * multiplication in U2.30 format.
+ */
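+/*
+ * Note that only the diagonal of the user matrix is scaled here; all other
+ * entries of the result are left at zero.
+ */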
+static void ctm_mult_by_limited(uint64_t *result, int64_t *input)
+{
+ int i;
+
+ for (i = 0; i < 9; i++)
+ result[i] = 0;
+
+ for (i = 0; i < 3; i++) {
+ int64_t user_coeff = input[i * 3 + i];
+ uint64_t limited_coeff = CTM_COEFF_LIMITED_RANGE >> 2;
+ uint64_t abs_coeff = clamp_val(CTM_COEFF_ABS(user_coeff),
+ 0,
+ CTM_COEFF_4_0 - 1) >> 2;
+
+ result[i * 3 + i] = (limited_coeff * abs_coeff) >> 27;
+ if (CTM_COEFF_NEGATIVE(user_coeff))
+ result[i * 3 + i] |= CTM_COEFF_SIGN;
+ }
+}
+
+/* Set up the pipe CSC unit. */
+static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
+{
+ struct drm_crtc *crtc = crtc_state->crtc;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int i, pipe = intel_crtc->pipe;
+ uint16_t coeffs[9] = { 0, };
+
+ if (crtc_state->ctm) {
+ struct drm_color_ctm *ctm =
+ (struct drm_color_ctm *)crtc_state->ctm->data;
+ uint64_t input[9] = { 0, };
+
+ if (intel_crtc->config->limited_color_range) {
+ ctm_mult_by_limited(input, ctm->matrix);
+ } else {
+ for (i = 0; i < ARRAY_SIZE(input); i++)
+ input[i] = ctm->matrix[i];
+ }
+
+ /*
+ * Convert fixed point S31.32 input to format supported by the
+ * hardware.
+ */
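+ /*
+ * Each output coefficient is 16 bits wide: sign in bit 15, a 3-bit
+ * exponent selector in bits 14:12 and a 9-bit mantissa starting at
+ * bit 3. The exponent is picked below based on the coefficient
+ * magnitude.
+ */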
+ for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
+ uint64_t abs_coeff = ((1ULL << 63) - 1) & input[i];
+
+ /*
+ * Clamp input value to min/max supported by
+ * hardware.
+ */
+ abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);
+
+ /* sign bit */
+ if (CTM_COEFF_NEGATIVE(input[i]))
+ coeffs[i] |= 1 << 15;
+
+ if (abs_coeff < CTM_COEFF_0_125)
+ coeffs[i] |= (3 << 12) |
+ I9XX_CSC_COEFF_FP(abs_coeff, 12);
+ else if (abs_coeff < CTM_COEFF_0_25)
+ coeffs[i] |= (2 << 12) |
+ I9XX_CSC_COEFF_FP(abs_coeff, 11);
+ else if (abs_coeff < CTM_COEFF_0_5)
+ coeffs[i] |= (1 << 12) |
+ I9XX_CSC_COEFF_FP(abs_coeff, 10);
+ else if (abs_coeff < CTM_COEFF_1_0)
+ coeffs[i] |= I9XX_CSC_COEFF_FP(abs_coeff, 9);
+ else if (abs_coeff < CTM_COEFF_2_0)
+ coeffs[i] |= (7 << 12) |
+ I9XX_CSC_COEFF_FP(abs_coeff, 8);
+ else
+ coeffs[i] |= (6 << 12) |
+ I9XX_CSC_COEFF_FP(abs_coeff, 7);
+ }
+ } else {
+ /*
+ * Load an identity matrix if no coefficients are provided.
+ *
+ * TODO: Check what kind of values actually come out of the
+ * pipe with these coeff/postoff values and adjust to get the
+ * best accuracy. Perhaps we even need to take the bpc value
+ * into consideration.
+ */
+ for (i = 0; i < 3; i++) {
+ if (intel_crtc->config->limited_color_range)
+ coeffs[i * 3 + i] =
+ I9XX_CSC_COEFF_LIMITED_RANGE;
+ else
+ coeffs[i * 3 + i] = I9XX_CSC_COEFF_1_0;
+ }
+ }
+
+ I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeffs[0] << 16 | coeffs[1]);
+ I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeffs[2] << 16);
+
+ I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeffs[3] << 16 | coeffs[4]);
+ I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeffs[5] << 16);
+
+ I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeffs[6] << 16 | coeffs[7]);
+ I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeffs[8] << 16);
+
+ I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
+ I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
+ I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
+
+ if (INTEL_INFO(dev)->gen > 6) {
+ uint16_t postoff = 0;
+
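+ /*
+ * For limited range the post-offset is the black level, i.e. 16/255
+ * expressed in the register's .12 fixed point format.
+ */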
+ if (intel_crtc->config->limited_color_range)
+ postoff = (16 * (1 << 12) / 255) & 0x1fff;
+
+ I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
+ I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
+ I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
+
+ I915_WRITE(PIPE_CSC_MODE(pipe), 0);
+ } else {
+ uint32_t mode = CSC_MODE_YUV_TO_RGB;
+
+ if (intel_crtc->config->limited_color_range)
+ mode |= CSC_BLACK_SCREEN_OFFSET;
+
+ I915_WRITE(PIPE_CSC_MODE(pipe), mode);
+ }
+}
+
+/*
+ * Set up the pipe CSC unit on CherryView.
+ */
+static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = to_intel_crtc(crtc)->pipe;
+ uint32_t mode;
+
+ if (state->ctm) {
+ struct drm_color_ctm *ctm =
+ (struct drm_color_ctm *) state->ctm->data;
+ uint16_t coeffs[9] = { 0, };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
+ uint64_t abs_coeff =
+ ((1ULL << 63) - 1) & ctm->matrix[i];
+
+ /* Round coefficient. */
+ abs_coeff += 1 << (32 - 13);
+ /* Clamp to hardware limits. */
+ abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1);
+
+ /* Write coefficients in S3.12 format. */
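+ /*
+ * e.g. a coefficient of exactly 1.0 (1ULL << 32) becomes 0x1000:
+ * integer part 1 in bits 14:12 and a zero fraction in bits 11:0.
+ */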
+ if (ctm->matrix[i] & (1ULL << 63))
+ coeffs[i] = 1 << 15;
+ coeffs[i] |= ((abs_coeff >> 32) & 7) << 12;
+ coeffs[i] |= (abs_coeff >> 20) & 0xfff;
+ }
+
+ I915_WRITE(CGM_PIPE_CSC_COEFF01(pipe),
+ coeffs[1] << 16 | coeffs[0]);
+ I915_WRITE(CGM_PIPE_CSC_COEFF23(pipe),
+ coeffs[3] << 16 | coeffs[2]);
+ I915_WRITE(CGM_PIPE_CSC_COEFF45(pipe),
+ coeffs[5] << 16 | coeffs[4]);
+ I915_WRITE(CGM_PIPE_CSC_COEFF67(pipe),
+ coeffs[7] << 16 | coeffs[6]);
+ I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]);
+ }
+
+ mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0);
+ if (!crtc_state_is_legacy(state)) {
+ mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
+ (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
+ }
+ I915_WRITE(CGM_PIPE_MODE(pipe), mode);
+}
+
+void intel_color_set_csc(struct drm_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc_state->crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.load_csc_matrix)
+ dev_priv->display.load_csc_matrix(crtc_state);
+}
+
+/* Loads the legacy palette/gamma unit for the CRTC. */
+static void i9xx_load_luts_internal(struct drm_crtc *crtc,
+ struct drm_property_blob *blob)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ int i;
+
+ if (HAS_GMCH_DISPLAY(dev)) {
+ if (intel_crtc->config->has_dsi_encoder)
+ assert_dsi_pll_enabled(dev_priv);
+ else
+ assert_pll_enabled(dev_priv, pipe);
+ }
+
+ if (blob) {
+ struct drm_color_lut *lut = (struct drm_color_lut *) blob->data;
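+ /* 8 bits per channel, packed as one RGB word per palette entry. */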
+ for (i = 0; i < 256; i++) {
+ uint32_t word =
+ (drm_color_lut_extract(lut[i].red, 8) << 16) |
+ (drm_color_lut_extract(lut[i].green, 8) << 8) |
+ drm_color_lut_extract(lut[i].blue, 8);
+
+ if (HAS_GMCH_DISPLAY(dev))
+ I915_WRITE(PALETTE(pipe, i), word);
+ else
+ I915_WRITE(LGC_PALETTE(pipe, i), word);
+ }
+ } else {
+ for (i = 0; i < 256; i++) {
+ uint32_t word = (i << 16) | (i << 8) | i;
+
+ if (HAS_GMCH_DISPLAY(dev))
+ I915_WRITE(PALETTE(pipe, i), word);
+ else
+ I915_WRITE(LGC_PALETTE(pipe, i), word);
+ }
+ }
+}
+
+static void i9xx_load_luts(struct drm_crtc_state *crtc_state)
+{
+ i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut);
+}
+
+/* Loads the legacy palette/gamma unit for the CRTC on Haswell. */
+static void haswell_load_luts(struct drm_crtc_state *crtc_state)
+{
+ struct drm_crtc *crtc = crtc_state->crtc;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc_state *intel_crtc_state =
+ to_intel_crtc_state(crtc_state);
+ bool reenable_ips = false;
+
+ /*
+ * Workaround: Do not read or write the pipe palette/gamma data while
+ * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
+ */
+ if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
+ (intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
+ hsw_disable_ips(intel_crtc);
+ reenable_ips = true;
+ }
+
+ intel_crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
+ I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
+
+ i9xx_load_luts(crtc_state);
+
+ if (reenable_ips)
+ hsw_enable_ips(intel_crtc);
+}
+
+/* Loads the palette/gamma unit for the CRTC on Broadwell+. */
+static void broadwell_load_luts(struct drm_crtc_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
+
+ if (crtc_state_is_legacy(state)) {
+ haswell_load_luts(state);
+ return;
+ }
+
+ I915_WRITE(PREC_PAL_INDEX(pipe),
+ PAL_PREC_SPLIT_MODE | PAL_PREC_AUTO_INCREMENT);
+
+ if (state->degamma_lut) {
+ struct drm_color_lut *lut =
+ (struct drm_color_lut *) state->degamma_lut->data;
+
+ for (i = 0; i < lut_size; i++) {
+ uint32_t word =
+ drm_color_lut_extract(lut[i].red, 10) << 20 |
+ drm_color_lut_extract(lut[i].green, 10) << 10 |
+ drm_color_lut_extract(lut[i].blue, 10);
+
+ I915_WRITE(PREC_PAL_DATA(pipe), word);
+ }
+ } else {
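+ /* No degamma LUT provided: program a linear (identity) ramp. */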
+ for (i = 0; i < lut_size; i++) {
+ uint32_t v = (i * ((1 << 10) - 1)) / (lut_size - 1);
+
+ I915_WRITE(PREC_PAL_DATA(pipe),
+ (v << 20) | (v << 10) | v);
+ }
+ }
+
+ if (state->gamma_lut) {
+ struct drm_color_lut *lut =
+ (struct drm_color_lut *) state->gamma_lut->data;
+
+ for (i = 0; i < lut_size; i++) {
+ uint32_t word =
+ (drm_color_lut_extract(lut[i].red, 10) << 20) |
+ (drm_color_lut_extract(lut[i].green, 10) << 10) |
+ drm_color_lut_extract(lut[i].blue, 10);
+
+ I915_WRITE(PREC_PAL_DATA(pipe), word);
+ }
+
+ /* Program the max register to clamp values > 1.0. */
+ I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
+ drm_color_lut_extract(lut[i].red, 16));
+ I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
+ drm_color_lut_extract(lut[i].green, 16));
+ I915_WRITE(PREC_PAL_GC_MAX(pipe, 2),
+ drm_color_lut_extract(lut[i].blue, 16));
+ } else {
+ for (i = 0; i < lut_size; i++) {
+ uint32_t v = (i * ((1 << 10) - 1)) / (lut_size - 1);
+
+ I915_WRITE(PREC_PAL_DATA(pipe),
+ (v << 20) | (v << 10) | v);
+ }
+
+ I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), (1 << 16) - 1);
+ I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), (1 << 16) - 1);
+ I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), (1 << 16) - 1);
+ }
+
+ intel_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
+ I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_SPLIT);
+ POSTING_READ(GAMMA_MODE(pipe));
+
+ /*
+ * Reset the index, otherwise it prevents the legacy palette from being
+ * written properly.
+ */
+ I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+}
+
+/* Loads the palette/gamma unit for the CRTC on CherryView. */
+static void cherryview_load_luts(struct drm_crtc_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ struct drm_color_lut *lut;
+ uint32_t i, lut_size;
+ uint32_t word0, word1;
+
+ if (crtc_state_is_legacy(state)) {
+ /* Turn off degamma/gamma on CGM block. */
+ I915_WRITE(CGM_PIPE_MODE(pipe),
+ (state->ctm ? CGM_PIPE_MODE_CSC : 0));
+ i9xx_load_luts_internal(crtc, state->gamma_lut);
+ return;
+ }
+
+ if (state->degamma_lut) {
+ lut = (struct drm_color_lut *) state->degamma_lut->data;
+ lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
+ for (i = 0; i < lut_size; i++) {
+ /* Write LUT in U0.14 format. */
+ word0 =
+ (drm_color_lut_extract(lut[i].green, 14) << 16) |
+ drm_color_lut_extract(lut[i].blue, 14);
+ word1 = drm_color_lut_extract(lut[i].red, 14);
+
+ I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 0), word0);
+ I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 1), word1);
+ }
+ }
+
+ if (state->gamma_lut) {
+ lut = (struct drm_color_lut *) state->gamma_lut->data;
+ lut_size = INTEL_INFO(dev)->color.gamma_lut_size;
+ for (i = 0; i < lut_size; i++) {
+ /* Write LUT in U0.10 format. */
+ word0 =
+ (drm_color_lut_extract(lut[i].green, 10) << 16) |
+ drm_color_lut_extract(lut[i].blue, 10);
+ word1 = drm_color_lut_extract(lut[i].red, 10);
+
+ I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 0), word0);
+ I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 1), word1);
+ }
+ }
+
+ I915_WRITE(CGM_PIPE_MODE(pipe),
+ (state->ctm ? CGM_PIPE_MODE_CSC : 0) |
+ (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
+ (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0));
+
+ /*
+ * Also program a linear LUT in the legacy block (behind the
+ * CGM block).
+ */
+ i9xx_load_luts_internal(crtc, NULL);
+}
+
+void intel_color_load_luts(struct drm_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc_state->crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->display.load_luts(crtc_state);
+}
+
+int intel_color_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->dev;
+ size_t gamma_length, degamma_length;
+
+ degamma_length = INTEL_INFO(dev)->color.degamma_lut_size *
+ sizeof(struct drm_color_lut);
+ gamma_length = INTEL_INFO(dev)->color.gamma_lut_size *
+ sizeof(struct drm_color_lut);
+
+ /*
+ * Both the degamma and gamma LUTs are allowed to be either of the
+ * expected size or NULL.
+ */
+ if ((!crtc_state->degamma_lut ||
+ crtc_state->degamma_lut->length == degamma_length) &&
+ (!crtc_state->gamma_lut ||
+ crtc_state->gamma_lut->length == gamma_length))
+ return 0;
+
+ /*
+ * We also allow no degamma lut and a gamma lut at the legacy
+ * size (256 entries).
+ */
+ if (!crtc_state->degamma_lut &&
+ crtc_state->gamma_lut &&
+ crtc_state->gamma_lut->length == LEGACY_LUT_LENGTH)
+ return 0;
+
+ return -EINVAL;
+}
+
+void intel_color_init(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ drm_mode_crtc_set_gamma_size(crtc, 256);
+
+ if (IS_CHERRYVIEW(dev)) {
+ dev_priv->display.load_csc_matrix = cherryview_load_csc_matrix;
+ dev_priv->display.load_luts = cherryview_load_luts;
+ } else if (IS_HASWELL(dev)) {
+ dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
+ dev_priv->display.load_luts = haswell_load_luts;
+ } else if (IS_BROADWELL(dev) || IS_SKYLAKE(dev) ||
+ IS_BROXTON(dev) || IS_KABYLAKE(dev)) {
+ dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
+ dev_priv->display.load_luts = broadwell_load_luts;
+ } else {
+ dev_priv->display.load_luts = i9xx_load_luts;
+ }
+
+ /* Enable color management support when we have degamma & gamma LUTs. */
+ if (INTEL_INFO(dev)->color.degamma_lut_size != 0 &&
+ INTEL_INFO(dev)->color.gamma_lut_size != 0)
+ drm_helper_crtc_enable_color_mgmt(crtc,
+ INTEL_INFO(dev)->color.degamma_lut_size,
+ INTEL_INFO(dev)->color.gamma_lut_size);
+}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 036429236..3fbb6fc66 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -120,22 +120,16 @@ static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
static void intel_crt_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
- int dotclock;
-
pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
- dotclock = pipe_config->port_clock;
-
- if (HAS_PCH_SPLIT(dev))
- ironlake_check_encoder_dotclock(pipe_config, dotclock);
-
- pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+ pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static void hsw_crt_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
intel_ddi_get_config(encoder, pipe_config);
pipe_config->base.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
@@ -143,6 +137,8 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_NVSYNC);
pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
+
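+ /* The LPT CRT dotclock is generated by iCLKIP; read it back from there. */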
+ pipe_config->base.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv);
}
/* Note: The caller is required to filter out dpms modes not supported by the
@@ -222,18 +218,26 @@ intel_crt_mode_valid(struct drm_connector *connector,
{
struct drm_device *dev = connector->dev;
int max_dotclk = to_i915(dev)->max_dotclk_freq;
+ int max_clock;
- int max_clock = 0;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
- if (IS_GEN2(dev))
- max_clock = 350000;
- else
+ if (HAS_PCH_LPT(dev))
+ max_clock = 180000;
+ else if (IS_VALLEYVIEW(dev))
+ /*
+ * 270 MHz due to current DPLL limits,
+ * DAC limit supposedly 355 MHz.
+ */
+ max_clock = 270000;
+ else if (IS_GEN3(dev) || IS_GEN4(dev))
max_clock = 400000;
+ else
+ max_clock = 350000;
if (mode->clock > max_clock)
return MODE_CLOCK_HIGH;
@@ -267,15 +271,9 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
}
/* FDI must always be 2.7 GHz */
- if (HAS_DDI(dev)) {
- pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
+ if (HAS_DDI(dev))
pipe_config->port_clock = 135000 * 2;
- pipe_config->dpll_hw_state.wrpll = 0;
- pipe_config->dpll_hw_state.spll =
- SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
- }
-
return true;
}
@@ -658,6 +656,8 @@ intel_crt_detect(struct drm_connector *connector, bool force)
else if (INTEL_INFO(dev)->gen < 4)
status = intel_crt_load_detect(crt,
to_intel_crtc(connector->state->crtc)->pipe);
+ else if (i915.load_detect_test)
+ status = connector_status_disconnected;
else
status = connector_status_unknown;
intel_release_load_detect_pipe(connector, &tmp, &ctx);
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index b1a547378..ecadf8709 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -41,14 +41,22 @@
* be moved to FW_FAILED.
*/
+#define I915_CSR_KBL "/*(DEBLOBBED)*/"
+/*(DEBLOBBED)*/
+#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
+
#define I915_CSR_SKL "/*(DEBLOBBED)*/"
+/*(DEBLOBBED)*/
+#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
+
#define I915_CSR_BXT "/*(DEBLOBBED)*/"
+/*(DEBLOBBED)*/
+#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
#define FIRMWARE_URL "/*(DEBLOBBED)*/"
-/*(DEBLOBBED)*/
-#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
+
#define CSR_MAX_FW_SIZE 0x2FFF
#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
@@ -167,12 +175,10 @@ struct stepping_info {
char substepping;
};
-/*
- * Kabylake derivated from Skylake H0, so SKL H0
- * is the right firmware for KBL A0 (revid 0).
- */
static const struct stepping_info kbl_stepping_info[] = {
- {'H', '0'}, {'I', '0'}
+ {'A', '0'}, {'B', '0'}, {'C', '0'},
+ {'D', '0'}, {'E', '0'}, {'F', '0'},
+ {'G', '0'}, {'H', '0'}, {'I', '0'},
};
static const struct stepping_info skl_stepping_info[] = {
@@ -187,28 +193,49 @@ static const struct stepping_info bxt_stepping_info[] = {
{'B', '0'}, {'B', '1'}, {'B', '2'}
};
-static const struct stepping_info *intel_get_stepping_info(struct drm_device *dev)
+static const struct stepping_info no_stepping_info = { '*', '*' };
+
+static const struct stepping_info *
+intel_get_stepping_info(struct drm_i915_private *dev_priv)
{
const struct stepping_info *si;
unsigned int size;
- if (IS_KABYLAKE(dev)) {
+ if (IS_KABYLAKE(dev_priv)) {
size = ARRAY_SIZE(kbl_stepping_info);
si = kbl_stepping_info;
- } else if (IS_SKYLAKE(dev)) {
+ } else if (IS_SKYLAKE(dev_priv)) {
size = ARRAY_SIZE(skl_stepping_info);
si = skl_stepping_info;
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
size = ARRAY_SIZE(bxt_stepping_info);
si = bxt_stepping_info;
} else {
- return NULL;
+ size = 0;
}
- if (INTEL_REVID(dev) < size)
- return si + INTEL_REVID(dev);
+ if (INTEL_REVID(dev_priv) < size)
+ return si + INTEL_REVID(dev_priv);
- return NULL;
+ return &no_stepping_info;
+}
+
+static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
+{
+ uint32_t val, mask;
+
+ mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
+
+ if (IS_BROXTON(dev_priv))
+ mask |= DC_STATE_DEBUG_MASK_CORES;
+
+ /* The bits below never need to be cleared afterwards */
+ val = I915_READ(DC_STATE_DEBUG);
+ if ((val & mask) != mask) {
+ val |= mask;
+ I915_WRITE(DC_STATE_DEBUG, val);
+ POSTING_READ(DC_STATE_DEBUG);
+ }
}
/**
@@ -219,19 +246,19 @@ static const struct stepping_info *intel_get_stepping_info(struct drm_device *de
* Everytime display comes back from low power state this function is called to
* copy the firmware from internal memory to registers.
*/
-bool intel_csr_load_program(struct drm_i915_private *dev_priv)
+void intel_csr_load_program(struct drm_i915_private *dev_priv)
{
u32 *payload = dev_priv->csr.dmc_payload;
uint32_t i, fw_size;
if (!IS_GEN9(dev_priv)) {
DRM_ERROR("No CSR support available for this platform\n");
- return false;
+ return;
}
if (!dev_priv->csr.dmc_payload) {
DRM_ERROR("Tried to program CSR with empty payload\n");
- return false;
+ return;
}
fw_size = dev_priv->csr.dmc_fw_size;
@@ -245,34 +272,25 @@ bool intel_csr_load_program(struct drm_i915_private *dev_priv)
dev_priv->csr.dc_state = 0;
- return true;
+ gen9_set_dc_state_debugmask(dev_priv);
}
static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
const struct firmware *fw)
{
- struct drm_device *dev = dev_priv->dev;
struct intel_css_header *css_header;
struct intel_package_header *package_header;
struct intel_dmc_header *dmc_header;
struct intel_csr *csr = &dev_priv->csr;
- const struct stepping_info *stepping_info = intel_get_stepping_info(dev);
- char stepping, substepping;
+ const struct stepping_info *si = intel_get_stepping_info(dev_priv);
uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
uint32_t i;
uint32_t *dmc_payload;
+ uint32_t required_min_version;
if (!fw)
return NULL;
- if (!stepping_info) {
- DRM_ERROR("Unknown stepping info, firmware loading failed\n");
- return NULL;
- }
-
- stepping = stepping_info->stepping;
- substepping = stepping_info->substepping;
-
/* Extract CSS Header information*/
css_header = (struct intel_css_header *)fw->data;
if (sizeof(struct intel_css_header) !=
@@ -284,15 +302,25 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
csr->version = css_header->version;
- if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
- csr->version < SKL_CSR_VERSION_REQUIRED) {
- DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u,"
+ if (IS_KABYLAKE(dev_priv)) {
+ required_min_version = KBL_CSR_VERSION_REQUIRED;
+ } else if (IS_SKYLAKE(dev_priv)) {
+ required_min_version = SKL_CSR_VERSION_REQUIRED;
+ } else if (IS_BROXTON(dev_priv)) {
+ required_min_version = BXT_CSR_VERSION_REQUIRED;
+ } else {
+ MISSING_CASE(INTEL_REVID(dev_priv));
+ required_min_version = 0;
+ }
+
+ if (csr->version < required_min_version) {
+ DRM_INFO("Refusing to load old DMC firmware v%u.%u,"
" please upgrade to v%u.%u or later"
" [" FIRMWARE_URL "].\n",
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version),
- CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED),
- CSR_VERSION_MINOR(SKL_CSR_VERSION_REQUIRED));
+ CSR_VERSION_MAJOR(required_min_version),
+ CSR_VERSION_MINOR(required_min_version));
return NULL;
}
@@ -312,11 +340,11 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
/* Search for dmc_offset to find firware binary. */
for (i = 0; i < package_header->num_entries; i++) {
if (package_header->fw_info[i].substepping == '*' &&
- stepping == package_header->fw_info[i].stepping) {
+ si->stepping == package_header->fw_info[i].stepping) {
dmc_offset = package_header->fw_info[i].offset;
break;
- } else if (stepping == package_header->fw_info[i].stepping &&
- substepping == package_header->fw_info[i].substepping) {
+ } else if (si->stepping == package_header->fw_info[i].stepping &&
+ si->substepping == package_header->fw_info[i].substepping) {
dmc_offset = package_header->fw_info[i].offset;
break;
} else if (package_header->fw_info[i].stepping == '*' &&
@@ -324,7 +352,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
dmc_offset = package_header->fw_info[i].offset;
}
if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
- DRM_ERROR("Firmware not supported for %c stepping\n", stepping);
+ DRM_ERROR("Firmware not supported for %c stepping\n",
+ si->stepping);
return NULL;
}
readcount += dmc_offset;
@@ -370,9 +399,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
return NULL;
}
- memcpy(dmc_payload, &fw->data[readcount], nbytes);
-
- return dmc_payload;
+ return memcpy(dmc_payload, &fw->data[readcount], nbytes);
}
static void csr_load_work_fn(struct work_struct *work)
@@ -387,18 +414,12 @@ static void csr_load_work_fn(struct work_struct *work)
ret = reject_firmware(&fw, dev_priv->csr.fw_path,
&dev_priv->dev->pdev->dev);
- if (!fw)
- goto out;
-
- dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
- if (!dev_priv->csr.dmc_payload)
- goto out;
+ if (fw)
+ dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
- /* load csr program during system boot, as needed for DC states */
- intel_csr_load_program(dev_priv);
-
-out:
if (dev_priv->csr.dmc_payload) {
+ intel_csr_load_program(dev_priv);
+
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
DRM_INFO("Finished loading %s (v%u.%u)\n",
@@ -431,7 +452,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (!HAS_CSR(dev_priv))
return;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_KABYLAKE(dev_priv))
+ csr->fw_path = I915_CSR_KBL;
+ else if (IS_SKYLAKE(dev_priv))
csr->fw_path = I915_CSR_SKL;
else if (IS_BROXTON(dev_priv))
csr->fw_path = I915_CSR_BXT;
@@ -452,10 +475,50 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
}
/**
+ * intel_csr_ucode_suspend() - prepare CSR firmware before system suspend
+ * @dev_priv: i915 drm device
+ *
+ * Prepare the DMC firmware before entering system suspend. This includes
+ * flushing pending work items and releasing any resources acquired during
+ * init.
+ */
+void intel_csr_ucode_suspend(struct drm_i915_private *dev_priv)
+{
+ if (!HAS_CSR(dev_priv))
+ return;
+
+ flush_work(&dev_priv->csr.work);
+
+ /* Drop the reference held in case DMC isn't loaded. */
+ if (!dev_priv->csr.dmc_payload)
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+}
+
+/**
+ * intel_csr_ucode_resume() - init CSR firmware during system resume
+ * @dev_priv: i915 drm device
+ *
+ * Reinitialize the DMC firmware during system resume, reacquiring any
+ * resources released in intel_csr_ucode_suspend().
+ */
+void intel_csr_ucode_resume(struct drm_i915_private *dev_priv)
+{
+ if (!HAS_CSR(dev_priv))
+ return;
+
+ /*
+ * Reacquire the reference to keep RPM disabled in case DMC isn't
+ * loaded.
+ */
+ if (!dev_priv->csr.dmc_payload)
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+}
+
+/**
* intel_csr_ucode_fini() - unload the CSR firmware.
* @dev_priv: i915 drm device.
*
- * Firmmware unloading includes freeing the internal momory and reset the
+ * Firmware unloading includes freeing the internal memory and resetting the
* firmware loading status.
*/
void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
@@ -463,7 +526,7 @@ void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
if (!HAS_CSR(dev_priv))
return;
- flush_work(&dev_priv->csr.work);
+ intel_csr_ucode_suspend(dev_priv);
kfree(dev_priv->csr.dmc_payload);
}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 50f5b0c97..01e523df3 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -315,6 +315,9 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
*dig_port = enc_to_mst(encoder)->primary;
*port = (*dig_port)->port;
break;
+ default:
+ WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
+ /* fallthrough and treat as unknown */
case INTEL_OUTPUT_DISPLAYPORT:
case INTEL_OUTPUT_EDP:
case INTEL_OUTPUT_HDMI:
@@ -326,9 +329,6 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
*dig_port = NULL;
*port = PORT_E;
break;
- default:
- WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
- break;
}
}
@@ -360,7 +360,7 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
- if (dev_priv->edp_low_vswing) {
+ if (dev_priv->vbt.edp.low_vswing) {
if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
return skl_y_ddi_translations_edp;
@@ -444,7 +444,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
- if (dev_priv->edp_low_vswing) {
+ if (dev_priv->vbt.edp.low_vswing) {
ddi_translations_edp = bdw_ddi_translations_edp;
n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
} else {
@@ -637,6 +637,10 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
break;
}
+ rx_ctl_val &= ~FDI_RX_ENABLE;
+ I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ POSTING_READ(FDI_RX_CTL(PIPE_A));
+
temp = I915_READ(DDI_BUF_CTL(PORT_E));
temp &= ~DDI_BUF_CTL_ENABLE;
I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
@@ -651,10 +655,6 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
intel_wait_ddi_buf_idle(dev_priv, PORT_E);
- rx_ctl_val &= ~FDI_RX_ENABLE;
- I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
- POSTING_READ(FDI_RX_CTL(PIPE_A));
-
/* Reset FDI_RX_MISC pwrdn lanes */
temp = I915_READ(FDI_RX_MISC(PIPE_A));
temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
@@ -732,160 +732,6 @@ intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
}
#define LC_FREQ 2700
-#define LC_FREQ_2K U64_C(LC_FREQ * 2000)
-
-#define P_MIN 2
-#define P_MAX 64
-#define P_INC 2
-
-/* Constraints for PLL good behavior */
-#define REF_MIN 48
-#define REF_MAX 400
-#define VCO_MIN 2400
-#define VCO_MAX 4800
-
-#define abs_diff(a, b) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- (void) (&__a == &__b); \
- __a > __b ? (__a - __b) : (__b - __a); })
-
-struct hsw_wrpll_rnp {
- unsigned p, n2, r2;
-};
-
-static unsigned hsw_wrpll_get_budget_for_freq(int clock)
-{
- unsigned budget;
-
- switch (clock) {
- case 25175000:
- case 25200000:
- case 27000000:
- case 27027000:
- case 37762500:
- case 37800000:
- case 40500000:
- case 40541000:
- case 54000000:
- case 54054000:
- case 59341000:
- case 59400000:
- case 72000000:
- case 74176000:
- case 74250000:
- case 81000000:
- case 81081000:
- case 89012000:
- case 89100000:
- case 108000000:
- case 108108000:
- case 111264000:
- case 111375000:
- case 148352000:
- case 148500000:
- case 162000000:
- case 162162000:
- case 222525000:
- case 222750000:
- case 296703000:
- case 297000000:
- budget = 0;
- break;
- case 233500000:
- case 245250000:
- case 247750000:
- case 253250000:
- case 298000000:
- budget = 1500;
- break;
- case 169128000:
- case 169500000:
- case 179500000:
- case 202000000:
- budget = 2000;
- break;
- case 256250000:
- case 262500000:
- case 270000000:
- case 272500000:
- case 273750000:
- case 280750000:
- case 281250000:
- case 286000000:
- case 291750000:
- budget = 4000;
- break;
- case 267250000:
- case 268500000:
- budget = 5000;
- break;
- default:
- budget = 1000;
- break;
- }
-
- return budget;
-}
-
-static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
- unsigned r2, unsigned n2, unsigned p,
- struct hsw_wrpll_rnp *best)
-{
- uint64_t a, b, c, d, diff, diff_best;
-
- /* No best (r,n,p) yet */
- if (best->p == 0) {
- best->p = p;
- best->n2 = n2;
- best->r2 = r2;
- return;
- }
-
- /*
- * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
- * freq2k.
- *
- * delta = 1e6 *
- * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
- * freq2k;
- *
- * and we would like delta <= budget.
- *
- * If the discrepancy is above the PPM-based budget, always prefer to
- * improve upon the previous solution. However, if you're within the
- * budget, try to maximize Ref * VCO, that is N / (P * R^2).
- */
- a = freq2k * budget * p * r2;
- b = freq2k * budget * best->p * best->r2;
- diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
- diff_best = abs_diff(freq2k * best->p * best->r2,
- LC_FREQ_2K * best->n2);
- c = 1000000 * diff;
- d = 1000000 * diff_best;
-
- if (a < c && b < d) {
- /* If both are above the budget, pick the closer */
- if (best->p * best->r2 * diff < p * r2 * diff_best) {
- best->p = p;
- best->n2 = n2;
- best->r2 = r2;
- }
- } else if (a >= c && b < d) {
- /* If A is below the threshold but B is above it? Update. */
- best->p = p;
- best->n2 = n2;
- best->r2 = r2;
- } else if (a >= c && b >= d) {
- /* Both are below the limit, so pick the higher n2/(r2*r2) */
- if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
- best->p = p;
- best->n2 = n2;
- best->r2 = r2;
- }
- }
- /* Otherwise a < c && b >= d, do nothing */
-}
static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
i915_reg_t reg)
@@ -1147,363 +993,20 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
bxt_ddi_clock_get(encoder, pipe_config);
}
-static void
-hsw_ddi_calculate_wrpll(int clock /* in Hz */,
- unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
-{
- uint64_t freq2k;
- unsigned p, n2, r2;
- struct hsw_wrpll_rnp best = { 0, 0, 0 };
- unsigned budget;
-
- freq2k = clock / 100;
-
- budget = hsw_wrpll_get_budget_for_freq(clock);
-
- /* Special case handling for 540 pixel clock: bypass WR PLL entirely
- * and directly pass the LC PLL to it. */
- if (freq2k == 5400000) {
- *n2_out = 2;
- *p_out = 1;
- *r2_out = 2;
- return;
- }
-
- /*
- * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
- * the WR PLL.
- *
- * We want R so that REF_MIN <= Ref <= REF_MAX.
- * Injecting R2 = 2 * R gives:
- * REF_MAX * r2 > LC_FREQ * 2 and
- * REF_MIN * r2 < LC_FREQ * 2
- *
- * Which means the desired boundaries for r2 are:
- * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
- *
- */
- for (r2 = LC_FREQ * 2 / REF_MAX + 1;
- r2 <= LC_FREQ * 2 / REF_MIN;
- r2++) {
-
- /*
- * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
- *
- * Once again we want VCO_MIN <= VCO <= VCO_MAX.
- * Injecting R2 = 2 * R and N2 = 2 * N, we get:
- * VCO_MAX * r2 > n2 * LC_FREQ and
- * VCO_MIN * r2 < n2 * LC_FREQ)
- *
- * Which means the desired boundaries for n2 are:
- * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
- */
- for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
- n2 <= VCO_MAX * r2 / LC_FREQ;
- n2++) {
-
- for (p = P_MIN; p <= P_MAX; p += P_INC)
- hsw_wrpll_update_rnp(freq2k, budget,
- r2, n2, p, &best);
- }
- }
-
- *n2_out = best.n2;
- *p_out = best.p;
- *r2_out = best.r2;
-}
-
static bool
hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state,
struct intel_encoder *intel_encoder)
{
- int clock = crtc_state->port_clock;
-
- if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
- struct intel_shared_dpll *pll;
- uint32_t val;
- unsigned p, n2, r2;
-
- hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
-
- val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
- WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
- WRPLL_DIVIDER_POST(p);
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- crtc_state->dpll_hw_state.wrpll = val;
-
- pll = intel_get_shared_dpll(intel_crtc, crtc_state);
- if (pll == NULL) {
- DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
- pipe_name(intel_crtc->pipe));
- return false;
- }
-
- crtc_state->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
- } else if (crtc_state->ddi_pll_sel == PORT_CLK_SEL_SPLL) {
- struct drm_atomic_state *state = crtc_state->base.state;
- struct intel_shared_dpll_config *spll =
- &intel_atomic_get_shared_dpll_state(state)[DPLL_ID_SPLL];
-
- if (spll->crtc_mask &&
- WARN_ON(spll->hw_state.spll != crtc_state->dpll_hw_state.spll))
- return false;
-
- crtc_state->shared_dpll = DPLL_ID_SPLL;
- spll->hw_state.spll = crtc_state->dpll_hw_state.spll;
- spll->crtc_mask |= 1 << intel_crtc->pipe;
- }
-
- return true;
-}
-
-struct skl_wrpll_context {
- uint64_t min_deviation; /* current minimal deviation */
- uint64_t central_freq; /* chosen central freq */
- uint64_t dco_freq; /* chosen dco freq */
- unsigned int p; /* chosen divider */
-};
-
-static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
-{
- memset(ctx, 0, sizeof(*ctx));
-
- ctx->min_deviation = U64_MAX;
-}
-
-/* DCO freq must be within +1%/-6% of the DCO central freq */
-#define SKL_DCO_MAX_PDEVIATION 100
-#define SKL_DCO_MAX_NDEVIATION 600
-
-static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
- uint64_t central_freq,
- uint64_t dco_freq,
- unsigned int divider)
-{
- uint64_t deviation;
-
- deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
- central_freq);
-
- /* positive deviation */
- if (dco_freq >= central_freq) {
- if (deviation < SKL_DCO_MAX_PDEVIATION &&
- deviation < ctx->min_deviation) {
- ctx->min_deviation = deviation;
- ctx->central_freq = central_freq;
- ctx->dco_freq = dco_freq;
- ctx->p = divider;
- }
- /* negative deviation */
- } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
- deviation < ctx->min_deviation) {
- ctx->min_deviation = deviation;
- ctx->central_freq = central_freq;
- ctx->dco_freq = dco_freq;
- ctx->p = divider;
- }
-}
-
-static void skl_wrpll_get_multipliers(unsigned int p,
- unsigned int *p0 /* out */,
- unsigned int *p1 /* out */,
- unsigned int *p2 /* out */)
-{
- /* even dividers */
- if (p % 2 == 0) {
- unsigned int half = p / 2;
-
- if (half == 1 || half == 2 || half == 3 || half == 5) {
- *p0 = 2;
- *p1 = 1;
- *p2 = half;
- } else if (half % 2 == 0) {
- *p0 = 2;
- *p1 = half / 2;
- *p2 = 2;
- } else if (half % 3 == 0) {
- *p0 = 3;
- *p1 = half / 3;
- *p2 = 2;
- } else if (half % 7 == 0) {
- *p0 = 7;
- *p1 = half / 7;
- *p2 = 2;
- }
- } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
- *p0 = 3;
- *p1 = 1;
- *p2 = p / 3;
- } else if (p == 5 || p == 7) {
- *p0 = p;
- *p1 = 1;
- *p2 = 1;
- } else if (p == 15) {
- *p0 = 3;
- *p1 = 1;
- *p2 = 5;
- } else if (p == 21) {
- *p0 = 7;
- *p1 = 1;
- *p2 = 3;
- } else if (p == 35) {
- *p0 = 7;
- *p1 = 1;
- *p2 = 5;
- }
-}
-
-struct skl_wrpll_params {
- uint32_t dco_fraction;
- uint32_t dco_integer;
- uint32_t qdiv_ratio;
- uint32_t qdiv_mode;
- uint32_t kdiv;
- uint32_t pdiv;
- uint32_t central_freq;
-};
-
-static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
- uint64_t afe_clock,
- uint64_t central_freq,
- uint32_t p0, uint32_t p1, uint32_t p2)
-{
- uint64_t dco_freq;
-
- switch (central_freq) {
- case 9600000000ULL:
- params->central_freq = 0;
- break;
- case 9000000000ULL:
- params->central_freq = 1;
- break;
- case 8400000000ULL:
- params->central_freq = 3;
- }
-
- switch (p0) {
- case 1:
- params->pdiv = 0;
- break;
- case 2:
- params->pdiv = 1;
- break;
- case 3:
- params->pdiv = 2;
- break;
- case 7:
- params->pdiv = 4;
- break;
- default:
- WARN(1, "Incorrect PDiv\n");
- }
-
- switch (p2) {
- case 5:
- params->kdiv = 0;
- break;
- case 2:
- params->kdiv = 1;
- break;
- case 3:
- params->kdiv = 2;
- break;
- case 1:
- params->kdiv = 3;
- break;
- default:
- WARN(1, "Incorrect KDiv\n");
- }
-
- params->qdiv_ratio = p1;
- params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
-
- dco_freq = p0 * p1 * p2 * afe_clock;
-
- /*
- * Intermediate values are in Hz.
- * Divide by MHz to match bsepc
- */
- params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
- params->dco_fraction =
- div_u64((div_u64(dco_freq, 24) -
- params->dco_integer * MHz(1)) * 0x8000, MHz(1));
-}
-
-static bool
-skl_ddi_calculate_wrpll(int clock /* in Hz */,
- struct skl_wrpll_params *wrpll_params)
-{
- uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
- uint64_t dco_central_freq[3] = {8400000000ULL,
- 9000000000ULL,
- 9600000000ULL};
- static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
- 24, 28, 30, 32, 36, 40, 42, 44,
- 48, 52, 54, 56, 60, 64, 66, 68,
- 70, 72, 76, 78, 80, 84, 88, 90,
- 92, 96, 98 };
- static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
- static const struct {
- const int *list;
- int n_dividers;
- } dividers[] = {
- { even_dividers, ARRAY_SIZE(even_dividers) },
- { odd_dividers, ARRAY_SIZE(odd_dividers) },
- };
- struct skl_wrpll_context ctx;
- unsigned int dco, d, i;
- unsigned int p0, p1, p2;
-
- skl_wrpll_context_init(&ctx);
-
- for (d = 0; d < ARRAY_SIZE(dividers); d++) {
- for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
- for (i = 0; i < dividers[d].n_dividers; i++) {
- unsigned int p = dividers[d].list[i];
- uint64_t dco_freq = p * afe_clock;
-
- skl_wrpll_try_divider(&ctx,
- dco_central_freq[dco],
- dco_freq,
- p);
- /*
- * Skip the remaining dividers if we're sure to
- * have found the definitive divider, we can't
- * improve a 0 deviation.
- */
- if (ctx.min_deviation == 0)
- goto skip_remaining_dividers;
- }
- }
-
-skip_remaining_dividers:
- /*
- * If a solution is found with an even divider, prefer
- * this one.
- */
- if (d == 0 && ctx.p)
- break;
- }
-
- if (!ctx.p) {
- DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
- return false;
- }
+ struct intel_shared_dpll *pll;
- /*
- * gcc incorrectly analyses that these can be used without being
- * initialized. To be fair, it's hard to guess.
- */
- p0 = p1 = p2 = 0;
- skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
- skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
- p0, p1, p2);
+ pll = intel_get_shared_dpll(intel_crtc, crtc_state,
+ intel_encoder);
+ if (!pll)
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+ pipe_name(intel_crtc->pipe));
- return true;
+ return pll;
}
static bool
@@ -1512,218 +1015,23 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_encoder *intel_encoder)
{
struct intel_shared_dpll *pll;
- uint32_t ctrl1, cfgcr1, cfgcr2;
- int clock = crtc_state->port_clock;
-
- /*
- * See comment in intel_dpll_hw_state to understand why we always use 0
- * as the DPLL id in this function.
- */
-
- ctrl1 = DPLL_CTRL1_OVERRIDE(0);
-
- if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
- struct skl_wrpll_params wrpll_params = { 0, };
-
- ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
-
- if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
- return false;
-
- cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
- DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
- wrpll_params.dco_integer;
-
- cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
- DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
- DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
- DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
- wrpll_params.central_freq;
- } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
- intel_encoder->type == INTEL_OUTPUT_DP_MST) {
- switch (crtc_state->port_clock / 2) {
- case 81000:
- ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
- break;
- case 135000:
- ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
- break;
- case 270000:
- ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
- break;
- }
-
- cfgcr1 = cfgcr2 = 0;
- } else if (intel_encoder->type == INTEL_OUTPUT_EDP) {
- return true;
- } else
- return false;
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- crtc_state->dpll_hw_state.ctrl1 = ctrl1;
- crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
- crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
-
- pll = intel_get_shared_dpll(intel_crtc, crtc_state);
+ pll = intel_get_shared_dpll(intel_crtc, crtc_state, intel_encoder);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(intel_crtc->pipe));
return false;
}
- /* shared DPLL id 0 is DPLL 1 */
- crtc_state->ddi_pll_sel = pll->id + 1;
-
return true;
}
-/* bxt clock parameters */
-struct bxt_clk_div {
- int clock;
- uint32_t p1;
- uint32_t p2;
- uint32_t m2_int;
- uint32_t m2_frac;
- bool m2_frac_en;
- uint32_t n;
-};
-
-/* pre-calculated values for DP linkrates */
-static const struct bxt_clk_div bxt_dp_clk_val[] = {
- {162000, 4, 2, 32, 1677722, 1, 1},
- {270000, 4, 1, 27, 0, 0, 1},
- {540000, 2, 1, 27, 0, 0, 1},
- {216000, 3, 2, 32, 1677722, 1, 1},
- {243000, 4, 1, 24, 1258291, 1, 1},
- {324000, 4, 1, 32, 1677722, 1, 1},
- {432000, 3, 1, 32, 1677722, 1, 1}
-};
-
static bool
bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state,
struct intel_encoder *intel_encoder)
{
- struct intel_shared_dpll *pll;
- struct bxt_clk_div clk_div = {0};
- int vco = 0;
- uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
- uint32_t lanestagger;
- int clock = crtc_state->port_clock;
-
- if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
- intel_clock_t best_clock;
-
- /* Calculate HDMI div */
- /*
- * FIXME: tie the following calculation into
- * i9xx_crtc_compute_clock
- */
- if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
- DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
- clock, pipe_name(intel_crtc->pipe));
- return false;
- }
-
- clk_div.p1 = best_clock.p1;
- clk_div.p2 = best_clock.p2;
- WARN_ON(best_clock.m1 != 2);
- clk_div.n = best_clock.n;
- clk_div.m2_int = best_clock.m2 >> 22;
- clk_div.m2_frac = best_clock.m2 & ((1 << 22) - 1);
- clk_div.m2_frac_en = clk_div.m2_frac != 0;
-
- vco = best_clock.vco;
- } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
- intel_encoder->type == INTEL_OUTPUT_EDP) {
- int i;
-
- clk_div = bxt_dp_clk_val[0];
- for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
- if (bxt_dp_clk_val[i].clock == clock) {
- clk_div = bxt_dp_clk_val[i];
- break;
- }
- }
- vco = clock * 10 / 2 * clk_div.p1 * clk_div.p2;
- }
-
- if (vco >= 6200000 && vco <= 6700000) {
- prop_coef = 4;
- int_coef = 9;
- gain_ctl = 3;
- targ_cnt = 8;
- } else if ((vco > 5400000 && vco < 6200000) ||
- (vco >= 4800000 && vco < 5400000)) {
- prop_coef = 5;
- int_coef = 11;
- gain_ctl = 3;
- targ_cnt = 9;
- } else if (vco == 5400000) {
- prop_coef = 3;
- int_coef = 8;
- gain_ctl = 1;
- targ_cnt = 9;
- } else {
- DRM_ERROR("Invalid VCO\n");
- return false;
- }
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (clock > 270000)
- lanestagger = 0x18;
- else if (clock > 135000)
- lanestagger = 0x0d;
- else if (clock > 67000)
- lanestagger = 0x07;
- else if (clock > 33000)
- lanestagger = 0x04;
- else
- lanestagger = 0x02;
-
- crtc_state->dpll_hw_state.ebb0 =
- PORT_PLL_P1(clk_div.p1) | PORT_PLL_P2(clk_div.p2);
- crtc_state->dpll_hw_state.pll0 = clk_div.m2_int;
- crtc_state->dpll_hw_state.pll1 = PORT_PLL_N(clk_div.n);
- crtc_state->dpll_hw_state.pll2 = clk_div.m2_frac;
-
- if (clk_div.m2_frac_en)
- crtc_state->dpll_hw_state.pll3 =
- PORT_PLL_M2_FRAC_ENABLE;
-
- crtc_state->dpll_hw_state.pll6 =
- prop_coef | PORT_PLL_INT_COEFF(int_coef);
- crtc_state->dpll_hw_state.pll6 |=
- PORT_PLL_GAIN_CTL(gain_ctl);
-
- crtc_state->dpll_hw_state.pll8 = targ_cnt;
-
- crtc_state->dpll_hw_state.pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
-
- crtc_state->dpll_hw_state.pll10 =
- PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
- | PORT_PLL_DCO_AMP_OVR_EN_H;
-
- crtc_state->dpll_hw_state.ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
-
- crtc_state->dpll_hw_state.pcsdw12 =
- LANESTAGGER_STRAP_OVRD | lanestagger;
-
- pll = intel_get_shared_dpll(intel_crtc, crtc_state);
- if (pll == NULL) {
- DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
- pipe_name(intel_crtc->pipe));
- return false;
- }
-
- /* shared DPLL id 0 is DPLL A */
- crtc_state->ddi_pll_sel = pll->id;
-
- return true;
+ return !!intel_get_shared_dpll(intel_crtc, crtc_state, intel_encoder);
}
/*
@@ -1761,6 +1069,8 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
uint32_t temp;
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
+ WARN_ON(transcoder_is_dsi(cpu_transcoder));
+
temp = TRANS_MSA_SYNC_CLK;
switch (intel_crtc->config->pipe_bpp) {
case 18:
@@ -2129,7 +1439,7 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
u32 n_entries, i;
uint32_t val;
- if (type == INTEL_OUTPUT_EDP && dev_priv->edp_low_vswing) {
+ if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
ddi_translations = bxt_ddi_translations_edp;
} else if (type == INTEL_OUTPUT_DISPLAYPORT
@@ -2267,24 +1577,6 @@ void intel_ddi_clk_select(struct intel_encoder *encoder,
uint32_t dpll = pipe_config->ddi_pll_sel;
uint32_t val;
- /*
- * DPLL0 is used for eDP and is the only "private" DPLL (as
- * opposed to shared) on SKL
- */
- if (encoder->type == INTEL_OUTPUT_EDP) {
- WARN_ON(dpll != SKL_DPLL0);
-
- val = I915_READ(DPLL_CTRL1);
-
- val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
- DPLL_CTRL1_SSC(dpll) |
- DPLL_CTRL1_LINK_RATE_MASK(dpll));
- val |= pipe_config->dpll_hw_state.ctrl1 << (dpll * 6);
-
- I915_WRITE(DPLL_CTRL1, val);
- POSTING_READ(DPLL_CTRL1);
- }
-
/* DDI -> PLL mapping */
val = I915_READ(DPLL_CTRL2);
@@ -2450,251 +1742,101 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
}
}
-static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
-{
- I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll);
- POSTING_READ(WRPLL_CTL(pll->id));
- udelay(20);
-}
-
-static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
-{
- I915_WRITE(SPLL_CTL, pll->config.hw_state.spll);
- POSTING_READ(SPLL_CTL);
- udelay(20);
-}
-
-static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
-{
- uint32_t val;
-
- val = I915_READ(WRPLL_CTL(pll->id));
- I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
- POSTING_READ(WRPLL_CTL(pll->id));
-}
-
-static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
+static bool broxton_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
- uint32_t val;
-
- val = I915_READ(SPLL_CTL);
- I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
- POSTING_READ(SPLL_CTL);
-}
-
-static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
-{
- uint32_t val;
-
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
return false;
- val = I915_READ(WRPLL_CTL(pll->id));
- hw_state->wrpll = val;
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
-
- return val & WRPLL_PLL_ENABLE;
-}
-
-static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
-{
- uint32_t val;
+ if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
+ (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
+ DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
+ phy);
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
+ }
- val = I915_READ(SPLL_CTL);
- hw_state->spll = val;
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
-
- return val & SPLL_PLL_ENABLE;
-}
-
-
-static const char * const hsw_ddi_pll_names[] = {
- "WRPLL 1",
- "WRPLL 2",
- "SPLL"
-};
+ if (phy == DPIO_PHY1 &&
+ !(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE)) {
+ DRM_DEBUG_DRIVER("DDI PHY 1 powered, but GRC isn't done\n");
-static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv)
-{
- int i;
+ return false;
+ }
- dev_priv->num_shared_dpll = 3;
+ if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
+ DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
+ phy);
- for (i = 0; i < 2; i++) {
- dev_priv->shared_dplls[i].id = i;
- dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
- dev_priv->shared_dplls[i].disable = hsw_ddi_wrpll_disable;
- dev_priv->shared_dplls[i].enable = hsw_ddi_wrpll_enable;
- dev_priv->shared_dplls[i].get_hw_state =
- hsw_ddi_wrpll_get_hw_state;
+ return false;
}
- /* SPLL is special, but needs to be initialized anyway.. */
- dev_priv->shared_dplls[i].id = i;
- dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
- dev_priv->shared_dplls[i].disable = hsw_ddi_spll_disable;
- dev_priv->shared_dplls[i].enable = hsw_ddi_spll_enable;
- dev_priv->shared_dplls[i].get_hw_state = hsw_ddi_spll_get_hw_state;
-
+ return true;
}
-static const char * const skl_ddi_pll_names[] = {
- "DPLL 1",
- "DPLL 2",
- "DPLL 3",
-};
-
-struct skl_dpll_regs {
- i915_reg_t ctl, cfgcr1, cfgcr2;
-};
-
-/* this array is indexed by the *shared* pll id */
-static const struct skl_dpll_regs skl_dpll_regs[3] = {
- {
- /* DPLL 1 */
- .ctl = LCPLL2_CTL,
- .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
- .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
- },
- {
- /* DPLL 2 */
- .ctl = WRPLL_CTL(0),
- .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
- .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
- },
- {
- /* DPLL 3 */
- .ctl = WRPLL_CTL(1),
- .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
- .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
- },
-};
-
-static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
+static u32 broxton_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
- uint32_t val;
- unsigned int dpll;
- const struct skl_dpll_regs *regs = skl_dpll_regs;
+ u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
- /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
- dpll = pll->id + 1;
-
- val = I915_READ(DPLL_CTRL1);
-
- val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | DPLL_CTRL1_SSC(dpll) |
- DPLL_CTRL1_LINK_RATE_MASK(dpll));
- val |= pll->config.hw_state.ctrl1 << (dpll * 6);
-
- I915_WRITE(DPLL_CTRL1, val);
- POSTING_READ(DPLL_CTRL1);
-
- I915_WRITE(regs[pll->id].cfgcr1, pll->config.hw_state.cfgcr1);
- I915_WRITE(regs[pll->id].cfgcr2, pll->config.hw_state.cfgcr2);
- POSTING_READ(regs[pll->id].cfgcr1);
- POSTING_READ(regs[pll->id].cfgcr2);
-
- /* the enable bit is always bit 31 */
- I915_WRITE(regs[pll->id].ctl,
- I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
-
- if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(dpll), 5))
- DRM_ERROR("DPLL %d not locked\n", dpll);
+ return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
}
-static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
+static void broxton_phy_wait_grc_done(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
- const struct skl_dpll_regs *regs = skl_dpll_regs;
-
- /* the enable bit is always bit 31 */
- I915_WRITE(regs[pll->id].ctl,
- I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE);
- POSTING_READ(regs[pll->id].ctl);
+ if (wait_for(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE, 10))
+ DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
}
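The wait_for() used above is the driver's bounded-poll helper; a rough sketch of the pattern it stands for, written out with an explicit budget rather than the macro (names are illustrative, not the macro's actual implementation):

/* Hedged sketch only: poll GRC_DONE with a millisecond budget. */
static int example_wait_grc_done(struct drm_i915_private *dev_priv,
				 enum dpio_phy phy, unsigned int timeout_ms)
{
	unsigned int waited_us = 0;

	while (!(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE)) {
		if (waited_us >= timeout_ms * 1000)
			return -ETIMEDOUT;
		udelay(10);		/* coarse spacing between reads */
		waited_us += 10;
	}

	return 0;
}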
-static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
-{
- uint32_t val;
- unsigned int dpll;
- const struct skl_dpll_regs *regs = skl_dpll_regs;
- bool ret;
-
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
- return false;
+static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
- ret = false;
-
- /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
- dpll = pll->id + 1;
-
- val = I915_READ(regs[pll->id].ctl);
- if (!(val & LCPLL_PLL_ENABLE))
- goto out;
-
- val = I915_READ(DPLL_CTRL1);
- hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f;
-
- /* avoid reading back stale values if HDMI mode is not enabled */
- if (val & DPLL_CTRL1_HDMI_MODE(dpll)) {
- hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
- hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
- }
- ret = true;
-
-out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+static void broxton_phy_init(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ enum port port;
+ u32 ports, val;
- return ret;
-}
+ if (broxton_phy_is_enabled(dev_priv, phy)) {
+ /* Still read out the GRC value for state verification */
+ if (phy == DPIO_PHY0)
+ dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv, phy);
-static void skl_shared_dplls_init(struct drm_i915_private *dev_priv)
-{
- int i;
+ if (broxton_phy_verify_state(dev_priv, phy)) {
+ DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
+ "won't reprogram it\n", phy);
- dev_priv->num_shared_dpll = 3;
+ return;
+ }
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- dev_priv->shared_dplls[i].id = i;
- dev_priv->shared_dplls[i].name = skl_ddi_pll_names[i];
- dev_priv->shared_dplls[i].disable = skl_ddi_pll_disable;
- dev_priv->shared_dplls[i].enable = skl_ddi_pll_enable;
- dev_priv->shared_dplls[i].get_hw_state =
- skl_ddi_pll_get_hw_state;
+ DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
+ "force reprogramming it\n", phy);
+ } else {
+ DRM_DEBUG_DRIVER("DDI PHY %d not enabled, enabling it\n", phy);
}
-}
-
-static void broxton_phy_init(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
-{
- enum port port;
- uint32_t val;
val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
val |= GT_DISPLAY_POWER_ON(phy);
I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
- /* Considering 10ms timeout until BSpec is updated */
- if (wait_for(I915_READ(BXT_PORT_CL1CM_DW0(phy)) & PHY_POWER_GOOD, 10))
+ /*
+ * The PHY registers start out inaccessible and respond to reads with
+ * all 1s. Eventually they become accessible as they power up, then
+ * the reserved bit will give the default 0. Poll on the reserved bit
+ * becoming 0 to find when the PHY is accessible.
+ * HW team confirmed that the time to reach phypowergood status is
+ * anywhere between 50 us and 100 us.
+ */
+ if (wait_for_us(((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
+ (PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD), 100)) {
DRM_ERROR("timeout during PHY%d power on\n", phy);
+ }
+
+ if (phy == DPIO_PHY0)
+ ports = BIT(PORT_B) | BIT(PORT_C);
+ else
+ ports = BIT(PORT_A);
- for (port = (phy == DPIO_PHY0 ? PORT_B : PORT_A);
- port <= (phy == DPIO_PHY0 ? PORT_C : PORT_A); port++) {
+ for_each_port_masked(port, ports) {
int lane;
for (lane = 0; lane < 4; lane++) {
@@ -2742,6 +1884,9 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
* enabled.
* TODO: port C is only connected on BXT-P, so on BXT0/1 we should
* power down the second channel on PHY0 as well.
+ *
+ * FIXME: Clarify programming of the following; the register is
+ * read-only with bit 6 fixed at 0 at least in stepping A.
*/
if (phy == DPIO_PHY1)
val |= OCL2_LDOFUSE_PWR_DIS;
@@ -2754,12 +1899,10 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
* the corresponding calibrated value from PHY1, and disable
* the automatic calibration on PHY0.
*/
- if (wait_for(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE,
- 10))
- DRM_ERROR("timeout waiting for PHY1 GRC\n");
+ broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1);
- val = I915_READ(BXT_PORT_REF_DW6(DPIO_PHY1));
- val = (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
+ val = dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv,
+ DPIO_PHY1);
grc_code = val << GRC_CODE_FAST_SHIFT |
val << GRC_CODE_SLOW_SHIFT |
val;
@@ -2769,17 +1912,27 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
val |= GRC_DIS | GRC_RDY_OVRD;
I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val);
}
+ /*
+ * During PHY1 init, defer waiting for the GRC calibration to finish, since
+ * it can happen in parallel with the subsequent PHY0 init.
+ */
val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
val |= COMMON_RESET_DIS;
I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
}
-void broxton_ddi_phy_init(struct drm_device *dev)
+void broxton_ddi_phy_init(struct drm_i915_private *dev_priv)
{
/* Enable PHY1 first since it provides Rcomp for PHY0 */
- broxton_phy_init(dev->dev_private, DPIO_PHY1);
- broxton_phy_init(dev->dev_private, DPIO_PHY0);
+ broxton_phy_init(dev_priv, DPIO_PHY1);
+ broxton_phy_init(dev_priv, DPIO_PHY0);
+
+ /*
+ * If BIOS enabled only PHY0 and not PHY1, we skipped waiting for the
+ * PHY1 GRC calibration to finish, so wait for it here.
+ */
+ broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1);
}
static void broxton_phy_uninit(struct drm_i915_private *dev_priv,
@@ -2790,260 +1943,126 @@ static void broxton_phy_uninit(struct drm_i915_private *dev_priv,
val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
val &= ~COMMON_RESET_DIS;
I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+
+ val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+ val &= ~GT_DISPLAY_POWER_ON(phy);
+ I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
}
-void broxton_ddi_phy_uninit(struct drm_device *dev)
+void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
broxton_phy_uninit(dev_priv, DPIO_PHY1);
broxton_phy_uninit(dev_priv, DPIO_PHY0);
-
- /* FIXME: do this in broxton_phy_uninit per phy */
- I915_WRITE(BXT_P_CR_GT_DISP_PWRON, 0);
}
-static const char * const bxt_ddi_pll_names[] = {
- "PORT PLL A",
- "PORT PLL B",
- "PORT PLL C",
-};
-
-static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
+static bool __printf(6, 7)
+__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ i915_reg_t reg, u32 mask, u32 expected,
+ const char *reg_fmt, ...)
{
- uint32_t temp;
- enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
-
- temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
- temp &= ~PORT_PLL_REF_SEL;
- /* Non-SSC reference */
- I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
-
- /* Disable 10 bit clock */
- temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
- temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
- I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
-
- /* Write P1 & P2 */
- temp = I915_READ(BXT_PORT_PLL_EBB_0(port));
- temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
- temp |= pll->config.hw_state.ebb0;
- I915_WRITE(BXT_PORT_PLL_EBB_0(port), temp);
-
- /* Write M2 integer */
- temp = I915_READ(BXT_PORT_PLL(port, 0));
- temp &= ~PORT_PLL_M2_MASK;
- temp |= pll->config.hw_state.pll0;
- I915_WRITE(BXT_PORT_PLL(port, 0), temp);
-
- /* Write N */
- temp = I915_READ(BXT_PORT_PLL(port, 1));
- temp &= ~PORT_PLL_N_MASK;
- temp |= pll->config.hw_state.pll1;
- I915_WRITE(BXT_PORT_PLL(port, 1), temp);
-
- /* Write M2 fraction */
- temp = I915_READ(BXT_PORT_PLL(port, 2));
- temp &= ~PORT_PLL_M2_FRAC_MASK;
- temp |= pll->config.hw_state.pll2;
- I915_WRITE(BXT_PORT_PLL(port, 2), temp);
-
- /* Write M2 fraction enable */
- temp = I915_READ(BXT_PORT_PLL(port, 3));
- temp &= ~PORT_PLL_M2_FRAC_ENABLE;
- temp |= pll->config.hw_state.pll3;
- I915_WRITE(BXT_PORT_PLL(port, 3), temp);
-
- /* Write coeff */
- temp = I915_READ(BXT_PORT_PLL(port, 6));
- temp &= ~PORT_PLL_PROP_COEFF_MASK;
- temp &= ~PORT_PLL_INT_COEFF_MASK;
- temp &= ~PORT_PLL_GAIN_CTL_MASK;
- temp |= pll->config.hw_state.pll6;
- I915_WRITE(BXT_PORT_PLL(port, 6), temp);
-
- /* Write calibration val */
- temp = I915_READ(BXT_PORT_PLL(port, 8));
- temp &= ~PORT_PLL_TARGET_CNT_MASK;
- temp |= pll->config.hw_state.pll8;
- I915_WRITE(BXT_PORT_PLL(port, 8), temp);
-
- temp = I915_READ(BXT_PORT_PLL(port, 9));
- temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
- temp |= pll->config.hw_state.pll9;
- I915_WRITE(BXT_PORT_PLL(port, 9), temp);
-
- temp = I915_READ(BXT_PORT_PLL(port, 10));
- temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
- temp &= ~PORT_PLL_DCO_AMP_MASK;
- temp |= pll->config.hw_state.pll10;
- I915_WRITE(BXT_PORT_PLL(port, 10), temp);
-
- /* Recalibrate with new settings */
- temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
- temp |= PORT_PLL_RECALIBRATE;
- I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
- temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
- temp |= pll->config.hw_state.ebb4;
- I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
-
- /* Enable PLL */
- temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
- temp |= PORT_PLL_ENABLE;
- I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
- POSTING_READ(BXT_PORT_PLL_ENABLE(port));
-
- if (wait_for_atomic_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
- PORT_PLL_LOCK), 200))
- DRM_ERROR("PLL %d not locked\n", port);
+ struct va_format vaf;
+ va_list args;
+ u32 val;
- /*
- * While we write to the group register to program all lanes at once we
- * can read only lane registers and we pick lanes 0/1 for that.
- */
- temp = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
- temp &= ~LANE_STAGGER_MASK;
- temp &= ~LANESTAGGER_STRAP_OVRD;
- temp |= pll->config.hw_state.pcsdw12;
- I915_WRITE(BXT_PORT_PCS_DW12_GRP(port), temp);
-}
-
-static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
-{
- enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
- uint32_t temp;
-
- temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
- temp &= ~PORT_PLL_ENABLE;
- I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
- POSTING_READ(BXT_PORT_PLL_ENABLE(port));
-}
+ val = I915_READ(reg);
+ if ((val & mask) == expected)
+ return true;
-static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
-{
- enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
- uint32_t val;
- bool ret;
+ va_start(args, reg_fmt);
+ vaf.fmt = reg_fmt;
+ vaf.va = &args;
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
- return false;
+ DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
+ "current %08x, expected %08x (mask %08x)\n",
+ phy, &vaf, reg.reg, val, (val & ~mask) | expected,
+ mask);
- ret = false;
+ va_end(args);
- val = I915_READ(BXT_PORT_PLL_ENABLE(port));
- if (!(val & PORT_PLL_ENABLE))
- goto out;
+ return false;
+}
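The helper forwards its varargs through a struct va_format and the %pV printk specifier, which is what lets callers pass a printf-style register name. A small hedged sketch of the same idiom on its own (function name and message are illustrative):

/* Hedged sketch of the %pV forwarding idiom. */
static void __printf(1, 2) example_vdbg(const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_debug("phy check: %pV\n", &vaf);	/* fmt/args expanded inside printk */
	va_end(args);
}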
- hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
- hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
+static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ enum port port;
+ u32 ports;
+ uint32_t mask;
+ bool ok;
- hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(port));
- hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
+#define _CHK(reg, mask, exp, fmt, ...) \
+ __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
+ ## __VA_ARGS__)
- hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0));
- hw_state->pll0 &= PORT_PLL_M2_MASK;
+ /* We expect the PHY to be always enabled */
+ if (!broxton_phy_is_enabled(dev_priv, phy))
+ return false;
- hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1));
- hw_state->pll1 &= PORT_PLL_N_MASK;
+ ok = true;
- hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2));
- hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
+ if (phy == DPIO_PHY0)
+ ports = BIT(PORT_B) | BIT(PORT_C);
+ else
+ ports = BIT(PORT_A);
- hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3));
- hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
+ for_each_port_masked(port, ports) {
+ int lane;
- hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6));
- hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
- PORT_PLL_INT_COEFF_MASK |
- PORT_PLL_GAIN_CTL_MASK;
+ for (lane = 0; lane < 4; lane++)
+ ok &= _CHK(BXT_PORT_TX_DW14_LN(port, lane),
+ LATENCY_OPTIM,
+ lane != 1 ? LATENCY_OPTIM : 0,
+ "BXT_PORT_TX_DW14_LN(%d, %d)", port, lane);
+ }
- hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8));
- hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
+ /* PLL Rcomp code offset */
+ ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
+ IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
+ "BXT_PORT_CL1CM_DW9(%d)", phy);
+ ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
+ IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
+ "BXT_PORT_CL1CM_DW10(%d)", phy);
- hw_state->pll9 = I915_READ(BXT_PORT_PLL(port, 9));
- hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
+ /* Power gating */
+ mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
+ ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
+ "BXT_PORT_CL1CM_DW28(%d)", phy);
- hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10));
- hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
- PORT_PLL_DCO_AMP_MASK;
+ if (phy == DPIO_PHY0)
+ ok &= _CHK(BXT_PORT_CL2CM_DW6_BC,
+ DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
+ "BXT_PORT_CL2CM_DW6_BC");
/*
- * While we write to the group register to program all lanes at once we
- * can read only lane registers. We configure all lanes the same way, so
- * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
+ * TODO: Verify BXT_PORT_CL1CM_DW30 bit OCL2_LDOFUSE_PWR_DIS,
+ * at least on stepping A this bit is read-only and fixed at 0.
*/
- hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
- if (I915_READ(BXT_PORT_PCS_DW12_LN23(port)) != hw_state->pcsdw12)
- DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
- hw_state->pcsdw12,
- I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
- hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
-
- ret = true;
-
-out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
- return ret;
-}
-
-static void bxt_shared_dplls_init(struct drm_i915_private *dev_priv)
-{
- int i;
+ if (phy == DPIO_PHY0) {
+ u32 grc_code = dev_priv->bxt_phy_grc;
- dev_priv->num_shared_dpll = 3;
+ grc_code = grc_code << GRC_CODE_FAST_SHIFT |
+ grc_code << GRC_CODE_SLOW_SHIFT |
+ grc_code;
+ mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
+ GRC_CODE_NOM_MASK;
+ ok &= _CHK(BXT_PORT_REF_DW6(DPIO_PHY0), mask, grc_code,
+ "BXT_PORT_REF_DW6(%d)", DPIO_PHY0);
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- dev_priv->shared_dplls[i].id = i;
- dev_priv->shared_dplls[i].name = bxt_ddi_pll_names[i];
- dev_priv->shared_dplls[i].disable = bxt_ddi_pll_disable;
- dev_priv->shared_dplls[i].enable = bxt_ddi_pll_enable;
- dev_priv->shared_dplls[i].get_hw_state =
- bxt_ddi_pll_get_hw_state;
+ mask = GRC_DIS | GRC_RDY_OVRD;
+ ok &= _CHK(BXT_PORT_REF_DW8(DPIO_PHY0), mask, mask,
+ "BXT_PORT_REF_DW8(%d)", DPIO_PHY0);
}
+
+ return ok;
+#undef _CHK
}
-void intel_ddi_pll_init(struct drm_device *dev)
+void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t val = I915_READ(LCPLL_CTL);
-
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
- skl_shared_dplls_init(dev_priv);
- else if (IS_BROXTON(dev))
- bxt_shared_dplls_init(dev_priv);
- else
- hsw_shared_dplls_init(dev_priv);
-
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
- int cdclk_freq;
-
- cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
- dev_priv->skl_boot_cdclk = cdclk_freq;
- if (skl_sanitize_cdclk(dev_priv))
- DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n");
- if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
- DRM_ERROR("LCPLL1 is disabled\n");
- } else if (IS_BROXTON(dev)) {
- broxton_init_cdclk(dev);
- broxton_ddi_phy_init(dev);
- } else {
- /*
- * The LCPLL register should be turned on by the BIOS. For now
- * let's just check its state and print errors in case
- * something is wrong. Don't even try to turn it on.
- */
-
- if (val & LCPLL_CD_SOURCE_FCLK)
- DRM_ERROR("CDCLK source is not LCPLL\n");
-
- if (val & LCPLL_PLL_DISABLE)
- DRM_ERROR("LCPLL is disabled\n");
- }
+ if (!broxton_phy_verify_state(dev_priv, DPIO_PHY0) ||
+ !broxton_phy_verify_state(dev_priv, DPIO_PHY1))
+ i915_report_error(dev_priv, "DDI PHY state mismatch\n");
}
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -3098,12 +2117,18 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
uint32_t val;
- intel_ddi_post_disable(intel_encoder);
-
+ /*
+ * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
+ * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
+ * step 13 is the correct place for it. Step 18 is where it was
+ * originally before the BUN.
+ */
val = I915_READ(FDI_RX_CTL(PIPE_A));
val &= ~FDI_RX_ENABLE;
I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+ intel_ddi_post_disable(intel_encoder);
+
val = I915_READ(FDI_RX_MISC(PIPE_A));
val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
@@ -3127,6 +2152,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi;
u32 temp, flags = 0;
+ /* XXX: DSI transcoder paranoia */
+ if (WARN_ON(transcoder_is_dsi(cpu_transcoder)))
+ return;
+
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
@@ -3163,8 +2192,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
pipe_config->has_infoframe = true;
- break;
+ /* fall through */
case TRANS_DDI_MODE_SELECT_DVI:
+ pipe_config->lane_count = 4;
+ break;
case TRANS_DDI_MODE_SELECT_FDI:
break;
case TRANS_DDI_MODE_SELECT_DP_SST:
@@ -3184,8 +2215,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->has_audio = true;
}
- if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
- pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
+ if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.bpp &&
+ pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
/*
* This is a big fat ugly hack.
*
@@ -3200,8 +2231,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
* load.
*/
DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
- pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
- dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
+ pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
+ dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
}
intel_ddi_clock_get(encoder, pipe_config);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e5db9e1f6..3074c56a6 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -36,6 +36,7 @@
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "intel_dsi.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -96,12 +97,13 @@ static int intel_framebuffer_init(struct drm_device *dev,
struct drm_i915_gem_object *obj);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
+static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n,
struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
-static void intel_set_pipe_csc(struct drm_crtc *crtc);
+static void haswell_set_pipemisc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
@@ -110,13 +112,11 @@ static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state);
-static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
- int num_connectors);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
-static void intel_pre_disable_primary(struct drm_crtc *crtc);
+static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
typedef struct {
int min, max;
@@ -147,15 +147,12 @@ static int valleyview_get_vco(struct drm_i915_private *dev_priv)
return vco_freq[hpll_freq] * 1000;
}
-static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
- const char *name, u32 reg)
+int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
+ const char *name, u32 reg, int ref_freq)
{
u32 val;
int divider;
- if (dev_priv->hpll_freq == 0)
- dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
-
mutex_lock(&dev_priv->sb_lock);
val = vlv_cck_read(dev_priv, reg);
mutex_unlock(&dev_priv->sb_lock);
@@ -166,52 +163,75 @@ static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
(divider << CCK_FREQUENCY_STATUS_SHIFT),
"%s change in progress\n", name);
- return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
+ return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
-int
-intel_pch_rawclk(struct drm_device *dev)
+static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
+ const char *name, u32 reg)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ if (dev_priv->hpll_freq == 0)
+ dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
- WARN_ON(!HAS_PCH_SPLIT(dev));
+ return vlv_get_cck_clock(dev_priv, name, reg,
+ dev_priv->hpll_freq);
+}
- return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
+static int
+intel_pch_rawclk(struct drm_i915_private *dev_priv)
+{
+ return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
}
-/* hrawclock is 1/4 the FSB frequency */
-int intel_hrawclk(struct drm_device *dev)
+static int
+intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t clkcfg;
+ return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
+ CCK_DISPLAY_REF_CLOCK_CONTROL);
+}
- /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
- return 200;
+static int
+intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
+{
+ uint32_t clkcfg;
+ /* hrawclock is 1/4 the FSB frequency */
clkcfg = I915_READ(CLKCFG);
switch (clkcfg & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_400:
- return 100;
+ return 100000;
case CLKCFG_FSB_533:
- return 133;
+ return 133333;
case CLKCFG_FSB_667:
- return 166;
+ return 166667;
case CLKCFG_FSB_800:
- return 200;
+ return 200000;
case CLKCFG_FSB_1067:
- return 266;
+ return 266667;
case CLKCFG_FSB_1333:
- return 333;
+ return 333333;
/* these two are just a guess; one of them might be right */
case CLKCFG_FSB_1600:
case CLKCFG_FSB_1600_ALT:
- return 400;
+ return 400000;
default:
- return 133;
+ return 133333;
}
}
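The switch above simply tabulates hrawclk = FSB / 4 in kHz; a quick hedged check of one row (CLKCFG_FSB_1067 corresponds to a 1066.67 MHz FSB):

/* Illustrative arithmetic only. */
unsigned int fsb_khz = 1066667;
unsigned int hrawclk_khz = DIV_ROUND_CLOSEST(fsb_khz, 4);	/* 266667, matching the table */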
+static void intel_update_rawclk(struct drm_i915_private *dev_priv)
+{
+ if (HAS_PCH_SPLIT(dev_priv))
+ dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ dev_priv->rawclk_freq = intel_vlv_hrawclk(dev_priv);
+ else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
+ dev_priv->rawclk_freq = intel_g4x_hrawclk(dev_priv);
+ else
+ return; /* no rawclk on other platforms, or no need to know it */
+
+ DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
+}
+
static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
@@ -224,13 +244,15 @@ static void intel_update_czclk(struct drm_i915_private *dev_priv)
}
static inline u32 /* units of 100MHz */
-intel_fdi_link_freq(struct drm_device *dev)
+intel_fdi_link_freq(struct drm_i915_private *dev_priv,
+ const struct intel_crtc_state *pipe_config)
{
- if (IS_GEN5(dev)) {
- struct drm_i915_private *dev_priv = dev->dev_private;
- return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
- } else
- return 27;
+ if (HAS_DDI(dev_priv))
+ return pipe_config->port_clock; /* SPLL */
+ else if (IS_GEN5(dev_priv))
+ return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
+ else
+ return 270000;
}
static const intel_limit_t intel_limits_i8xx_dac = {
@@ -550,89 +572,6 @@ static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
return false;
}
-static const intel_limit_t *
-intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
-{
- struct drm_device *dev = crtc_state->base.crtc->dev;
- const intel_limit_t *limit;
-
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_is_dual_link_lvds(dev)) {
- if (refclk == 100000)
- limit = &intel_limits_ironlake_dual_lvds_100m;
- else
- limit = &intel_limits_ironlake_dual_lvds;
- } else {
- if (refclk == 100000)
- limit = &intel_limits_ironlake_single_lvds_100m;
- else
- limit = &intel_limits_ironlake_single_lvds;
- }
- } else
- limit = &intel_limits_ironlake_dac;
-
- return limit;
-}
-
-static const intel_limit_t *
-intel_g4x_limit(struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc_state->base.crtc->dev;
- const intel_limit_t *limit;
-
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_is_dual_link_lvds(dev))
- limit = &intel_limits_g4x_dual_channel_lvds;
- else
- limit = &intel_limits_g4x_single_channel_lvds;
- } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
- intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
- limit = &intel_limits_g4x_hdmi;
- } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
- limit = &intel_limits_g4x_sdvo;
- } else /* The option is for other outputs */
- limit = &intel_limits_i9xx_sdvo;
-
- return limit;
-}
-
-static const intel_limit_t *
-intel_limit(struct intel_crtc_state *crtc_state, int refclk)
-{
- struct drm_device *dev = crtc_state->base.crtc->dev;
- const intel_limit_t *limit;
-
- if (IS_BROXTON(dev))
- limit = &intel_limits_bxt;
- else if (HAS_PCH_SPLIT(dev))
- limit = intel_ironlake_limit(crtc_state, refclk);
- else if (IS_G4X(dev)) {
- limit = intel_g4x_limit(crtc_state);
- } else if (IS_PINEVIEW(dev)) {
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
- limit = &intel_limits_pineview_lvds;
- else
- limit = &intel_limits_pineview_sdvo;
- } else if (IS_CHERRYVIEW(dev)) {
- limit = &intel_limits_chv;
- } else if (IS_VALLEYVIEW(dev)) {
- limit = &intel_limits_vlv;
- } else if (!IS_GEN2(dev)) {
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
- limit = &intel_limits_i9xx_lvds;
- else
- limit = &intel_limits_i9xx_sdvo;
- } else {
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
- limit = &intel_limits_i8xx_lvds;
- else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
- limit = &intel_limits_i8xx_dvo;
- else
- limit = &intel_limits_i8xx_dac;
- }
- return limit;
-}
-
/*
* Platform specific helpers to calculate the port PLL loopback- (clock.m),
* and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
@@ -763,6 +702,16 @@ i9xx_select_p2_div(const intel_limit_t *limit,
}
}
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
static bool
i9xx_find_best_dpll(const intel_limit_t *limit,
struct intel_crtc_state *crtc_state,
@@ -810,6 +759,16 @@ i9xx_find_best_dpll(const intel_limit_t *limit,
return (err != target);
}
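The comment above (repeated for each platform's finder below) gives the clock equation in prose; a hedged helper evaluating it for one candidate divisor set, in kHz, using the intel_clock_t fields the finders already fill in (the helper name is illustrative):

/* Hedged sketch, not part of the patch. */
static int example_i9xx_dot_clock(int refclk, const intel_clock_t *clock)
{
	int m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	int vco = DIV_ROUND_CLOSEST(refclk * m, clock->n + 2);

	return DIV_ROUND_CLOSEST(vco, clock->p1 * clock->p2);
}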
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
static bool
pnv_find_best_dpll(const intel_limit_t *limit,
struct intel_crtc_state *crtc_state,
@@ -855,6 +814,16 @@ pnv_find_best_dpll(const intel_limit_t *limit,
return (err != target);
}
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
static bool
g4x_find_best_dpll(const intel_limit_t *limit,
struct intel_crtc_state *crtc_state,
@@ -943,6 +912,11 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
return *error_ppm + 10 < best_error_ppm;
}
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
static bool
vlv_find_best_dpll(const intel_limit_t *limit,
struct intel_crtc_state *crtc_state,
@@ -997,6 +971,11 @@ vlv_find_best_dpll(const intel_limit_t *limit,
return found;
}
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
static bool
chv_find_best_dpll(const intel_limit_t *limit,
struct intel_crtc_state *crtc_state,
@@ -1058,9 +1037,10 @@ chv_find_best_dpll(const intel_limit_t *limit,
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
intel_clock_t *best_clock)
{
- int refclk = i9xx_get_refclk(crtc_state, 0);
+ int refclk = 100000;
+ const intel_limit_t *limit = &intel_limits_bxt;
- return chv_find_best_dpll(intel_limit(crtc_state, refclk), crtc_state,
+ return chv_find_best_dpll(limit, crtc_state,
target_clock, refclk, NULL, best_clock);
}
@@ -1165,7 +1145,7 @@ void assert_pll(struct drm_i915_private *dev_priv,
}
/* XXX: the dsi pll is shared between MIPI DSI ports */
-static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
+void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
u32 val;
bool cur_state;
@@ -1179,36 +1159,6 @@ static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
"DSI PLL state assertion failure (expected %s, current %s)\n",
onoff(state), onoff(cur_state));
}
-#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
-#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
-
-struct intel_shared_dpll *
-intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-
- if (crtc->config->shared_dpll < 0)
- return NULL;
-
- return &dev_priv->shared_dplls[crtc->config->shared_dpll];
-}
-
-/* For ILK+ */
-void assert_shared_dpll(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll,
- bool state)
-{
- bool cur_state;
- struct intel_dpll_hw_state hw_state;
-
- if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
- return;
-
- cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
- I915_STATE_WARN(cur_state != state,
- "%s assertion failure (expected %s, current %s)\n",
- pll->name, onoff(state), onoff(cur_state));
-}
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
@@ -1217,7 +1167,7 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
- if (HAS_DDI(dev_priv->dev)) {
+ if (HAS_DDI(dev_priv)) {
/* DDI does not have a specific FDI_TX register */
u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
@@ -1253,11 +1203,11 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
u32 val;
/* ILK FDI PLL is always enabled */
- if (INTEL_INFO(dev_priv->dev)->gen == 5)
+ if (INTEL_INFO(dev_priv)->gen == 5)
return;
/* On Haswell, DDI ports are responsible for the FDI PLL setup */
- if (HAS_DDI(dev_priv->dev))
+ if (HAS_DDI(dev_priv))
return;
val = I915_READ(FDI_TX_CTL(pipe));
@@ -1446,21 +1396,8 @@ static void assert_vblank_disabled(struct drm_crtc *crtc)
drm_crtc_vblank_put(crtc);
}
-static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
-{
- u32 val;
- bool enabled;
-
- I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
-
- val = I915_READ(PCH_DREF_CONTROL);
- enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
- DREF_SUPERSPREAD_SOURCE_MASK));
- I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
-}
-
-static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
u32 val;
bool enabled;
@@ -1478,11 +1415,11 @@ static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
if ((val & DP_PORT_EN) == 0)
return false;
- if (HAS_PCH_CPT(dev_priv->dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
return false;
- } else if (IS_CHERRYVIEW(dev_priv->dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
return false;
} else {
@@ -1498,10 +1435,10 @@ static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
if ((val & SDVO_ENABLE) == 0)
return false;
- if (HAS_PCH_CPT(dev_priv->dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
return false;
- } else if (IS_CHERRYVIEW(dev_priv->dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
return false;
} else {
@@ -1517,7 +1454,7 @@ static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
if ((val & LVDS_PORT_EN) == 0)
return false;
- if (HAS_PCH_CPT(dev_priv->dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
return false;
} else {
@@ -1532,7 +1469,7 @@ static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
{
if ((val & ADPA_DAC_ENABLE) == 0)
return false;
- if (HAS_PCH_CPT(dev_priv->dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
return false;
} else {
@@ -1551,7 +1488,7 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
i915_mmio_reg_offset(reg), pipe_name(pipe));
- I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
+ I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
&& (val & DP_PIPEB_SELECT),
"IBX PCH dp port still using transcoder B\n");
}
@@ -1564,7 +1501,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
"PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
i915_mmio_reg_offset(reg), pipe_name(pipe));
- I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
+ I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
&& (val & SDVO_PIPE_B_SELECT),
"IBX PCH hdmi port still using transcoder B\n");
}
@@ -1593,53 +1530,47 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}
+static void _vlv_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+ DRM_ERROR("DPLL %d failed to lock\n", pipe);
+}
+
static void vlv_enable_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- i915_reg_t reg = DPLL(crtc->pipe);
- u32 dpll = pipe_config->dpll_hw_state.dpll;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
- assert_pipe_disabled(dev_priv, crtc->pipe);
+ assert_pipe_disabled(dev_priv, pipe);
/* PLL is protected by panel, make sure we can write it */
- if (IS_MOBILE(dev_priv->dev))
- assert_panel_unlocked(dev_priv, crtc->pipe);
-
- I915_WRITE(reg, dpll);
- POSTING_READ(reg);
- udelay(150);
-
- if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
- DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
+ assert_panel_unlocked(dev_priv, pipe);
- I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
- POSTING_READ(DPLL_MD(crtc->pipe));
+ if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
+ _vlv_enable_pll(crtc, pipe_config);
- /* We do this three times for luck */
- I915_WRITE(reg, dpll);
- POSTING_READ(reg);
- udelay(150); /* wait for warmup */
- I915_WRITE(reg, dpll);
- POSTING_READ(reg);
- udelay(150); /* wait for warmup */
- I915_WRITE(reg, dpll);
- POSTING_READ(reg);
- udelay(150); /* wait for warmup */
+ I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
+ POSTING_READ(DPLL_MD(pipe));
}
-static void chv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
+
+static void _chv_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe = crtc->pipe;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
u32 tmp;
- assert_pipe_disabled(dev_priv, crtc->pipe);
-
mutex_lock(&dev_priv->sb_lock);
/* Enable back the 10bit clock to display controller */
@@ -1660,10 +1591,43 @@ static void chv_enable_pll(struct intel_crtc *crtc,
/* Check PLL is locked */
if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
DRM_ERROR("PLL %d failed to lock\n", pipe);
+}
- /* not sure when this should be written */
- I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
- POSTING_READ(DPLL_MD(pipe));
+static void chv_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ assert_pipe_disabled(dev_priv, pipe);
+
+ /* PLL is protected by panel, make sure we can write it */
+ assert_panel_unlocked(dev_priv, pipe);
+
+ if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
+ _chv_enable_pll(crtc, pipe_config);
+
+ if (pipe != PIPE_A) {
+ /*
+ * WaPixelRepeatModeFixForC0:chv
+ *
+ * DPLLCMD is AWOL. Use chicken bits to propagate
+ * the value from DPLLBMD to either pipe B or C.
+ */
+ I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
+ I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
+ I915_WRITE(CBR4_VLV, 0);
+ dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
+
+ /*
+ * DPLLB VGA mode also seems to cause problems.
+ * We should always have it disabled.
+ */
+ WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
+ } else {
+ I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
+ POSTING_READ(DPLL_MD(pipe));
+ }
}
static int intel_num_dvo_pipes(struct drm_device *dev)
@@ -1687,9 +1651,6 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
assert_pipe_disabled(dev_priv, crtc->pipe);
- /* No really, not for ILK+ */
- BUG_ON(INTEL_INFO(dev)->gen >= 5);
-
/* PLL is protected by panel, make sure we can write it */
if (IS_MOBILE(dev) && !IS_I830(dev))
assert_panel_unlocked(dev_priv, crtc->pipe);
@@ -1788,16 +1749,13 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
/* Make sure the pipe isn't still relying on us */
assert_pipe_disabled(dev_priv, pipe);
- /*
- * Leave integrated clock source and reference clock enabled for pipe B.
- * The latter is needed for VGA hotplug / manual detection.
- */
- val = DPLL_VGA_MODE_DIS;
- if (pipe == PIPE_B)
- val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
+ val = DPLL_INTEGRATED_REF_CLK_VLV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (pipe != PIPE_A)
+ val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
I915_WRITE(DPLL(pipe), val);
POSTING_READ(DPLL(pipe));
-
}
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
@@ -1808,11 +1766,11 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
/* Make sure the pipe isn't still relying on us */
assert_pipe_disabled(dev_priv, pipe);
- /* Set PLL en = 0 */
val = DPLL_SSC_REF_CLK_CHV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
I915_WRITE(DPLL(pipe), val);
POSTING_READ(DPLL(pipe));
@@ -1856,100 +1814,6 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
}
-static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
-
- if (WARN_ON(pll == NULL))
- return;
-
- WARN_ON(!pll->config.crtc_mask);
- if (pll->active == 0) {
- DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
- WARN_ON(pll->on);
- assert_shared_dpll_disabled(dev_priv, pll);
-
- pll->mode_set(dev_priv, pll);
- }
-}
-
-/**
- * intel_enable_shared_dpll - enable PCH PLL
- * @dev_priv: i915 private structure
- * @pipe: pipe PLL to enable
- *
- * The PCH PLL needs to be enabled before the PCH transcoder, since it
- * drives the transcoder clock.
- */
-static void intel_enable_shared_dpll(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
-
- if (WARN_ON(pll == NULL))
- return;
-
- if (WARN_ON(pll->config.crtc_mask == 0))
- return;
-
- DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
- pll->name, pll->active, pll->on,
- crtc->base.base.id);
-
- if (pll->active++) {
- WARN_ON(!pll->on);
- assert_shared_dpll_enabled(dev_priv, pll);
- return;
- }
- WARN_ON(pll->on);
-
- intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
-
- DRM_DEBUG_KMS("enabling %s\n", pll->name);
- pll->enable(dev_priv, pll);
- pll->on = true;
-}
-
-static void intel_disable_shared_dpll(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
-
- /* PCH only available on ILK+ */
- if (INTEL_INFO(dev)->gen < 5)
- return;
-
- if (pll == NULL)
- return;
-
- if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base)))))
- return;
-
- DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
- pll->name, pll->active, pll->on,
- crtc->base.base.id);
-
- if (WARN_ON(pll->active == 0)) {
- assert_shared_dpll_disabled(dev_priv, pll);
- return;
- }
-
- assert_shared_dpll_enabled(dev_priv, pll);
- WARN_ON(!pll->on);
- if (--pll->active)
- return;
-
- DRM_DEBUG_KMS("disabling %s\n", pll->name);
- pll->disable(dev_priv, pll);
- pll->on = false;
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
-}
-
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -1959,12 +1823,8 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
i915_reg_t reg;
uint32_t val, pipeconf_val;
- /* PCH only available on ILK+ */
- BUG_ON(!HAS_PCH_SPLIT(dev));
-
/* Make sure PCH DPLL is enabled */
- assert_shared_dpll_enabled(dev_priv,
- intel_crtc_to_shared_dpll(intel_crtc));
+ assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, pipe);
@@ -1983,7 +1843,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val = I915_READ(reg);
pipeconf_val = I915_READ(PIPECONF(pipe));
- if (HAS_PCH_IBX(dev_priv->dev)) {
+ if (HAS_PCH_IBX(dev_priv)) {
/*
* Make the BPC in transcoder be consistent with
* that in pipeconf reg. For HDMI we must use 8bpc
@@ -1998,7 +1858,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val &= ~TRANS_INTERLACE_MASK;
if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
- if (HAS_PCH_IBX(dev_priv->dev) &&
+ if (HAS_PCH_IBX(dev_priv) &&
intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
val |= TRANS_LEGACY_INTERLACED_ILK;
else
@@ -2016,9 +1876,6 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
{
u32 val, pipeconf_val;
- /* PCH only available on ILK+ */
- BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));
-
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
@@ -2113,7 +1970,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
assert_cursor_disabled(dev_priv, pipe);
assert_sprites_disabled(dev_priv, pipe);
- if (HAS_PCH_LPT(dev_priv->dev))
+ if (HAS_PCH_LPT(dev_priv))
pch_transcoder = TRANSCODER_A;
else
pch_transcoder = pipe;
@@ -2123,7 +1980,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
* a plane. On ILK+ the pipe PLLs are integrated, so we don't
* need the check.
*/
- if (HAS_GMCH_DISPLAY(dev_priv->dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
if (crtc->config->has_dsi_encoder)
assert_dsi_pll_enabled(dev_priv);
else
@@ -2225,8 +2082,8 @@ static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
return IS_GEN2(dev_priv) ? 2048 : 4096;
}
-static unsigned int intel_tile_width(const struct drm_i915_private *dev_priv,
- uint64_t fb_modifier, unsigned int cpp)
+static unsigned int intel_tile_width_bytes(const struct drm_i915_private *dev_priv,
+ uint64_t fb_modifier, unsigned int cpp)
{
switch (fb_modifier) {
case DRM_FORMAT_MOD_NONE:
@@ -2269,7 +2126,21 @@ unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
return 1;
else
return intel_tile_size(dev_priv) /
- intel_tile_width(dev_priv, fb_modifier, cpp);
+ intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
+}
+
+/* Return the tile dimensions in pixel units */
+static void intel_tile_dims(const struct drm_i915_private *dev_priv,
+ unsigned int *tile_width,
+ unsigned int *tile_height,
+ uint64_t fb_modifier,
+ unsigned int cpp)
+{
+ unsigned int tile_width_bytes =
+ intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
+
+ *tile_width = tile_width_bytes / cpp;
+ *tile_height = intel_tile_size(dev_priv) / tile_width_bytes;
}
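To make the pixel-unit conversion concrete: an X-tile is 512 bytes wide and 4096 bytes in total, so at 4 bytes per pixel the tile is 128x8 pixels. A hedged check with the byte widths written out instead of taken from the helpers:

/* Illustrative numbers only: X-tiling, 32bpp. */
unsigned int tile_size = 4096;		/* bytes per tile */
unsigned int tile_width_bytes = 512;	/* X-tile row stride in bytes */
unsigned int cpp = 4;			/* bytes per pixel */
unsigned int tile_width = tile_width_bytes / cpp;		/* 128 px */
unsigned int tile_height = tile_size / tile_width_bytes;	/* 8 rows */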
unsigned int
@@ -2282,48 +2153,54 @@ intel_fb_align_height(struct drm_device *dev, unsigned int height,
return ALIGN(height, tile_height);
}
-static void
-intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
- const struct drm_plane_state *plane_state)
+unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
- struct drm_i915_private *dev_priv = to_i915(fb->dev);
- struct intel_rotation_info *info = &view->params.rotated;
- unsigned int tile_size, tile_width, tile_height, cpp;
-
- *view = i915_ggtt_view_normal;
+ unsigned int size = 0;
+ int i;
- if (!plane_state)
- return;
+ for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
+ size += rot_info->plane[i].width * rot_info->plane[i].height;
- if (!intel_rotation_90_or_270(plane_state->rotation))
- return;
+ return size;
+}
- *view = i915_ggtt_view_rotated;
+static void
+intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
+ const struct drm_framebuffer *fb,
+ unsigned int rotation)
+{
+ if (intel_rotation_90_or_270(rotation)) {
+ *view = i915_ggtt_view_rotated;
+ view->params.rotated = to_intel_framebuffer(fb)->rot_info;
+ } else {
+ *view = i915_ggtt_view_normal;
+ }
+}
- info->height = fb->height;
- info->pixel_format = fb->pixel_format;
- info->pitch = fb->pitches[0];
- info->uv_offset = fb->offsets[1];
- info->fb_modifier = fb->modifier[0];
+static void
+intel_fill_fb_info(struct drm_i915_private *dev_priv,
+ struct drm_framebuffer *fb)
+{
+ struct intel_rotation_info *info = &to_intel_framebuffer(fb)->rot_info;
+ unsigned int tile_size, tile_width, tile_height, cpp;
tile_size = intel_tile_size(dev_priv);
cpp = drm_format_plane_cpp(fb->pixel_format, 0);
- tile_width = intel_tile_width(dev_priv, fb->modifier[0], cpp);
- tile_height = tile_size / tile_width;
+ intel_tile_dims(dev_priv, &tile_width, &tile_height,
+ fb->modifier[0], cpp);
- info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_width);
- info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
- info->size = info->width_pages * info->height_pages * tile_size;
+ info->plane[0].width = DIV_ROUND_UP(fb->pitches[0], tile_width * cpp);
+ info->plane[0].height = DIV_ROUND_UP(fb->height, tile_height);
if (info->pixel_format == DRM_FORMAT_NV12) {
cpp = drm_format_plane_cpp(fb->pixel_format, 1);
- tile_width = intel_tile_width(dev_priv, fb->modifier[1], cpp);
- tile_height = tile_size / tile_width;
+ intel_tile_dims(dev_priv, &tile_width, &tile_height,
+ fb->modifier[1], cpp);
- info->width_pages_uv = DIV_ROUND_UP(fb->pitches[1], tile_width);
- info->height_pages_uv = DIV_ROUND_UP(fb->height / 2, tile_height);
- info->size_uv = info->width_pages_uv * info->height_pages_uv * tile_size;
+ info->uv_offset = fb->offsets[1];
+ info->plane[1].width = DIV_ROUND_UP(fb->pitches[1], tile_width * cpp);
+ info->plane[1].height = DIV_ROUND_UP(fb->height / 2, tile_height);
}
}
@@ -2360,9 +2237,8 @@ static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv
}
int
-intel_pin_and_fence_fb_obj(struct drm_plane *plane,
- struct drm_framebuffer *fb,
- const struct drm_plane_state *plane_state)
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+ unsigned int rotation)
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2375,7 +2251,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
- intel_fill_fb_ggtt_view(&view, fb, plane_state);
+ intel_fill_fb_ggtt_view(&view, fb, rotation);
/* Note that the w/a also requires 64 PTE of padding following the
* bo. We currently fill all unused PTE with the shadow page and so
@@ -2433,15 +2309,14 @@ err_pm:
return ret;
}
-static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
- const struct drm_plane_state *plane_state)
+static void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
{
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct i915_ggtt_view view;
WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
- intel_fill_fb_ggtt_view(&view, fb, plane_state);
+ intel_fill_fb_ggtt_view(&view, fb, rotation);
if (view.type == I915_GGTT_VIEW_NORMAL)
i915_gem_object_unpin_fence(obj);
@@ -2449,38 +2324,93 @@ static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
i915_gem_object_unpin_from_display_plane(obj, &view);
}
-/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
- * is assumed to be a power-of-two. */
-u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv,
- int *x, int *y,
- uint64_t fb_modifier,
- unsigned int cpp,
- unsigned int pitch)
+/*
+ * Adjust the tile offset by moving the difference into
+ * the x/y offsets.
+ *
+ * Input tile dimensions and pitch must already be
+ * rotated to match x and y, and in pixel units.
+ */
+static u32 intel_adjust_tile_offset(int *x, int *y,
+ unsigned int tile_width,
+ unsigned int tile_height,
+ unsigned int tile_size,
+ unsigned int pitch_tiles,
+ u32 old_offset,
+ u32 new_offset)
{
+ unsigned int tiles;
+
+ WARN_ON(old_offset & (tile_size - 1));
+ WARN_ON(new_offset & (tile_size - 1));
+ WARN_ON(new_offset > old_offset);
+
+ tiles = (old_offset - new_offset) / tile_size;
+
+ *y += tiles / pitch_tiles * tile_height;
+ *x += tiles % pitch_tiles * tile_width;
+
+ return new_offset;
+}
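The adjustment converts a whole number of tiles of offset difference into x/y steps in pixel units. A hedged worked example, assuming 4 KiB tiles, a 128x8 pixel tile and a pitch of 10 tiles: moving the base 13 tiles earlier adds one full tile row to y and three tiles to x.

/* Illustrative arithmetic only. */
unsigned int tile_size = 4096, tile_width = 128, tile_height = 8;
unsigned int pitch_tiles = 10;
u32 old_offset = 20 * tile_size, new_offset = 7 * tile_size;
unsigned int tiles = (old_offset - new_offset) / tile_size;	/* 13 */
int y_step = tiles / pitch_tiles * tile_height;			/* 1 * 8   = 8 px */
int x_step = tiles % pitch_tiles * tile_width;			/* 3 * 128 = 384 px */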
+
+/*
+ * Computes the linear offset to the base tile and adjusts
+ * x, y. Bytes per pixel is assumed to be a power-of-two.
+ *
+ * In the 90/270 rotated case, x and y are assumed
+ * to be already rotated to match the rotated GTT view, and
+ * pitch is the tile_height aligned framebuffer height.
+ */
+u32 intel_compute_tile_offset(int *x, int *y,
+ const struct drm_framebuffer *fb, int plane,
+ unsigned int pitch,
+ unsigned int rotation)
+{
+ const struct drm_i915_private *dev_priv = to_i915(fb->dev);
+ uint64_t fb_modifier = fb->modifier[plane];
+ unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+ u32 offset, offset_aligned, alignment;
+
+ alignment = intel_surf_alignment(dev_priv, fb_modifier);
+ if (alignment)
+ alignment--;
+
if (fb_modifier != DRM_FORMAT_MOD_NONE) {
unsigned int tile_size, tile_width, tile_height;
- unsigned int tile_rows, tiles;
+ unsigned int tile_rows, tiles, pitch_tiles;
tile_size = intel_tile_size(dev_priv);
- tile_width = intel_tile_width(dev_priv, fb_modifier, cpp);
- tile_height = tile_size / tile_width;
+ intel_tile_dims(dev_priv, &tile_width, &tile_height,
+ fb_modifier, cpp);
+
+ if (intel_rotation_90_or_270(rotation)) {
+ pitch_tiles = pitch / tile_height;
+ swap(tile_width, tile_height);
+ } else {
+ pitch_tiles = pitch / (tile_width * cpp);
+ }
tile_rows = *y / tile_height;
*y %= tile_height;
- tiles = *x / (tile_width/cpp);
- *x %= tile_width/cpp;
+ tiles = *x / tile_width;
+ *x %= tile_width;
- return tile_rows * pitch * tile_height + tiles * tile_size;
- } else {
- unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
- unsigned int offset;
+ offset = (tile_rows * pitch_tiles + tiles) * tile_size;
+ offset_aligned = offset & ~alignment;
+ intel_adjust_tile_offset(x, y, tile_width, tile_height,
+ tile_size, pitch_tiles,
+ offset, offset_aligned);
+ } else {
offset = *y * pitch + *x * cpp;
+ offset_aligned = offset & ~alignment;
+
*y = (offset & alignment) / pitch;
*x = ((offset & alignment) - *y * pitch) / cpp;
- return offset & ~alignment;
}
+
+ return offset_aligned;
}
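Putting the tiled branch together, a hedged worked example for an unrotated X-tiled 32bpp surface with a 4096-byte pitch (eight tiles per row): the pixel at (200, 30) lands in tile row 3, tile column 1, so the base offset is 25 tiles and the residual coordinates inside that tile are (72, 6).

/* Illustrative arithmetic only: X-tiled, cpp = 4, pitch = 4096 bytes, no rotation. */
unsigned int tile_size = 4096, tile_width = 128, tile_height = 8;
unsigned int pitch_tiles = 4096 / (128 * 4);			/* 8 tiles per row */
int x = 200, y = 30;
unsigned int tile_rows = y / tile_height;			/* 3 */
unsigned int tiles = x / tile_width;				/* 1 */
u32 offset = (tile_rows * pitch_tiles + tiles) * tile_size;	/* 25 * 4096 = 102400 */
/* residuals: x %= tile_width -> 72, y %= tile_height -> 6 */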
static int i9xx_format_to_fourcc(int format)
@@ -2536,6 +2466,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj = NULL;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
struct drm_framebuffer *fb = &plane_config->fb->base;
@@ -2551,7 +2482,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
/* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features. */
- if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
+ if (size_aligned * 2 > ggtt->stolen_usable_size)
return false;
mutex_lock(&dev->struct_mutex);
@@ -2667,7 +2598,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
*/
to_intel_plane_state(plane_state)->visible = false;
crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
- intel_pre_disable_primary(&intel_crtc->base);
+ intel_pre_disable_primary_noatomic(&intel_crtc->base);
intel_plane->disable_plane(primary, &intel_crtc->base);
return;
@@ -2716,6 +2647,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
u32 linear_offset;
u32 dspcntr;
i915_reg_t reg = DSPCNTR(plane);
+ unsigned int rotation = plane_state->base.rotation;
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
int x = plane_state->src.x1 >> 16;
int y = plane_state->src.y1 >> 16;
@@ -2780,15 +2712,14 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
if (INTEL_INFO(dev)->gen >= 4) {
intel_crtc->dspaddr_offset =
- intel_compute_tile_offset(dev_priv, &x, &y,
- fb->modifier[0], cpp,
- fb->pitches[0]);
+ intel_compute_tile_offset(&x, &y, fb, 0,
+ fb->pitches[0], rotation);
linear_offset -= intel_crtc->dspaddr_offset;
} else {
intel_crtc->dspaddr_offset = linear_offset;
}
- if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
+ if (rotation == BIT(DRM_ROTATE_180)) {
dspcntr |= DISPPLANE_ROTATE_180;
x += (crtc_state->pipe_src_w - 1);
@@ -2846,6 +2777,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
u32 linear_offset;
u32 dspcntr;
i915_reg_t reg = DSPCNTR(plane);
+ unsigned int rotation = plane_state->base.rotation;
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
int x = plane_state->src.x1 >> 16;
int y = plane_state->src.y1 >> 16;
@@ -2887,11 +2819,10 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
linear_offset = y * fb->pitches[0] + x * cpp;
intel_crtc->dspaddr_offset =
- intel_compute_tile_offset(dev_priv, &x, &y,
- fb->modifier[0], cpp,
- fb->pitches[0]);
+ intel_compute_tile_offset(&x, &y, fb, 0,
+ fb->pitches[0], rotation);
linear_offset -= intel_crtc->dspaddr_offset;
- if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
+ if (rotation == BIT(DRM_ROTATE_180)) {
dspcntr |= DISPPLANE_ROTATE_180;
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
@@ -2931,7 +2862,7 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
} else {
int cpp = drm_format_plane_cpp(pixel_format, 0);
- return intel_tile_width(dev_priv, fb_modifier, cpp);
+ return intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
}
}
@@ -2944,7 +2875,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
u64 offset;
intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
- intel_plane->base.state);
+ intel_plane->base.state->rotation);
vma = i915_gem_obj_to_ggtt_view(obj, &view);
if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
@@ -3284,12 +3215,12 @@ void intel_finish_reset(struct drm_device *dev)
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned reset_counter;
bool pending;
- if (i915_reset_in_progress(&dev_priv->gpu_error) ||
- intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+ reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error);
+ if (intel_crtc->reset_counter != reset_counter)
return false;
spin_lock_irq(&dev->event_lock);
@@ -3314,9 +3245,6 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
pipe_config->pipe_src_w, pipe_config->pipe_src_h);
- if (HAS_DDI(dev))
- intel_set_pipe_csc(&crtc->base);
-
/*
* Update pipe size and adjust fitter if needed: the reason for this is
* that in compute_mode_changes we check the native mode (not the pfit
@@ -3894,9 +3822,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
intel_crtc->unpin_work = NULL;
if (work->event)
- drm_send_vblank_event(intel_crtc->base.dev,
- intel_crtc->pipe,
- work->event);
+ drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
drm_crtc_vblank_put(&intel_crtc->base);
@@ -3955,37 +3881,35 @@ static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
lpt_disable_iclkip(dev_priv);
- /* 20MHz is a corner case which is out of range for the 7-bit divisor */
- if (clock == 20000) {
- auxdiv = 1;
- divsel = 0x41;
- phaseinc = 0x20;
- } else {
- /* The iCLK virtual clock root frequency is in MHz,
- * but the adjusted_mode->crtc_clock in in KHz. To get the
- * divisors, it is necessary to divide one by another, so we
- * convert the virtual clock precision to KHz here for higher
- * precision.
- */
+ /* The iCLK virtual clock root frequency is in MHz,
+ * but the adjusted_mode->crtc_clock is in KHz. To get the
+ * divisors, it is necessary to divide one by another, so we
+ * convert the virtual clock precision to KHz here for higher
+ * precision.
+ */
+ for (auxdiv = 0; auxdiv < 2; auxdiv++) {
u32 iclk_virtual_root_freq = 172800 * 1000;
u32 iclk_pi_range = 64;
- u32 desired_divisor, msb_divisor_value, pi_value;
+ u32 desired_divisor;
- desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, clock);
- msb_divisor_value = desired_divisor / iclk_pi_range;
- pi_value = desired_divisor % iclk_pi_range;
+ desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
+ clock << auxdiv);
+ divsel = (desired_divisor / iclk_pi_range) - 2;
+ phaseinc = desired_divisor % iclk_pi_range;
- auxdiv = 0;
- divsel = msb_divisor_value - 2;
- phaseinc = pi_value;
+ /*
+ * Near 20MHz is a corner case which is
+ * out of range for the 7-bit divisor
+ */
+ if (divsel <= 0x7f)
+ break;
}
/* This should not happen with any sane values */
@@ -4032,6 +3956,43 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
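The removed 20 MHz special case doubles as a sanity check for the new divisor search. A minimal standalone sketch (DIV_ROUND_CLOSEST below is a simplified stand-in for the kernel macro, valid for unsigned values only):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int clock = 20000;	/* crtc_clock in kHz */
	unsigned int iclk_virtual_root_freq = 172800 * 1000;
	unsigned int iclk_pi_range = 64;
	unsigned int auxdiv, divsel = 0, phaseinc = 0;

	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		unsigned int desired_divisor =
			DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
					  clock << auxdiv);

		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		if (divsel <= 0x7f)
			break;
	}

	/* prints auxdiv=1 divsel=0x41 phaseinc=0x20, matching the
	 * hard-coded 20 MHz values the patch removes */
	printf("auxdiv=%u divsel=0x%x phaseinc=0x%x\n",
	       auxdiv, divsel, phaseinc);
	return 0;
}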
+int lpt_get_iclkip(struct drm_i915_private *dev_priv)
+{
+ u32 divsel, phaseinc, auxdiv;
+ u32 iclk_virtual_root_freq = 172800 * 1000;
+ u32 iclk_pi_range = 64;
+ u32 desired_divisor;
+ u32 temp;
+
+ if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
+ return 0;
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+ if (temp & SBI_SSCCTL_DISABLE) {
+ mutex_unlock(&dev_priv->sb_lock);
+ return 0;
+ }
+
+ temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
+ divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
+ SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
+ phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
+ SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
+
+ temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
+ auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
+ SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
+
+ mutex_unlock(&dev_priv->sb_lock);
+
+ desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
+
+ return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
+ desired_divisor << auxdiv);
+}
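lpt_get_iclkip() is the inverse of the programming above; a quick round-trip check with the same illustrative 20 MHz values (and the same simplified macro) recovers the original clock:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int iclk_virtual_root_freq = 172800 * 1000;
	unsigned int iclk_pi_range = 64;
	unsigned int divsel = 0x41, phaseinc = 0x20, auxdiv = 1;

	unsigned int desired_divisor =
		(divsel + 2) * iclk_pi_range + phaseinc;

	/* prints 20000 kHz, i.e. the clock we started from */
	printf("%u kHz\n",
	       DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv));
	return 0;
}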
+
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
enum pipe pch_transcoder)
{
@@ -4142,12 +4103,6 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
I915_WRITE(FDI_RX_TUSIZE1(pipe),
I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
- /*
- * Sometimes spurious CPU pipe underruns happen during FDI
- * training, at least with VGA+HDMI cloning. Suppress them.
- */
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
@@ -4159,7 +4114,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
temp = I915_READ(PCH_DPLL_SEL);
temp |= TRANS_DPLL_ENABLE(pipe);
sel = TRANS_DPLLB_SEL(pipe);
- if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
+ if (intel_crtc->config->shared_dpll ==
+ intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
temp |= sel;
else
temp &= ~sel;
@@ -4181,8 +4137,6 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
intel_fdi_normal_train(crtc);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
const struct drm_display_mode *adjusted_mode =
@@ -4238,113 +4192,6 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
-struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- struct intel_shared_dpll *pll;
- struct intel_shared_dpll_config *shared_dpll;
- enum intel_dpll_id i;
- int max = dev_priv->num_shared_dpll;
-
- shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
-
- if (HAS_PCH_IBX(dev_priv->dev)) {
- /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
- i = (enum intel_dpll_id) crtc->pipe;
- pll = &dev_priv->shared_dplls[i];
-
- DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
- crtc->base.base.id, pll->name);
-
- WARN_ON(shared_dpll[i].crtc_mask);
-
- goto found;
- }
-
- if (IS_BROXTON(dev_priv->dev)) {
- /* PLL is attached to port in bxt */
- struct intel_encoder *encoder;
- struct intel_digital_port *intel_dig_port;
-
- encoder = intel_ddi_get_crtc_new_encoder(crtc_state);
- if (WARN_ON(!encoder))
- return NULL;
-
- intel_dig_port = enc_to_dig_port(&encoder->base);
- /* 1:1 mapping between ports and PLLs */
- i = (enum intel_dpll_id)intel_dig_port->port;
- pll = &dev_priv->shared_dplls[i];
- DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
- crtc->base.base.id, pll->name);
- WARN_ON(shared_dpll[i].crtc_mask);
-
- goto found;
- } else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv))
- /* Do not consider SPLL */
- max = 2;
-
- for (i = 0; i < max; i++) {
- pll = &dev_priv->shared_dplls[i];
-
- /* Only want to check enabled timings first */
- if (shared_dpll[i].crtc_mask == 0)
- continue;
-
- if (memcmp(&crtc_state->dpll_hw_state,
- &shared_dpll[i].hw_state,
- sizeof(crtc_state->dpll_hw_state)) == 0) {
- DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
- crtc->base.base.id, pll->name,
- shared_dpll[i].crtc_mask,
- pll->active);
- goto found;
- }
- }
-
- /* Ok no matching timings, maybe there's a free one? */
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- pll = &dev_priv->shared_dplls[i];
- if (shared_dpll[i].crtc_mask == 0) {
- DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
- crtc->base.base.id, pll->name);
- goto found;
- }
- }
-
- return NULL;
-
-found:
- if (shared_dpll[i].crtc_mask == 0)
- shared_dpll[i].hw_state =
- crtc_state->dpll_hw_state;
-
- crtc_state->shared_dpll = i;
- DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
- pipe_name(crtc->pipe));
-
- shared_dpll[i].crtc_mask |= 1 << crtc->pipe;
-
- return pll;
-}
-
-static void intel_shared_dpll_commit(struct drm_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_shared_dpll_config *shared_dpll;
- struct intel_shared_dpll *pll;
- enum intel_dpll_id i;
-
- if (!to_intel_atomic_state(state)->dpll_set)
- return;
-
- shared_dpll = to_intel_atomic_state(state)->shared_dpll;
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- pll = &dev_priv->shared_dplls[i];
- pll->config = shared_dpll[i];
- }
-}
-
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4576,8 +4423,11 @@ void hsw_enable_ips(struct intel_crtc *crtc)
if (!crtc->config->ips_enabled)
return;
- /* We can only enable IPS after we enable a plane and wait for a vblank */
- intel_wait_for_vblank(dev, crtc->pipe);
+ /*
+ * We can only enable IPS after we enable a plane and wait for a vblank.
+ * This function is called from post_plane_update, which is run after
+ * a vblank wait.
+ */
assert_plane_enabled(dev_priv, crtc->plane);
if (IS_BROADWELL(dev)) {
@@ -4626,55 +4476,6 @@ void hsw_disable_ips(struct intel_crtc *crtc)
intel_wait_for_vblank(dev, crtc->pipe);
}
-/** Loads the palette/gamma unit for the CRTC with the prepared values */
-static void intel_crtc_load_lut(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
- int i;
- bool reenable_ips = false;
-
- /* The clocks have to be on to load the palette. */
- if (!crtc->state->active)
- return;
-
- if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
- if (intel_crtc->config->has_dsi_encoder)
- assert_dsi_pll_enabled(dev_priv);
- else
- assert_pll_enabled(dev_priv, pipe);
- }
-
- /* Workaround : Do not read or write the pipe palette/gamma data while
- * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
- */
- if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
- ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
- GAMMA_MODE_MODE_SPLIT)) {
- hsw_disable_ips(intel_crtc);
- reenable_ips = true;
- }
-
- for (i = 0; i < 256; i++) {
- i915_reg_t palreg;
-
- if (HAS_GMCH_DISPLAY(dev))
- palreg = PALETTE(pipe, i);
- else
- palreg = LGC_PALETTE(pipe, i);
-
- I915_WRITE(palreg,
- (intel_crtc->lut_r[i] << 16) |
- (intel_crtc->lut_g[i] << 8) |
- intel_crtc->lut_b[i]);
- }
-
- if (reenable_ips)
- hsw_enable_ips(intel_crtc);
-}
-
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
if (intel_crtc->overlay) {
@@ -4734,16 +4535,7 @@ intel_post_enable_primary(struct drm_crtc *crtc)
intel_check_pch_fifo_underruns(dev_priv);
}
-/**
- * intel_pre_disable_primary - Perform operations before disabling primary plane
- * @crtc: the CRTC whose primary plane is to be disabled
- *
- * Performs potentially sleeping operations that must be done before the
- * primary plane is disabled, such as updating FBC and IPS. Note that this may
- * be called due to an explicit primary plane update, or due to an implicit
- * disable that is caused when a sprite plane completely hides the primary
- * plane.
- */
+/* FIXME move all this to pre_plane_update() with proper state tracking */
static void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
@@ -4762,6 +4554,26 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
/*
+ * FIXME IPS should be fine as long as one plane is
+ * enabled, but in practice it seems to have problems
+ * when going from primary only to sprite only and vice
+ * versa.
+ */
+ hsw_disable_ips(intel_crtc);
+}
+
+/* FIXME get rid of this and use pre_plane_update */
+static void
+intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+
+ intel_pre_disable_primary(crtc);
+
+ /*
* Vblank time updates from the shadow to live plane control register
* are blocked if the memory self-refresh mode is active at that
* moment. So to make sure the plane gets truly disabled, disable
@@ -4775,37 +4587,39 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
dev_priv->wm.vlv.cxsr = false;
intel_wait_for_vblank(dev, pipe);
}
-
- /*
- * FIXME IPS should be fine as long as one plane is
- * enabled, but in practice it seems to have problems
- * when going from primary only to sprite only and vice
- * versa.
- */
- hsw_disable_ips(intel_crtc);
}
-static void intel_post_plane_update(struct intel_crtc *crtc)
+static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
- struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct drm_atomic_state *old_state = old_crtc_state->base.state;
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
struct drm_device *dev = crtc->base.dev;
+ struct drm_plane *primary = crtc->base.primary;
+ struct drm_plane_state *old_pri_state =
+ drm_atomic_get_existing_plane_state(old_state, primary);
- intel_frontbuffer_flip(dev, atomic->fb_bits);
+ intel_frontbuffer_flip(dev, pipe_config->fb_bits);
crtc->wm.cxsr_allowed = true;
if (pipe_config->update_wm_post && pipe_config->base.active)
intel_update_watermarks(&crtc->base);
- if (atomic->update_fbc)
- intel_fbc_post_update(crtc);
+ if (old_pri_state) {
+ struct intel_plane_state *primary_state =
+ to_intel_plane_state(primary->state);
+ struct intel_plane_state *old_primary_state =
+ to_intel_plane_state(old_pri_state);
- if (atomic->post_enable_primary)
- intel_post_enable_primary(&crtc->base);
+ intel_fbc_post_update(crtc);
- memset(atomic, 0, sizeof(*atomic));
+ if (primary_state->visible &&
+ (needs_modeset(&pipe_config->base) ||
+ !old_primary_state->visible))
+ intel_post_enable_primary(&crtc->base);
+ }
}
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
@@ -4813,7 +4627,6 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
struct drm_atomic_state *old_state = old_crtc_state->base.state;
@@ -4822,15 +4635,14 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
drm_atomic_get_existing_plane_state(old_state, primary);
bool modeset = needs_modeset(&pipe_config->base);
- if (atomic->update_fbc)
- intel_fbc_pre_update(crtc);
-
if (old_pri_state) {
struct intel_plane_state *primary_state =
to_intel_plane_state(primary->state);
struct intel_plane_state *old_primary_state =
to_intel_plane_state(old_pri_state);
+ intel_fbc_pre_update(crtc);
+
if (old_primary_state->visible &&
(modeset || !primary_state->visible))
intel_pre_disable_primary(&crtc->base);
@@ -4839,11 +4651,58 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
if (pipe_config->disable_cxsr) {
crtc->wm.cxsr_allowed = false;
- if (old_crtc_state->base.active)
+ /*
+ * Vblank time updates from the shadow to live plane control register
+ * are blocked if the memory self-refresh mode is active at that
+ * moment. So to make sure the plane gets truly disabled, disable
+ * first the self-refresh mode. The self-refresh enable bit in turn
+ * will be checked/applied by the HW only at the next frame start
+ * event which is after the vblank start event, so we need to have a
+ * wait-for-vblank between disabling the plane and the pipe.
+ */
+ if (old_crtc_state->base.active) {
intel_set_memory_cxsr(dev_priv, false);
+ dev_priv->wm.vlv.cxsr = false;
+ intel_wait_for_vblank(dev, crtc->pipe);
+ }
+ }
+
+ /*
+ * IVB workaround: must disable low power watermarks for at least
+ * one frame before enabling scaling. LP watermarks can be re-enabled
+ * when scaling is disabled.
+ *
+ * WaCxSRDisabledForSpriteScaling:ivb
+ */
+ if (pipe_config->disable_lp_wm) {
+ ilk_disable_lp_wm(dev);
+ intel_wait_for_vblank(dev, crtc->pipe);
}
- if (!needs_modeset(&pipe_config->base) && pipe_config->update_wm_pre)
+ /*
+ * If we're doing a modeset, we're done. No need to do any pre-vblank
+ * watermark programming here.
+ */
+ if (needs_modeset(&pipe_config->base))
+ return;
+
+ /*
+ * For platforms that support atomic watermarks, program the
+ * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
+ * will be the intermediate values that are safe for both pre- and
+ * post-vblank; when vblank happens, the 'active' values will be set
+ * to the final 'target' values and we'll do this again to get the
+ * optimal watermarks. For gen9+ platforms, the values we program here
+ * will be the final target values which will get automatically latched
+ * at vblank time; no further programming will be necessary.
+ *
+ * If a platform hasn't been transitioned to atomic watermarks yet,
+ * we'll continue to update watermarks the old way, if flags tell
+ * us to.
+ */
+ if (dev_priv->display.initial_watermarks != NULL)
+ dev_priv->display.initial_watermarks(pipe_config);
+ else if (pipe_config->update_wm_pre)
intel_update_watermarks(&crtc->base);
}
@@ -4874,10 +4733,24 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
+ struct intel_crtc_state *pipe_config =
+ to_intel_crtc_state(crtc->state);
if (WARN_ON(intel_crtc->active))
return;
+ /*
+ * Sometimes spurious CPU pipe underruns happen during FDI
+ * training, at least with VGA+HDMI cloning. Suppress them.
+ *
+ * On ILK we get occasional spurious CPU pipe underruns
+ * between eDP port A enable and vdd enable. Also PCH port
+ * enable seems to result in the occasional CPU pipe underrun.
+ *
+ * Spurious PCH underruns also occur during PCH enabling.
+ */
+ if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
@@ -4888,6 +4761,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_dp_set_m_n(intel_crtc, M1_N1);
intel_set_pipe_timings(intel_crtc);
+ intel_set_pipe_src_size(intel_crtc);
if (intel_crtc->config->has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
@@ -4898,8 +4772,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
@@ -4920,9 +4792,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
* On ILK+ LUT must be loaded before the pipe is running but with
* clocks enabled
*/
- intel_crtc_load_lut(crtc);
+ intel_color_load_luts(&pipe_config->base);
- intel_update_watermarks(crtc);
+ if (dev_priv->display.initial_watermarks != NULL)
+ dev_priv->display.initial_watermarks(intel_crtc->config);
intel_enable_pipe(intel_crtc);
if (intel_crtc->config->has_pch_encoder)
@@ -4940,6 +4813,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
/* Must wait for vblank to avoid spurious PCH FIFO underruns */
if (intel_crtc->config->has_pch_encoder)
intel_wait_for_vblank(dev, pipe);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
@@ -4956,6 +4830,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe, hsw_workaround_pipe;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->state);
@@ -4966,16 +4841,20 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
false);
- if (intel_crtc_to_shared_dpll(intel_crtc))
+ if (intel_crtc->config->shared_dpll)
intel_enable_shared_dpll(intel_crtc);
if (intel_crtc->config->has_dp_encoder)
intel_dp_set_m_n(intel_crtc, M1_N1);
- intel_set_pipe_timings(intel_crtc);
+ if (!intel_crtc->config->has_dsi_encoder)
+ intel_set_pipe_timings(intel_crtc);
+
+ intel_set_pipe_src_size(intel_crtc);
- if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
- I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
+ if (cpu_transcoder != TRANSCODER_EDP &&
+ !transcoder_is_dsi(cpu_transcoder)) {
+ I915_WRITE(PIPE_MULT(cpu_transcoder),
intel_crtc->config->pixel_multiplier - 1);
}
@@ -4984,9 +4863,12 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
&intel_crtc->config->fdi_m_n, NULL);
}
- haswell_set_pipeconf(crtc);
+ if (!intel_crtc->config->has_dsi_encoder)
+ haswell_set_pipeconf(crtc);
+
+ haswell_set_pipemisc(crtc);
- intel_set_pipe_csc(crtc);
+ intel_color_set_csc(&pipe_config->base);
intel_crtc->active = true;
@@ -5015,14 +4897,20 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
* On ILK+ LUT must be loaded before the pipe is running but with
* clocks enabled
*/
- intel_crtc_load_lut(crtc);
+ intel_color_load_luts(&pipe_config->base);
intel_ddi_set_pipe_settings(crtc);
if (!intel_crtc->config->has_dsi_encoder)
intel_ddi_enable_transcoder_func(crtc);
- intel_update_watermarks(crtc);
- intel_enable_pipe(intel_crtc);
+ if (dev_priv->display.initial_watermarks != NULL)
+ dev_priv->display.initial_watermarks(pipe_config);
+ else
+ intel_update_watermarks(crtc);
+
+ /* XXX: Do the pipe assertions at the right place for BXT DSI. */
+ if (!intel_crtc->config->has_dsi_encoder)
+ intel_enable_pipe(intel_crtc);
if (intel_crtc->config->has_pch_encoder)
lpt_pch_enable(crtc);
@@ -5078,8 +4966,15 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- if (intel_crtc->config->has_pch_encoder)
+ /*
+ * Sometimes spurious CPU pipe underruns happen when the
+ * pipe is already disabled, but FDI RX/TX is still enabled.
+ * Happens at least with VGA+HDMI cloning. Suppress them.
+ */
+ if (intel_crtc->config->has_pch_encoder) {
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
+ }
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
@@ -5087,22 +4982,12 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
- /*
- * Sometimes spurious CPU pipe underruns happen when the
- * pipe is already disabled, but FDI RX/TX is still enabled.
- * Happens at least with VGA+HDMI cloning. Suppress them.
- */
- if (intel_crtc->config->has_pch_encoder)
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
intel_disable_pipe(intel_crtc);
ironlake_pfit_disable(intel_crtc, false);
- if (intel_crtc->config->has_pch_encoder) {
+ if (intel_crtc->config->has_pch_encoder)
ironlake_fdi_disable(crtc);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- }
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_disable)
@@ -5132,6 +5017,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
ironlake_fdi_pll_disable(intel_crtc);
}
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
@@ -5155,7 +5041,9 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
- intel_disable_pipe(intel_crtc);
+ /* XXX: Do the pipe assertions at the right place for BXT DSI. */
+ if (!intel_crtc->config->has_dsi_encoder)
+ intel_disable_pipe(intel_crtc);
if (intel_crtc->config->dp_encoder_is_mst)
intel_ddi_set_vc_payload_alloc(crtc, false);
@@ -5330,6 +5218,9 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
mask |= BIT(intel_display_port_power_domain(intel_encoder));
}
+ if (crtc_state->shared_dpll)
+ mask |= BIT(POWER_DOMAIN_PLLS);
+
return mask;
}
@@ -5393,6 +5284,8 @@ static void intel_update_max_cdclk(struct drm_device *dev)
dev_priv->max_cdclk_freq = 450000;
else
dev_priv->max_cdclk_freq = 337500;
+ } else if (IS_BROXTON(dev)) {
+ dev_priv->max_cdclk_freq = 624000;
} else if (IS_BROADWELL(dev)) {
/*
* FIXME with extra cooling we can allow
@@ -5452,9 +5345,8 @@ static void intel_update_cdclk(struct drm_device *dev)
intel_update_max_cdclk(dev);
}
-static void broxton_set_cdclk(struct drm_device *dev, int frequency)
+static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t divider;
uint32_t ratio;
uint32_t current_freq;
@@ -5568,33 +5460,46 @@ static void broxton_set_cdclk(struct drm_device *dev, int frequency)
return;
}
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv->dev);
}
-void broxton_init_cdclk(struct drm_device *dev)
+static bool broxton_cdclk_is_enabled(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t val;
+ if (!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE))
+ return false;
- /*
- * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
- * or else the reset will hang because there is no PCH to respond.
- * Move the handshake programming to initialization sequence.
- * Previously was left up to BIOS.
- */
- val = I915_READ(HSW_NDE_RSTWRN_OPT);
- val &= ~RESET_PCH_HANDSHAKE_ENABLE;
- I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
+ /* TODO: Check for a valid CDCLK rate */
+
+ if (!(I915_READ(DBUF_CTL) & DBUF_POWER_REQUEST)) {
+ DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power not requested\n");
+
+ return false;
+ }
+
+ if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) {
+ DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power hasn't settled\n");
- /* Enable PG1 for cdclk */
- intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
+ return false;
+ }
+
+ return true;
+}
+bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv)
+{
+ return broxton_cdclk_is_enabled(dev_priv);
+}
+
+void broxton_init_cdclk(struct drm_i915_private *dev_priv)
+{
/* check if cd clock is enabled */
- if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) {
- DRM_DEBUG_KMS("Display already initialized\n");
+ if (broxton_cdclk_is_enabled(dev_priv)) {
+ DRM_DEBUG_KMS("CDCLK already enabled, won't reprogram it\n");
return;
}
+ DRM_DEBUG_KMS("CDCLK not enabled, enabling it\n");
+
/*
* FIXME:
* - The initial CDCLK needs to be read from VBT.
@@ -5602,7 +5507,7 @@ void broxton_init_cdclk(struct drm_device *dev)
* - check if setting the max (or any) cdclk freq is really necessary
* here, it belongs to modeset time
*/
- broxton_set_cdclk(dev, 624000);
+ broxton_set_cdclk(dev_priv, 624000);
I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
POSTING_READ(DBUF_CTL);
@@ -5613,10 +5518,8 @@ void broxton_init_cdclk(struct drm_device *dev)
DRM_ERROR("DBuf power enable timeout!\n");
}
-void broxton_uninit_cdclk(struct drm_device *dev)
+void broxton_uninit_cdclk(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
POSTING_READ(DBUF_CTL);
@@ -5626,9 +5529,7 @@ void broxton_uninit_cdclk(struct drm_device *dev)
DRM_ERROR("DBuf power disable timeout!\n");
/* Set minimum (bypass) frequency, in effect turning off the DE PLL */
- broxton_set_cdclk(dev, 19200);
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+ broxton_set_cdclk(dev_priv, 19200);
}
static const struct skl_cdclk_entry {
@@ -6165,6 +6066,8 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
+ struct intel_crtc_state *pipe_config =
+ to_intel_crtc_state(crtc->state);
int pipe = intel_crtc->pipe;
if (WARN_ON(intel_crtc->active))
@@ -6174,6 +6077,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_dp_set_m_n(intel_crtc, M1_N1);
intel_set_pipe_timings(intel_crtc);
+ intel_set_pipe_src_size(intel_crtc);
if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6192,14 +6096,12 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
- if (!intel_crtc->config->has_dsi_encoder) {
- if (IS_CHERRYVIEW(dev)) {
- chv_prepare_pll(intel_crtc, intel_crtc->config);
- chv_enable_pll(intel_crtc, intel_crtc->config);
- } else {
- vlv_prepare_pll(intel_crtc, intel_crtc->config);
- vlv_enable_pll(intel_crtc, intel_crtc->config);
- }
+ if (IS_CHERRYVIEW(dev)) {
+ chv_prepare_pll(intel_crtc, intel_crtc->config);
+ chv_enable_pll(intel_crtc, intel_crtc->config);
+ } else {
+ vlv_prepare_pll(intel_crtc, intel_crtc->config);
+ vlv_enable_pll(intel_crtc, intel_crtc->config);
}
for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -6208,7 +6110,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
i9xx_pfit_enable(intel_crtc);
- intel_crtc_load_lut(crtc);
+ intel_color_load_luts(&pipe_config->base);
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
@@ -6235,7 +6137,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
- int pipe = intel_crtc->pipe;
+ struct intel_crtc_state *pipe_config =
+ to_intel_crtc_state(crtc->state);
+ enum pipe pipe = intel_crtc->pipe;
if (WARN_ON(intel_crtc->active))
return;
@@ -6246,6 +6150,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
intel_dp_set_m_n(intel_crtc, M1_N1);
intel_set_pipe_timings(intel_crtc);
+ intel_set_pipe_src_size(intel_crtc);
i9xx_set_pipeconf(intel_crtc);
@@ -6262,7 +6167,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
i9xx_pfit_enable(intel_crtc);
- intel_crtc_load_lut(crtc);
+ intel_color_load_luts(&pipe_config->base);
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
@@ -6300,10 +6205,9 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
/*
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
- * We also need to wait on all gmch platforms because of the
- * self-refresh mode constraint explained above.
*/
- intel_wait_for_vblank(dev, pipe);
+ if (IS_GEN2(dev))
+ intel_wait_for_vblank(dev, pipe);
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
@@ -6338,6 +6242,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
{
+ struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum intel_display_power_domain domain;
@@ -6349,14 +6254,27 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
if (to_intel_plane_state(crtc->primary->state)->visible) {
WARN_ON(intel_crtc->unpin_work);
- intel_pre_disable_primary(crtc);
+ intel_pre_disable_primary_noatomic(crtc);
intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
to_intel_plane_state(crtc->primary->state)->visible = false;
}
dev_priv->display.crtc_disable(crtc);
+
+ DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was enabled, now disabled\n",
+ crtc->base.id);
+
+ WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
+ crtc->state->active = false;
intel_crtc->active = false;
+ crtc->enabled = false;
+ crtc->state->connector_mask = 0;
+ crtc->state->encoder_mask = 0;
+
+ for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
+ encoder->base.crtc = NULL;
+
intel_fbc_disable(intel_crtc);
intel_update_watermarks(crtc);
intel_disable_shared_dpll(intel_crtc);
@@ -6399,7 +6317,7 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
/* Cross check the actual hw state with our own modeset state tracking (and it's
* internal consistency). */
-static void intel_connector_check_state(struct intel_connector *connector)
+static void intel_connector_verify_state(struct intel_connector *connector)
{
struct drm_crtc *crtc = connector->base.state->crtc;
@@ -6569,7 +6487,7 @@ retry:
* Hence the bw of each lane in terms of the mode signal
* is:
*/
- link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
+ link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
fdi_dotclock = adjusted_mode->crtc_clock;
@@ -6581,8 +6499,7 @@ retry:
intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
link_bw, &pipe_config->fdi_m_n);
- ret = ironlake_check_fdi_lanes(intel_crtc->base.dev,
- intel_crtc->pipe, pipe_config);
+ ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
pipe_config->pipe_bpp -= 2*3;
DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
@@ -6606,7 +6523,7 @@ static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
return false;
/* HSW can handle pixel rate up to cdclk? */
- if (IS_HASWELL(dev_priv->dev))
+ if (IS_HASWELL(dev_priv))
return true;
/*
@@ -7134,30 +7051,6 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
-static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
- int num_connectors)
-{
- struct drm_device *dev = crtc_state->base.crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int refclk;
-
- WARN_ON(!crtc_state->base.state);
-
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) {
- refclk = 100000;
- } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
- DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
- } else if (!IS_GEN2(dev)) {
- refclk = 96000;
- } else {
- refclk = 48000;
- }
-
- return refclk;
-}
-
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
{
return (1 << dpll->n) << 16 | dpll->m2;
@@ -7301,24 +7194,34 @@ void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
static void vlv_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- u32 dpll, dpll_md;
+ pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (crtc->pipe != PIPE_A)
+ pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
- /*
- * Enable DPIO clock input. We should never disable the reference
- * clock for pipe B, since VGA hotplug / manual detection depends
- * on it.
- */
- dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV |
- DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV;
- /* We should never disable this, set it here for state tracking */
- if (crtc->pipe == PIPE_B)
- dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
- dpll |= DPLL_VCO_ENABLE;
- pipe_config->dpll_hw_state.dpll = dpll;
+ /* DPLL not used with DSI, but still need the rest set up */
+ if (!pipe_config->has_dsi_encoder)
+ pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
+ DPLL_EXT_BUFFER_ENABLE_VLV;
- dpll_md = (pipe_config->pixel_multiplier - 1)
- << DPLL_MD_UDI_MULTIPLIER_SHIFT;
- pipe_config->dpll_hw_state.dpll_md = dpll_md;
+ pipe_config->dpll_hw_state.dpll_md =
+ (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+}
+
+static void chv_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (crtc->pipe != PIPE_A)
+ pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ /* DPLL not used with DSI, but still need the rest set up */
+ if (!pipe_config->has_dsi_encoder)
+ pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
+
+ pipe_config->dpll_hw_state.dpll_md =
+ (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
static void vlv_prepare_pll(struct intel_crtc *crtc,
@@ -7326,11 +7229,20 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
u32 mdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
u32 coreclk, reg_val;
+ /* Enable Refclk */
+ I915_WRITE(DPLL(pipe),
+ pipe_config->dpll_hw_state.dpll &
+ ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
+
+ /* No need to actually set up the DPLL with DSI */
+ if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+ return;
+
mutex_lock(&dev_priv->sb_lock);
bestn = pipe_config->dpll.n;
@@ -7412,32 +7324,26 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
mutex_unlock(&dev_priv->sb_lock);
}
-static void chv_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
-{
- pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
- DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
- DPLL_VCO_ENABLE;
- if (crtc->pipe != PIPE_A)
- pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
- pipe_config->dpll_hw_state.dpll_md =
- (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-}
-
static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe = crtc->pipe;
- i915_reg_t dpll_reg = DPLL(crtc->pipe);
+ enum pipe pipe = crtc->pipe;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
u32 loopfilter, tribuf_calcntr;
u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
u32 dpio_val;
int vco;
+ /* Enable Refclk and SSC */
+ I915_WRITE(DPLL(pipe),
+ pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
+
+ /* No need to actually set up the DPLL with DSI */
+ if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+ return;
+
bestn = pipe_config->dpll.n;
bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
bestm1 = pipe_config->dpll.m1;
@@ -7448,12 +7354,6 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
dpio_val = 0;
loopfilter = 0;
- /*
- * Enable Refclk and SSC
- */
- I915_WRITE(dpll_reg,
- pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
-
mutex_lock(&dev_priv->sb_lock);
/* p1 and p2 divider */
@@ -7587,8 +7487,7 @@ void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
static void i9xx_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
- intel_clock_t *reduced_clock,
- int num_connectors)
+ intel_clock_t *reduced_clock)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7647,7 +7546,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
if (crtc_state->sdvo_tv_clock)
dpll |= PLL_REF_INPUT_TVCLKINBC;
else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ intel_panel_use_ssc(dev_priv))
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
@@ -7664,8 +7563,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
static void i8xx_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
- intel_clock_t *reduced_clock,
- int num_connectors)
+ intel_clock_t *reduced_clock)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7691,7 +7589,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLL_DVO_2X_MODE;
if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ intel_panel_use_ssc(dev_priv))
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
@@ -7760,6 +7658,14 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
(pipe == PIPE_B || pipe == PIPE_C))
I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
+}
+
+static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = intel_crtc->pipe;
+
/* pipesrc controls the size that is scaled from, which should
* always be the user's requested size.
*/
@@ -7801,6 +7707,14 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
pipe_config->base.adjusted_mode.crtc_vtotal += 1;
pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
}
+}
+
+static void intel_get_pipe_src_size(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
tmp = I915_READ(PIPESRC(crtc->pipe));
pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
@@ -7898,69 +7812,192 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
POSTING_READ(PIPECONF(intel_crtc->pipe));
}
-static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
+static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int refclk, num_connectors = 0;
- intel_clock_t clock;
- bool ok;
const intel_limit_t *limit;
- struct drm_atomic_state *state = crtc_state->base.state;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
- int i;
+ int refclk = 48000;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
- if (crtc_state->has_dsi_encoder)
- return 0;
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+ }
- for_each_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc == &crtc->base)
- num_connectors++;
+ limit = &intel_limits_i8xx_lvds;
+ } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) {
+ limit = &intel_limits_i8xx_dvo;
+ } else {
+ limit = &intel_limits_i8xx_dac;
}
- if (!crtc_state->clock_set) {
- refclk = i9xx_get_refclk(crtc_state, num_connectors);
+ if (!crtc_state->clock_set &&
+ !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
- /*
- * Returns a set of divisors for the desired target clock with
- * the given refclk, or FALSE. The returned values represent
- * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n +
- * 2) / p1 / p2.
- */
- limit = intel_limit(crtc_state, refclk);
- ok = dev_priv->display.find_dpll(limit, crtc_state,
- crtc_state->port_clock,
- refclk, NULL, &clock);
- if (!ok) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
- return -EINVAL;
+ i8xx_compute_dpll(crtc, crtc_state, NULL);
+
+ return 0;
+}
+
+static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const intel_limit_t *limit;
+ int refclk = 96000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+ }
+
+ if (intel_is_dual_link_lvds(dev))
+ limit = &intel_limits_g4x_dual_channel_lvds;
+ else
+ limit = &intel_limits_g4x_single_channel_lvds;
+ } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+ intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
+ limit = &intel_limits_g4x_hdmi;
+ } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
+ limit = &intel_limits_g4x_sdvo;
+ } else {
+ /* The option is for other outputs */
+ limit = &intel_limits_i9xx_sdvo;
+ }
+
+ if (!crtc_state->clock_set &&
+ !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+ return 0;
+}
+
+static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const intel_limit_t *limit;
+ int refclk = 96000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
}
- /* Compat-code for transition, will disappear. */
- crtc_state->dpll.n = clock.n;
- crtc_state->dpll.m1 = clock.m1;
- crtc_state->dpll.m2 = clock.m2;
- crtc_state->dpll.p1 = clock.p1;
- crtc_state->dpll.p2 = clock.p2;
+ limit = &intel_limits_pineview_lvds;
+ } else {
+ limit = &intel_limits_pineview_sdvo;
}
- if (IS_GEN2(dev)) {
- i8xx_compute_dpll(crtc, crtc_state, NULL,
- num_connectors);
- } else if (IS_CHERRYVIEW(dev)) {
- chv_compute_dpll(crtc, crtc_state);
- } else if (IS_VALLEYVIEW(dev)) {
- vlv_compute_dpll(crtc, crtc_state);
+ if (!crtc_state->clock_set &&
+ !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+ return 0;
+}
+
+static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const intel_limit_t *limit;
+ int refclk = 96000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+ }
+
+ limit = &intel_limits_i9xx_lvds;
} else {
- i9xx_compute_dpll(crtc, crtc_state, NULL,
- num_connectors);
+ limit = &intel_limits_i9xx_sdvo;
+ }
+
+ if (!crtc_state->clock_set &&
+ !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
}
+ i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+ return 0;
+}
+
+static int chv_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ int refclk = 100000;
+ const intel_limit_t *limit = &intel_limits_chv;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (!crtc_state->clock_set &&
+ !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ chv_compute_dpll(crtc, crtc_state);
+
+ return 0;
+}
+
+static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ int refclk = 100000;
+ const intel_limit_t *limit = &intel_limits_vlv;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (!crtc_state->clock_set &&
+ !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ vlv_compute_dpll(crtc, crtc_state);
+
return 0;
}
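The divisors chosen by the *_find_best_dpll() helpers satisfy the clock equation quoted in the comment removed earlier in this hunk (refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2). A tiny standalone check, with made-up divisor values purely for illustration:

#include <stdio.h>

struct dpll_example { int n, m1, m2, p1, p2; };

static int i9xx_dot_clock(int refclk_khz, const struct dpll_example *d)
{
	int m = 5 * (d->m1 + 2) + (d->m2 + 2);

	return refclk_khz * m / (d->n + 2) / d->p1 / d->p2;
}

int main(void)
{
	/* divisor values below are made up for illustration only */
	struct dpll_example d = { .n = 3, .m1 = 12, .m2 = 9, .p1 = 2, .p2 = 10 };

	/* prints 77760, i.e. a ~77.8 MHz dot clock from a 96 MHz refclk */
	printf("%d kHz\n", i9xx_dot_clock(96000, &d));
	return 0;
}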
@@ -8001,8 +8038,8 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
u32 mdiv;
int refclk = 100000;
- /* In case of MIPI DPLL will not even be used */
- if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
+ /* In case of DSI, DPLL will not be used */
+ if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
return;
mutex_lock(&dev_priv->sb_lock);
@@ -8098,6 +8135,10 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
int refclk = 100000;
+ /* In case of DSI, DPLL will not be used */
+ if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+ return;
+
mutex_lock(&dev_priv->sb_lock);
cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
@@ -8131,7 +8172,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
return false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
- pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+ pipe_config->shared_dpll = NULL;
ret = false;
@@ -8163,11 +8204,16 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
intel_get_pipe_timings(crtc, pipe_config);
+ intel_get_pipe_src_size(crtc, pipe_config);
i9xx_get_pfit_config(crtc, pipe_config);
if (INTEL_INFO(dev)->gen >= 4) {
- tmp = I915_READ(DPLL_MD(crtc->pipe));
+ /* No way to read it out on pipes B and C */
+ if (IS_CHERRYVIEW(dev) && crtc->pipe != PIPE_A)
+ tmp = dev_priv->chv_dpll_md[crtc->pipe];
+ else
+ tmp = I915_READ(DPLL_MD(crtc->pipe));
pipe_config->pixel_multiplier =
((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
>> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
@@ -8401,16 +8447,16 @@ static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
I915_WRITE(SOUTH_CHICKEN2, tmp);
- if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+ if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS, 100))
DRM_ERROR("FDI mPHY reset assert timeout\n");
tmp = I915_READ(SOUTH_CHICKEN2);
tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
I915_WRITE(SOUTH_CHICKEN2, tmp);
- if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
+ if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
@@ -8656,42 +8702,6 @@ void intel_init_pch_refclk(struct drm_device *dev)
lpt_init_pch_refclk(dev);
}
-static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc_state->base.crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_atomic_state *state = crtc_state->base.state;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
- struct intel_encoder *encoder;
- int num_connectors = 0, i;
- bool is_lvds = false;
-
- for_each_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != crtc_state->base.crtc)
- continue;
-
- encoder = to_intel_encoder(connector_state->best_encoder);
-
- switch (encoder->type) {
- case INTEL_OUTPUT_LVDS:
- is_lvds = true;
- break;
- default:
- break;
- }
- num_connectors++;
- }
-
- if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
- DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
- dev_priv->vbt.lvds_ssc_freq);
- return dev_priv->vbt.lvds_ssc_freq;
- }
-
- return 120000;
-}
-
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
@@ -8734,82 +8744,14 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc)
POSTING_READ(PIPECONF(pipe));
}
-/*
- * Set up the pipe CSC unit.
- *
- * Currently only full range RGB to limited range RGB conversion
- * is supported, but eventually this should handle various
- * RGB<->YCbCr scenarios as well.
- */
-static void intel_set_pipe_csc(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- uint16_t coeff = 0x7800; /* 1.0 */
-
- /*
- * TODO: Check what kind of values actually come out of the pipe
- * with these coeff/postoff values and adjust to get the best
- * accuracy. Perhaps we even need to take the bpc value into
- * consideration.
- */
-
- if (intel_crtc->config->limited_color_range)
- coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
-
- /*
- * GY/GU and RY/RU should be the other way around according
- * to BSpec, but reality doesn't agree. Just set them up in
- * a way that results in the correct picture.
- */
- I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
- I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
-
- I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
- I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
-
- I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
- I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
-
- I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
- I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
- I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
-
- if (INTEL_INFO(dev)->gen > 6) {
- uint16_t postoff = 0;
-
- if (intel_crtc->config->limited_color_range)
- postoff = (16 * (1 << 12) / 255) & 0x1fff;
-
- I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
- I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
- I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
-
- I915_WRITE(PIPE_CSC_MODE(pipe), 0);
- } else {
- uint32_t mode = CSC_MODE_YUV_TO_RGB;
-
- if (intel_crtc->config->limited_color_range)
- mode |= CSC_BLACK_SCREEN_OFFSET;
-
- I915_WRITE(PIPE_CSC_MODE(pipe), mode);
- }
-}
-
static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
- uint32_t val;
-
- val = 0;
+ u32 val = 0;
- if (IS_HASWELL(dev) && intel_crtc->config->dither)
+ if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
@@ -8819,12 +8761,15 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
I915_WRITE(PIPECONF(cpu_transcoder), val);
POSTING_READ(PIPECONF(cpu_transcoder));
+}
- I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
- POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
+static void haswell_set_pipemisc(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
- val = 0;
+ if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
+ u32 val = 0;
switch (intel_crtc->config->pipe_bpp) {
case 18:
@@ -8847,39 +8792,10 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
if (intel_crtc->config->dither)
val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
- I915_WRITE(PIPEMISC(pipe), val);
+ I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
}
}
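/*
 * A minimal sketch of the bpp -> bpc relationship used when picking the
 * PIPEMISC BPC field above: the pipe carries three colour components, so
 * the per-component depth is simply pipe_bpp / 3. Hypothetical helper,
 * not an i915 API.
 */
static inline int sketch_pipe_bpp_to_bpc(int pipe_bpp)
{
	/* 18 -> 6 bpc, 24 -> 8 bpc, 30 -> 10 bpc, 36 -> 12 bpc */
	return pipe_bpp / 3;
}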
-static bool ironlake_compute_clocks(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- intel_clock_t *clock,
- bool *has_reduced_clock,
- intel_clock_t *reduced_clock)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int refclk;
- const intel_limit_t *limit;
- bool ret;
-
- refclk = ironlake_get_refclk(crtc_state);
-
- /*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
- limit = intel_limit(crtc_state, refclk);
- ret = dev_priv->display.find_dpll(limit, crtc_state,
- crtc_state->port_clock,
- refclk, NULL, clock);
- if (!ret)
- return false;
-
- return true;
-}
-
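/*
 * A standalone sketch of the Ironlake DPLL clock equation quoted in the
 * comment removed above; handy for sanity-checking a divisor set by hand.
 * The struct and function names are hypothetical and only mirror the
 * n/m1/m2/p1/p2 fields of intel_clock_t.
 */
struct sketch_dpll_divisors {
	int n, m1, m2, p1, p2;
};

static int sketch_ironlake_dpll_khz(int refclk_khz,
				    const struct sketch_dpll_divisors *d)
{
	/* refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2 */
	int m = 5 * (d->m1 + 2) + (d->m2 + 2);

	return refclk_khz * m / (d->n + 2) / d->p1 / d->p2;
}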
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
/*
@@ -8896,10 +8812,9 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}
-static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state,
- u32 *fp,
- intel_clock_t *reduced_clock, u32 *fp2)
+static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state,
+ intel_clock_t *reduced_clock)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
@@ -8908,8 +8823,8 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
struct drm_connector *connector;
struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
- uint32_t dpll;
- int factor, num_connectors = 0, i;
+ u32 dpll, fp, fp2;
+ int factor, i;
bool is_lvds = false, is_sdvo = false;
for_each_connector_in_state(state, connector, connector_state, i) {
@@ -8929,8 +8844,6 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
default:
break;
}
-
- num_connectors++;
}
/* Enable autotuning of the PLL clock (if permissible) */
@@ -8943,11 +8856,19 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
} else if (crtc_state->sdvo_tv_clock)
factor = 20;
+ fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
+
if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
- *fp |= FP_CB_TUNE;
+ fp |= FP_CB_TUNE;
+
+ if (reduced_clock) {
+ fp2 = i9xx_dpll_compute_fp(reduced_clock);
- if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
- *fp2 |= FP_CB_TUNE;
+ if (reduced_clock->m < factor * reduced_clock->n)
+ fp2 |= FP_CB_TUNE;
+ } else {
+ fp2 = fp;
+ }
dpll = 0;
@@ -8984,76 +8905,80 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
break;
}
- if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ if (is_lvds && intel_panel_use_ssc(dev_priv))
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
- return dpll | DPLL_VCO_ENABLE;
+ dpll |= DPLL_VCO_ENABLE;
+
+ crtc_state->dpll_hw_state.dpll = dpll;
+ crtc_state->dpll_hw_state.fp0 = fp;
+ crtc_state->dpll_hw_state.fp1 = fp2;
}
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
- intel_clock_t clock, reduced_clock;
- u32 dpll = 0, fp = 0, fp2 = 0;
- bool ok, has_reduced_clock = false;
- bool is_lvds = false;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ intel_clock_t reduced_clock;
+ bool has_reduced_clock = false;
struct intel_shared_dpll *pll;
+ const intel_limit_t *limit;
+ int refclk = 120000;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
- is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
+ crtc->lowfreq_avail = false;
+
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+ if (!crtc_state->has_pch_encoder)
+ return 0;
- WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
- "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
+ dev_priv->vbt.lvds_ssc_freq);
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ }
- ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
- &has_reduced_clock, &reduced_clock);
- if (!ok && !crtc_state->clock_set) {
+ if (intel_is_dual_link_lvds(dev)) {
+ if (refclk == 100000)
+ limit = &intel_limits_ironlake_dual_lvds_100m;
+ else
+ limit = &intel_limits_ironlake_dual_lvds;
+ } else {
+ if (refclk == 100000)
+ limit = &intel_limits_ironlake_single_lvds_100m;
+ else
+ limit = &intel_limits_ironlake_single_lvds;
+ }
+ } else {
+ limit = &intel_limits_ironlake_dac;
+ }
+
+ if (!crtc_state->clock_set &&
+ !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
- /* Compat-code for transition, will disappear. */
- if (!crtc_state->clock_set) {
- crtc_state->dpll.n = clock.n;
- crtc_state->dpll.m1 = clock.m1;
- crtc_state->dpll.m2 = clock.m2;
- crtc_state->dpll.p1 = clock.p1;
- crtc_state->dpll.p2 = clock.p2;
- }
- /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
- if (crtc_state->has_pch_encoder) {
- fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
- if (has_reduced_clock)
- fp2 = i9xx_dpll_compute_fp(&reduced_clock);
-
- dpll = ironlake_compute_dpll(crtc, crtc_state,
- &fp, &reduced_clock,
- has_reduced_clock ? &fp2 : NULL);
-
- crtc_state->dpll_hw_state.dpll = dpll;
- crtc_state->dpll_hw_state.fp0 = fp;
- if (has_reduced_clock)
- crtc_state->dpll_hw_state.fp1 = fp2;
- else
- crtc_state->dpll_hw_state.fp1 = fp;
+ ironlake_compute_dpll(crtc, crtc_state,
+ has_reduced_clock ? &reduced_clock : NULL);
- pll = intel_get_shared_dpll(crtc, crtc_state);
- if (pll == NULL) {
- DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
- pipe_name(crtc->pipe));
- return -EINVAL;
- }
+ pll = intel_get_shared_dpll(crtc, crtc_state, NULL);
+ if (pll == NULL) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+ pipe_name(crtc->pipe));
+ return -EINVAL;
}
- if (is_lvds && has_reduced_clock)
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ has_reduced_clock)
crtc->lowfreq_avail = true;
- else
- crtc->lowfreq_avail = false;
return 0;
}
@@ -9355,7 +9280,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
return false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
- pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+ pipe_config->shared_dpll = NULL;
ret = false;
tmp = I915_READ(PIPECONF(crtc->pipe));
@@ -9384,6 +9309,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
struct intel_shared_dpll *pll;
+ enum intel_dpll_id pll_id;
pipe_config->has_pch_encoder = true;
@@ -9393,21 +9319,22 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
ironlake_get_fdi_m_n_config(crtc, pipe_config);
- if (HAS_PCH_IBX(dev_priv->dev)) {
- pipe_config->shared_dpll =
- (enum intel_dpll_id) crtc->pipe;
+ if (HAS_PCH_IBX(dev_priv)) {
+ pll_id = (enum intel_dpll_id) crtc->pipe;
} else {
tmp = I915_READ(PCH_DPLL_SEL);
if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
- pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
+ pll_id = DPLL_ID_PCH_PLL_B;
else
- pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
+ pll_id = DPLL_ID_PCH_PLL_A;
}
- pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
+ pipe_config->shared_dpll =
+ intel_get_shared_dpll_by_id(dev_priv, pll_id);
+ pll = pipe_config->shared_dpll;
- WARN_ON(!pll->get_hw_state(dev_priv, pll,
- &pipe_config->dpll_hw_state));
+ WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
+ &pipe_config->dpll_hw_state));
tmp = pipe_config->dpll_hw_state.dpll;
pipe_config->pixel_multiplier =
@@ -9420,6 +9347,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
}
intel_get_pipe_timings(crtc, pipe_config);
+ intel_get_pipe_src_size(crtc, pipe_config);
ironlake_get_pfit_config(crtc, pipe_config);
@@ -9512,8 +9440,8 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
val |= LCPLL_CD_SOURCE_FCLK;
I915_WRITE(LCPLL_CTL, val);
- if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE, 1))
+ if (wait_for_us(I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE, 1))
DRM_ERROR("Switching to FCLK failed\n");
val = I915_READ(LCPLL_CTL);
@@ -9586,8 +9514,8 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
val &= ~LCPLL_CD_SOURCE_FCLK;
I915_WRITE(LCPLL_CTL, val);
- if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ if (wait_for_us((I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
DRM_ERROR("Switching back to LCPLL failed\n");
}
@@ -9659,7 +9587,7 @@ static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
to_intel_atomic_state(old_state);
unsigned int req_cdclk = old_intel_state->dev_cdclk;
- broxton_set_cdclk(dev, req_cdclk);
+ broxton_set_cdclk(to_i915(dev), req_cdclk);
}
/* compute the max rate for new configuration */
@@ -9727,8 +9655,8 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
val |= LCPLL_CD_SOURCE_FCLK;
I915_WRITE(LCPLL_CTL, val);
- if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE, 1))
+ if (wait_for_us(I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE, 1))
DRM_ERROR("Switching to FCLK failed\n");
val = I915_READ(LCPLL_CTL);
@@ -9762,8 +9690,8 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
val &= ~LCPLL_CD_SOURCE_FCLK;
I915_WRITE(LCPLL_CTL, val);
- if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ if (wait_for_us((I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
DRM_ERROR("Switching back to LCPLL failed\n");
mutex_lock(&dev_priv->rps.hw_lock);
@@ -9842,72 +9770,193 @@ static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
struct intel_crtc_state *pipe_config)
{
+ enum intel_dpll_id id;
+
switch (port) {
case PORT_A:
pipe_config->ddi_pll_sel = SKL_DPLL0;
- pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
+ id = DPLL_ID_SKL_DPLL0;
break;
case PORT_B:
pipe_config->ddi_pll_sel = SKL_DPLL1;
- pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
+ id = DPLL_ID_SKL_DPLL1;
break;
case PORT_C:
pipe_config->ddi_pll_sel = SKL_DPLL2;
- pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
+ id = DPLL_ID_SKL_DPLL2;
break;
default:
DRM_ERROR("Incorrect port type\n");
+ return;
}
+
+ pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
struct intel_crtc_state *pipe_config)
{
- u32 temp, dpll_ctl1;
+ enum intel_dpll_id id;
+ u32 temp;
temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
switch (pipe_config->ddi_pll_sel) {
case SKL_DPLL0:
- /*
- * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
- * of the shared DPLL framework and thus needs to be read out
- * separately
- */
- dpll_ctl1 = I915_READ(DPLL_CTRL1);
- pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
+ id = DPLL_ID_SKL_DPLL0;
break;
case SKL_DPLL1:
- pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
+ id = DPLL_ID_SKL_DPLL1;
break;
case SKL_DPLL2:
- pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
+ id = DPLL_ID_SKL_DPLL2;
break;
case SKL_DPLL3:
- pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
+ id = DPLL_ID_SKL_DPLL3;
break;
+ default:
+ MISSING_CASE(pipe_config->ddi_pll_sel);
+ return;
}
+
+ pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
struct intel_crtc_state *pipe_config)
{
+ enum intel_dpll_id id;
+
pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
switch (pipe_config->ddi_pll_sel) {
case PORT_CLK_SEL_WRPLL1:
- pipe_config->shared_dpll = DPLL_ID_WRPLL1;
+ id = DPLL_ID_WRPLL1;
break;
case PORT_CLK_SEL_WRPLL2:
- pipe_config->shared_dpll = DPLL_ID_WRPLL2;
+ id = DPLL_ID_WRPLL2;
break;
case PORT_CLK_SEL_SPLL:
- pipe_config->shared_dpll = DPLL_ID_SPLL;
+ id = DPLL_ID_SPLL;
+ break;
+ case PORT_CLK_SEL_LCPLL_810:
+ id = DPLL_ID_LCPLL_810;
break;
+ case PORT_CLK_SEL_LCPLL_1350:
+ id = DPLL_ID_LCPLL_1350;
+ break;
+ case PORT_CLK_SEL_LCPLL_2700:
+ id = DPLL_ID_LCPLL_2700;
+ break;
+ default:
+ MISSING_CASE(pipe_config->ddi_pll_sel);
+ /* fall through */
+ case PORT_CLK_SEL_NONE:
+ return;
}
+
+ pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
+}
+
+static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config,
+ unsigned long *power_domain_mask)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
+ u32 tmp;
+
+ pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
+
+ /*
+ * XXX: Do intel_display_power_get_if_enabled before reading this (for
+ * consistency and less surprising code; it's in always on power).
+ */
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+ if (tmp & TRANS_DDI_FUNC_ENABLE) {
+ enum pipe trans_edp_pipe;
+ switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ default:
+ WARN(1, "unknown pipe linked to edp transcoder\n");
+ case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ trans_edp_pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ trans_edp_pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ trans_edp_pipe = PIPE_C;
+ break;
+ }
+
+ if (trans_edp_pipe == crtc->pipe)
+ pipe_config->cpu_transcoder = TRANSCODER_EDP;
+ }
+
+ power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+ *power_domain_mask |= BIT(power_domain);
+
+ tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
+
+ return tmp & PIPECONF_ENABLE;
+}
+
+static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config,
+ unsigned long *power_domain_mask)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
+ enum port port;
+ enum transcoder cpu_transcoder;
+ u32 tmp;
+
+ pipe_config->has_dsi_encoder = false;
+
+ for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
+ if (port == PORT_A)
+ cpu_transcoder = TRANSCODER_DSI_A;
+ else
+ cpu_transcoder = TRANSCODER_DSI_C;
+
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ continue;
+ *power_domain_mask |= BIT(power_domain);
+
+ /*
+ * The PLL needs to be enabled with a valid divider
+ * configuration, otherwise accessing DSI registers will hang
+ * the machine. See BSpec North Display Engine
+ * registers/MIPI[BXT]. We can break out here early, since we
+ * need the same DSI PLL to be enabled for both DSI ports.
+ */
+ if (!intel_dsi_pll_is_enabled(dev_priv))
+ break;
+
+ /* XXX: this works for video mode only */
+ tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
+ if (!(tmp & DPI_ENABLE))
+ continue;
+
+ tmp = I915_READ(MIPI_CTRL(port));
+ if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
+ continue;
+
+ pipe_config->cpu_transcoder = cpu_transcoder;
+ pipe_config->has_dsi_encoder = true;
+ break;
+ }
+
+ return pipe_config->has_dsi_encoder;
}
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
@@ -9930,11 +9979,10 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
else
haswell_get_ddi_pll(dev_priv, port, pipe_config);
- if (pipe_config->shared_dpll >= 0) {
- pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
-
- WARN_ON(!pll->get_hw_state(dev_priv, pll,
- &pipe_config->dpll_hw_state));
+ pll = pipe_config->shared_dpll;
+ if (pll) {
+ WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
+ &pipe_config->dpll_hw_state));
}
/*
@@ -9961,53 +10009,37 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
unsigned long power_domain_mask;
- uint32_t tmp;
- bool ret;
+ bool active;
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
power_domain_mask = BIT(power_domain);
- ret = false;
+ pipe_config->shared_dpll = NULL;
- pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
- pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+ active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
- if (tmp & TRANS_DDI_FUNC_ENABLE) {
- enum pipe trans_edp_pipe;
- switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
- default:
- WARN(1, "unknown pipe linked to edp transcoder\n");
- case TRANS_DDI_EDP_INPUT_A_ONOFF:
- case TRANS_DDI_EDP_INPUT_A_ON:
- trans_edp_pipe = PIPE_A;
- break;
- case TRANS_DDI_EDP_INPUT_B_ONOFF:
- trans_edp_pipe = PIPE_B;
- break;
- case TRANS_DDI_EDP_INPUT_C_ONOFF:
- trans_edp_pipe = PIPE_C;
- break;
- }
-
- if (trans_edp_pipe == crtc->pipe)
- pipe_config->cpu_transcoder = TRANSCODER_EDP;
+ if (IS_BROXTON(dev_priv)) {
+ bxt_get_dsi_transcoder_state(crtc, pipe_config,
+ &power_domain_mask);
+ WARN_ON(active && pipe_config->has_dsi_encoder);
+ if (pipe_config->has_dsi_encoder)
+ active = true;
}
- power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ if (!active)
goto out;
- power_domain_mask |= BIT(power_domain);
- tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
- if (!(tmp & PIPECONF_ENABLE))
- goto out;
+ if (!pipe_config->has_dsi_encoder) {
+ haswell_get_ddi_port_state(crtc, pipe_config);
+ intel_get_pipe_timings(crtc, pipe_config);
+ }
- haswell_get_ddi_port_state(crtc, pipe_config);
+ intel_get_pipe_src_size(crtc, pipe_config);
- intel_get_pipe_timings(crtc, pipe_config);
+ pipe_config->gamma_mode =
+ I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
if (INTEL_INFO(dev)->gen >= 9) {
skl_init_scalers(dev, crtc, pipe_config);
@@ -10031,20 +10063,19 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
(I915_READ(IPS_CTL) & IPS_ENABLE);
- if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
+ if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
+ !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
pipe_config->pixel_multiplier =
I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
} else {
pipe_config->pixel_multiplier = 1;
}
- ret = true;
-
out:
for_each_power_domain(power_domain, power_domain_mask)
intel_display_power_put(dev_priv, power_domain);
- return ret;
+ return active;
}
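/*
 * A minimal sketch of the power-domain pattern used throughout the readout
 * above: take a reference only if the well is already enabled, record it in
 * a mask, and drop every recorded reference in one loop at the end.
 * Hypothetical wrapper, assuming only the calls already visible above.
 */
static bool sketch_get_power_domain(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain,
				    unsigned long *mask)
{
	/* Never power the well on here; readout must not change hw state. */
	if (!intel_display_power_get_if_enabled(dev_priv, domain))
		return false;

	/* Remember the reference so the caller can put it later. */
	*mask |= BIT(domain);
	return true;
}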
static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
@@ -10237,21 +10268,6 @@ static bool cursor_size_ok(struct drm_device *dev,
return true;
}
-static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
-{
- int end = (start + size > 256) ? 256 : start + size, i;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- for (i = start; i < end; i++) {
- intel_crtc->lut_r[i] = red[i] >> 8;
- intel_crtc->lut_g[i] = green[i] >> 8;
- intel_crtc->lut_b[i] = blue[i] >> 8;
- }
-
- intel_crtc_load_lut(crtc);
-}
-
/* VESA 640x480x72Hz mode to set on the pipe */
static struct drm_display_mode load_detect_mode = {
DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
@@ -10739,19 +10755,18 @@ int intel_dotclock_calculate(int link_freq,
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* read out port_clock from the DPLL */
i9xx_crtc_clock_get(crtc, pipe_config);
/*
- * This value does not include pixel_multiplier.
- * We will check that port_clock and adjusted_mode.crtc_clock
- * agree once we know their relationship in the encoder's
- * get_config() function.
+ * In case there is an active pipe without active ports,
+ * we may need some idea for the dotclock anyway.
+ * Calculate one based on the FDI configuration.
*/
pipe_config->base.adjusted_mode.crtc_clock =
- intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
+ intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
&pipe_config->fdi_m_n);
}
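/*
 * A minimal sketch of the M/N arithmetic behind the FDI-based dotclock
 * estimate above: dotclock = link_freq * M / N, computed with a 64-bit
 * intermediate to avoid overflow. Hypothetical helper, not the driver's
 * intel_dotclock_calculate() itself.
 */
static int sketch_dotclock_from_m_n(int link_freq, u32 link_m, u32 link_n)
{
	if (!link_n)
		return 0;

	return div_u64((u64)link_m * link_freq, link_n);
}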
@@ -10870,7 +10885,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
struct drm_plane *primary = crtc->base.primary;
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(work->old_fb, primary->state);
+ intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
drm_gem_object_unreference(&work->pending_flip_obj->base);
if (work->flip_queued_req)
@@ -10944,9 +10959,10 @@ static bool page_flip_finished(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned reset_counter;
- if (i915_reset_in_progress(&dev_priv->gpu_error) ||
- crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+ reset_counter = i915_reset_counter(&dev_priv->gpu_error);
+ if (crtc->reset_counter != reset_counter)
return true;
/*
@@ -11024,7 +11040,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
int ret;
@@ -11040,13 +11056,13 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(engine, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
- intel_ring_emit(ring, 0); /* aux display base address, unused */
+ intel_ring_emit(engine, fb->pitches[0]);
+ intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(engine, 0); /* aux display base address, unused */
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
@@ -11059,7 +11075,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
int ret;
@@ -11072,13 +11088,13 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
+ intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(engine, fb->pitches[0]);
+ intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(engine, MI_NOOP);
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
@@ -11091,7 +11107,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
@@ -11105,10 +11121,10 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
* Display Registers (which do not change across a page-flip)
* so we need only reprogram the base address.
*/
- intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ intel_ring_emit(engine, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
+ intel_ring_emit(engine, fb->pitches[0]);
+ intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset |
obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
@@ -11117,7 +11133,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- intel_ring_emit(ring, pf | pipesrc);
+ intel_ring_emit(engine, pf | pipesrc);
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
@@ -11130,7 +11146,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
@@ -11140,10 +11156,10 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
if (ret)
return ret;
- intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ intel_ring_emit(engine, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
- intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
+ intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
@@ -11153,7 +11169,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- intel_ring_emit(ring, pf | pipesrc);
+ intel_ring_emit(engine, pf | pipesrc);
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
@@ -11166,7 +11182,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t plane_bit = 0;
int len, ret;
@@ -11187,7 +11203,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
}
len = 4;
- if (ring->id == RCS) {
+ if (engine->id == RCS) {
len += 6;
/*
* On Gen 8, SRM is now taking an extra dword to accommodate
@@ -11225,36 +11241,36 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
* for the RCS also doesn't appear to drop events. Setting the DERRMR
* to zero does lead to lockups within MI_DISPLAY_FLIP.
*/
- if (ring->id == RCS) {
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, DERRMR);
- intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
- DERRMR_PIPEB_PRI_FLIP_DONE |
- DERRMR_PIPEC_PRI_FLIP_DONE));
+ if (engine->id == RCS) {
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(engine, DERRMR);
+ intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+ DERRMR_PIPEB_PRI_FLIP_DONE |
+ DERRMR_PIPEC_PRI_FLIP_DONE));
if (IS_GEN8(dev))
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
+ intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT);
else
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
+ intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
MI_SRM_LRM_GLOBAL_GTT);
- intel_ring_emit_reg(ring, DERRMR);
- intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
+ intel_ring_emit_reg(engine, DERRMR);
+ intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
if (IS_GEN8(dev)) {
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, MI_NOOP);
}
}
- intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
- intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
- intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
- intel_ring_emit(ring, (MI_NOOP));
+ intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
+ intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
+ intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(engine, (MI_NOOP));
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
-static bool use_mmio_flip(struct intel_engine_cs *ring,
+static bool use_mmio_flip(struct intel_engine_cs *engine,
struct drm_i915_gem_object *obj)
{
/*
@@ -11265,10 +11281,10 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
* So using MMIO flips there would disrupt this mechanism.
*/
- if (ring == NULL)
+ if (engine == NULL)
return true;
- if (INTEL_INFO(ring->dev)->gen < 5)
+ if (INTEL_INFO(engine->dev)->gen < 5)
return false;
if (i915.use_mmio_flip < 0)
@@ -11282,7 +11298,7 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
false))
return true;
else
- return ring != i915_gem_request_get_ring(obj->last_write_req);
+ return engine != i915_gem_request_get_engine(obj->last_write_req);
}
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
@@ -11400,7 +11416,6 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
if (mmio_flip->req) {
WARN_ON(__i915_wait_request(mmio_flip->req,
- mmio_flip->crtc->reset_counter,
false, NULL,
&mmio_flip->i915->rps.mmioflips));
i915_gem_request_unreference__unlocked(mmio_flip->req);
@@ -11528,7 +11543,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_plane *primary = crtc->primary;
enum pipe pipe = intel_crtc->pipe;
struct intel_unpin_work *work;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
bool mmio_flip;
struct drm_i915_gem_request *request = NULL;
int ret;
@@ -11608,28 +11623,33 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (ret)
goto cleanup;
+ intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
+ if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
+ ret = -EIO;
+ goto cleanup;
+ }
+
atomic_inc(&intel_crtc->unpin_work_count);
- intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
- ring = &dev_priv->ring[BCS];
+ engine = &dev_priv->engine[BCS];
if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
/* vlv: DISPLAY_FLIP fails to change tiling */
- ring = NULL;
+ engine = NULL;
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
- ring = &dev_priv->ring[BCS];
+ engine = &dev_priv->engine[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
- ring = i915_gem_request_get_ring(obj->last_write_req);
- if (ring == NULL || ring->id != RCS)
- ring = &dev_priv->ring[BCS];
+ engine = i915_gem_request_get_engine(obj->last_write_req);
+ if (engine == NULL || engine->id != RCS)
+ engine = &dev_priv->engine[BCS];
} else {
- ring = &dev_priv->ring[RCS];
+ engine = &dev_priv->engine[RCS];
}
- mmio_flip = use_mmio_flip(ring, obj);
+ mmio_flip = use_mmio_flip(engine, obj);
/* When using CS flips, we want to emit semaphores between rings.
* However, when using mmio flips we will create a task to do the
@@ -11637,13 +11657,12 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
* into the display plane and skip any waits.
*/
if (!mmio_flip) {
- ret = i915_gem_object_sync(obj, ring, &request);
+ ret = i915_gem_object_sync(obj, engine, &request);
if (ret)
goto cleanup_pending;
}
- ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
- crtc->primary->state);
+ ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
if (ret)
goto cleanup_pending;
@@ -11660,7 +11679,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
obj->last_write_req);
} else {
if (!request) {
- request = i915_gem_request_alloc(ring, NULL);
+ request = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(request)) {
ret = PTR_ERR(request);
goto cleanup_unpin;
@@ -11693,10 +11712,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;
cleanup_unpin:
- intel_unpin_fb_obj(fb, crtc->primary->state);
+ intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
cleanup_pending:
if (!IS_ERR_OR_NULL(request))
- i915_gem_request_cancel(request);
+ i915_add_request_no_flush(request);
atomic_dec(&intel_crtc->unpin_work_count);
mutex_unlock(&dev->struct_mutex);
cleanup:
@@ -11746,7 +11765,7 @@ retry:
if (ret == 0 && event) {
spin_lock_irq(&dev->event_lock);
- drm_send_vblank_event(dev, pipe, event);
+ drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irq(&dev->event_lock);
}
}
@@ -11806,6 +11825,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_plane *plane = plane_state->plane;
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane_state *old_plane_state =
to_intel_plane_state(plane->state);
int idx = intel_crtc->base.base.id, ret;
@@ -11872,32 +11892,25 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
pipe_config->update_wm_post = true;
}
- if (visible || was_visible)
- intel_crtc->atomic.fb_bits |=
- to_intel_plane(plane)->frontbuffer_bit;
+ /* Pre-gen9 platforms need two-step watermark updates */
+ if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
+ INTEL_INFO(dev)->gen < 9 && dev_priv->display.optimize_watermarks)
+ to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;
- switch (plane->type) {
- case DRM_PLANE_TYPE_PRIMARY:
- intel_crtc->atomic.post_enable_primary = turn_on;
- intel_crtc->atomic.update_fbc = true;
+ if (visible || was_visible)
+ pipe_config->fb_bits |= to_intel_plane(plane)->frontbuffer_bit;
- break;
- case DRM_PLANE_TYPE_CURSOR:
- break;
- case DRM_PLANE_TYPE_OVERLAY:
- /*
- * WaCxSRDisabledForSpriteScaling:ivb
- *
- * cstate->update_wm was already set above, so this flag will
- * take effect when we commit and program watermarks.
- */
- if (IS_IVYBRIDGE(dev) &&
- needs_scaling(to_intel_plane_state(plane_state)) &&
- !needs_scaling(old_plane_state))
- pipe_config->disable_lp_wm = true;
+ /*
+ * WaCxSRDisabledForSpriteScaling:ivb
+ *
+ * cstate->update_wm was already set above, so this flag will
+ * take effect when we commit and program watermarks.
+ */
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev) &&
+ needs_scaling(to_intel_plane_state(plane_state)) &&
+ !needs_scaling(old_plane_state))
+ pipe_config->disable_lp_wm = true;
- break;
- }
return 0;
}
@@ -11973,18 +11986,54 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
if (mode_changed && crtc_state->enable &&
dev_priv->display.crtc_compute_clock &&
- !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) {
+ !WARN_ON(pipe_config->shared_dpll)) {
ret = dev_priv->display.crtc_compute_clock(intel_crtc,
pipe_config);
if (ret)
return ret;
}
+ if (crtc_state->color_mgmt_changed) {
+ ret = intel_color_check(crtc, crtc_state);
+ if (ret)
+ return ret;
+
+ /*
+ * Changing color management on Intel hardware is
+ * handled as part of planes update.
+ */
+ crtc_state->planes_changed = true;
+ }
+
ret = 0;
if (dev_priv->display.compute_pipe_wm) {
- ret = dev_priv->display.compute_pipe_wm(intel_crtc, state);
- if (ret)
+ ret = dev_priv->display.compute_pipe_wm(pipe_config);
+ if (ret) {
+ DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
return ret;
+ }
+ }
+
+ if (dev_priv->display.compute_intermediate_wm &&
+ !to_intel_atomic_state(state)->skip_intermediate_wm) {
+ if (WARN_ON(!dev_priv->display.compute_pipe_wm))
+ return 0;
+
+ /*
+ * Calculate 'intermediate' watermarks that satisfy both the
+ * old state and the new state. We can program these
+ * immediately.
+ */
+ ret = dev_priv->display.compute_intermediate_wm(crtc->dev,
+ intel_crtc,
+ pipe_config);
+ if (ret) {
+ DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
+ return ret;
+ }
+ } else if (dev_priv->display.compute_intermediate_wm) {
+ if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
+ pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk;
}
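/*
 * A sketch of one way 'intermediate' watermarks can satisfy both the old
 * and the new configuration, as the comment above requires: take the
 * per-level maximum of the two. This is only an illustration of the idea,
 * not the driver's compute_intermediate_wm callback.
 */
static void sketch_merge_intermediate_wm(const int *old_wm, const int *new_wm,
					 int *intermediate, int num_levels)
{
	int level;

	for (level = 0; level < num_levels; level++)
		intermediate[level] = max(old_wm[level], new_wm[level]);
}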
if (INTEL_INFO(dev)->gen >= 9) {
@@ -12001,7 +12050,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
.mode_set_base_atomic = intel_pipe_set_base_atomic,
- .load_lut = intel_crtc_load_lut,
.atomic_begin = intel_begin_crtc_commit,
.atomic_flush = intel_finish_crtc_commit,
.atomic_check = intel_crtc_atomic_check,
@@ -12012,11 +12060,16 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
struct intel_connector *connector;
for_each_intel_connector(dev, connector) {
+ if (connector->base.state->crtc)
+ drm_connector_unreference(&connector->base);
+
if (connector->base.encoder) {
connector->base.state->best_encoder =
connector->base.encoder;
connector->base.state->crtc =
connector->base.encoder->crtc;
+
+ drm_connector_reference(&connector->base);
} else {
connector->base.state->best_encoder = NULL;
connector->base.state->crtc = NULL;
@@ -12118,7 +12171,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
context, pipe_config, pipe_name(crtc->pipe));
- DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
+ DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
pipe_config->pipe_bpp, pipe_config->dither);
DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
@@ -12194,7 +12247,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->dpll_hw_state.cfgcr1,
pipe_config->dpll_hw_state.cfgcr2);
} else if (HAS_DDI(dev)) {
- DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
+ DRM_DEBUG_KMS("ddi_pll_sel: 0x%x; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
pipe_config->ddi_pll_sel,
pipe_config->dpll_hw_state.wrpll,
pipe_config->dpll_hw_state.spll);
@@ -12297,7 +12350,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
struct drm_crtc_state tmp_state;
struct intel_crtc_scaler_state scaler_state;
struct intel_dpll_hw_state dpll_hw_state;
- enum intel_dpll_id shared_dpll;
+ struct intel_shared_dpll *shared_dpll;
uint32_t ddi_pll_sel;
bool force_thru;
@@ -12567,6 +12620,15 @@ intel_pipe_config_compare(struct drm_device *dev,
ret = false; \
}
+#define PIPE_CONF_CHECK_P(name) \
+ if (current_config->name != pipe_config->name) { \
+ INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+ "(expected %p, found %p)\n", \
+ current_config->name, \
+ pipe_config->name); \
+ ret = false; \
+ }
+
#define PIPE_CONF_CHECK_M_N(name) \
if (!intel_compare_link_m_n(&current_config->name, \
&pipe_config->name,\
@@ -12587,6 +12649,11 @@ intel_pipe_config_compare(struct drm_device *dev,
ret = false; \
}
+/* This is required for BDW+ where there is only one set of registers for
+ * switching between high and low RR.
+ * This macro can be used whenever a comparison has to be made between one
+ * hw state and multiple sw state variables.
+ */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
if (!intel_compare_link_m_n(&current_config->name, \
&pipe_config->name, adjust) && \
@@ -12614,22 +12681,6 @@ intel_pipe_config_compare(struct drm_device *dev,
ret = false; \
}
-/* This is required for BDW+ where there is only one set of registers for
- * switching between high and low RR.
- * This macro can be used whenever a comparison has to be made between one
- * hw state and multiple sw state variables.
- */
-#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
- if ((current_config->name != pipe_config->name) && \
- (current_config->alt_name != pipe_config->name)) { \
- INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
- "(expected %i or %i, found %i)\n", \
- current_config->name, \
- current_config->alt_name, \
- pipe_config->name); \
- ret = false; \
- }
-
#define PIPE_CONF_CHECK_FLAGS(name, mask) \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
@@ -12710,7 +12761,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_X(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
if (INTEL_INFO(dev)->gen < 4)
- PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+ PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
if (!adjust) {
@@ -12734,7 +12785,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_X(ddi_pll_sel);
- PIPE_CONF_CHECK_I(shared_dpll);
+ PIPE_CONF_CHECK_P(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
@@ -12745,6 +12796,9 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
+ PIPE_CONF_CHECK_X(dsi_pll.ctrl);
+ PIPE_CONF_CHECK_X(dsi_pll.div);
+
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
@@ -12753,7 +12807,7 @@ intel_pipe_config_compare(struct drm_device *dev,
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
-#undef PIPE_CONF_CHECK_I_ALT
+#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK
@@ -12762,48 +12816,61 @@ intel_pipe_config_compare(struct drm_device *dev,
return ret;
}
-static void check_wm_state(struct drm_device *dev)
+static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
+ const struct intel_crtc_state *pipe_config)
{
+ if (pipe_config->has_pch_encoder) {
+ int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
+ &pipe_config->fdi_m_n);
+ int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
+
+ /*
+ * FDI already provided one idea for the dotclock.
+ * Yell if the encoder disagrees.
+ */
+ WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
+ "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
+ fdi_dotclock, dotclock);
+ }
+}
+
+static void verify_wm_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *new_state)
+{
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct skl_ddb_allocation hw_ddb, *sw_ddb;
- struct intel_crtc *intel_crtc;
+ struct skl_ddb_entry *hw_entry, *sw_entry;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ const enum pipe pipe = intel_crtc->pipe;
int plane;
- if (INTEL_INFO(dev)->gen < 9)
+ if (INTEL_INFO(dev)->gen < 9 || !new_state->active)
return;
skl_ddb_get_hw_state(dev_priv, &hw_ddb);
sw_ddb = &dev_priv->wm.skl_hw.ddb;
- for_each_intel_crtc(dev, intel_crtc) {
- struct skl_ddb_entry *hw_entry, *sw_entry;
- const enum pipe pipe = intel_crtc->pipe;
+ /* planes */
+ for_each_plane(dev_priv, pipe, plane) {
+ hw_entry = &hw_ddb.plane[pipe][plane];
+ sw_entry = &sw_ddb->plane[pipe][plane];
- if (!intel_crtc->active)
+ if (skl_ddb_entry_equal(hw_entry, sw_entry))
continue;
- /* planes */
- for_each_plane(dev_priv, pipe, plane) {
- hw_entry = &hw_ddb.plane[pipe][plane];
- sw_entry = &sw_ddb->plane[pipe][plane];
-
- if (skl_ddb_entry_equal(hw_entry, sw_entry))
- continue;
-
- DRM_ERROR("mismatch in DDB state pipe %c plane %d "
- "(expected (%u,%u), found (%u,%u))\n",
- pipe_name(pipe), plane + 1,
- sw_entry->start, sw_entry->end,
- hw_entry->start, hw_entry->end);
- }
+ DRM_ERROR("mismatch in DDB state pipe %c plane %d "
+ "(expected (%u,%u), found (%u,%u))\n",
+ pipe_name(pipe), plane + 1,
+ sw_entry->start, sw_entry->end,
+ hw_entry->start, hw_entry->end);
+ }
- /* cursor */
- hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
- sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
-
- if (skl_ddb_entry_equal(hw_entry, sw_entry))
- continue;
+ /* cursor */
+ hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
+ sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+ if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
DRM_ERROR("mismatch in DDB state pipe %c cursor "
"(expected (%u,%u), found (%u,%u))\n",
pipe_name(pipe),
@@ -12813,20 +12880,18 @@ static void check_wm_state(struct drm_device *dev)
}
static void
-check_connector_state(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc)
{
- struct drm_connector_state *old_conn_state;
struct drm_connector *connector;
- int i;
- for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+ drm_for_each_connector(connector, dev) {
struct drm_encoder *encoder = connector->encoder;
struct drm_connector_state *state = connector->state;
- /* This also checks the encoder/connector hw state with the
- * ->get_hw_state callbacks. */
- intel_connector_check_state(to_intel_connector(connector));
+ if (state->crtc != crtc)
+ continue;
+
+ intel_connector_verify_state(to_intel_connector(connector));
I915_STATE_WARN(state->best_encoder != encoder,
"connector's atomic encoder doesn't match legacy encoder\n");
@@ -12834,7 +12899,7 @@ check_connector_state(struct drm_device *dev,
}
static void
-check_encoder_state(struct drm_device *dev)
+verify_encoder_state(struct drm_device *dev)
{
struct intel_encoder *encoder;
struct intel_connector *connector;
@@ -12874,149 +12939,186 @@ check_encoder_state(struct drm_device *dev)
}
static void
-check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
+verify_crtc_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state)
{
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
- struct drm_crtc_state *old_crtc_state;
- struct drm_crtc *crtc;
- int i;
-
- for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *pipe_config, *sw_config;
- bool active;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc_state *pipe_config, *sw_config;
+ struct drm_atomic_state *old_state;
+ bool active;
- if (!needs_modeset(crtc->state) &&
- !to_intel_crtc_state(crtc->state)->update_pipe)
- continue;
+ old_state = old_crtc_state->state;
+ __drm_atomic_helper_crtc_destroy_state(old_crtc_state);
+ pipe_config = to_intel_crtc_state(old_crtc_state);
+ memset(pipe_config, 0, sizeof(*pipe_config));
+ pipe_config->base.crtc = crtc;
+ pipe_config->base.state = old_state;
- __drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
- pipe_config = to_intel_crtc_state(old_crtc_state);
- memset(pipe_config, 0, sizeof(*pipe_config));
- pipe_config->base.crtc = crtc;
- pipe_config->base.state = old_state;
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
- DRM_DEBUG_KMS("[CRTC:%d]\n",
- crtc->base.id);
+ active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
- active = dev_priv->display.get_pipe_config(intel_crtc,
- pipe_config);
+ /* hw state is inconsistent with the pipe quirk */
+ if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
+ (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
+ active = new_crtc_state->active;
- /* hw state is inconsistent with the pipe quirk */
- if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
- (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
- active = crtc->state->active;
+ I915_STATE_WARN(new_crtc_state->active != active,
+ "crtc active state doesn't match with hw state "
+ "(expected %i, found %i)\n", new_crtc_state->active, active);
- I915_STATE_WARN(crtc->state->active != active,
- "crtc active state doesn't match with hw state "
- "(expected %i, found %i)\n", crtc->state->active, active);
+ I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
+ "transitional active state does not match atomic hw state "
+ "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
- I915_STATE_WARN(intel_crtc->active != crtc->state->active,
- "transitional active state does not match atomic hw state "
- "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active);
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ enum pipe pipe;
- for_each_encoder_on_crtc(dev, crtc, encoder) {
- enum pipe pipe;
+ active = encoder->get_hw_state(encoder, &pipe);
+ I915_STATE_WARN(active != new_crtc_state->active,
+ "[ENCODER:%i] active %i with crtc active %i\n",
+ encoder->base.base.id, active, new_crtc_state->active);
- active = encoder->get_hw_state(encoder, &pipe);
- I915_STATE_WARN(active != crtc->state->active,
- "[ENCODER:%i] active %i with crtc active %i\n",
- encoder->base.base.id, active, crtc->state->active);
+ I915_STATE_WARN(active && intel_crtc->pipe != pipe,
+ "Encoder connected to wrong pipe %c\n",
+ pipe_name(pipe));
- I915_STATE_WARN(active && intel_crtc->pipe != pipe,
- "Encoder connected to wrong pipe %c\n",
- pipe_name(pipe));
+ if (active)
+ encoder->get_config(encoder, pipe_config);
+ }
- if (active)
- encoder->get_config(encoder, pipe_config);
- }
+ if (!new_crtc_state->active)
+ return;
- if (!crtc->state->active)
- continue;
+ intel_pipe_config_sanity_check(dev_priv, pipe_config);
- sw_config = to_intel_crtc_state(crtc->state);
- if (!intel_pipe_config_compare(dev, sw_config,
- pipe_config, false)) {
- I915_STATE_WARN(1, "pipe state doesn't match!\n");
- intel_dump_pipe_config(intel_crtc, pipe_config,
- "[hw state]");
- intel_dump_pipe_config(intel_crtc, sw_config,
- "[sw state]");
- }
+ sw_config = to_intel_crtc_state(crtc->state);
+ if (!intel_pipe_config_compare(dev, sw_config,
+ pipe_config, false)) {
+ I915_STATE_WARN(1, "pipe state doesn't match!\n");
+ intel_dump_pipe_config(intel_crtc, pipe_config,
+ "[hw state]");
+ intel_dump_pipe_config(intel_crtc, sw_config,
+ "[sw state]");
}
}
static void
-check_shared_dpll_state(struct drm_device *dev)
+verify_single_dpll_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *new_state)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *crtc;
struct intel_dpll_hw_state dpll_hw_state;
- int i;
+ unsigned crtc_mask;
+ bool active;
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
- int enabled_crtcs = 0, active_crtcs = 0;
- bool active;
+ memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
- memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
+ DRM_DEBUG_KMS("%s\n", pll->name);
- DRM_DEBUG_KMS("%s\n", pll->name);
+ active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);
- active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
-
- I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
- "more active pll users than references: %i vs %i\n",
- pll->active, hweight32(pll->config.crtc_mask));
- I915_STATE_WARN(pll->active && !pll->on,
+ if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
+ I915_STATE_WARN(!pll->on && pll->active_mask,
"pll in active use but not on in sw tracking\n");
- I915_STATE_WARN(pll->on && !pll->active,
- "pll in on but not on in use in sw tracking\n");
+ I915_STATE_WARN(pll->on && !pll->active_mask,
+ "pll is on but not used by any active crtc\n");
I915_STATE_WARN(pll->on != active,
"pll on state mismatch (expected %i, found %i)\n",
pll->on, active);
+ }
- for_each_intel_crtc(dev, crtc) {
- if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
- enabled_crtcs++;
- if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
- active_crtcs++;
- }
- I915_STATE_WARN(pll->active != active_crtcs,
- "pll active crtcs mismatch (expected %i, found %i)\n",
- pll->active, active_crtcs);
- I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
- "pll enabled crtcs mismatch (expected %i, found %i)\n",
- hweight32(pll->config.crtc_mask), enabled_crtcs);
+ if (!crtc) {
+ I915_STATE_WARN(pll->active_mask & ~pll->config.crtc_mask,
+ "more active pll users than references: %x vs %x\n",
+ pll->active_mask, pll->config.crtc_mask);
- I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
- sizeof(dpll_hw_state)),
- "pll hw state mismatch\n");
+ return;
}
+
+ crtc_mask = 1 << drm_crtc_index(crtc);
+
+ if (new_state->active)
+ I915_STATE_WARN(!(pll->active_mask & crtc_mask),
+ "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
+ pipe_name(drm_crtc_index(crtc)), pll->active_mask);
+ else
+ I915_STATE_WARN(pll->active_mask & crtc_mask,
+ "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
+ pipe_name(drm_crtc_index(crtc)), pll->active_mask);
+
+ I915_STATE_WARN(!(pll->config.crtc_mask & crtc_mask),
+ "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
+ crtc_mask, pll->config.crtc_mask);
+
+ I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state,
+ &dpll_hw_state,
+ sizeof(dpll_hw_state)),
+ "pll hw state mismatch\n");
}
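/*
 * A sketch of the bitmask invariant the checks above enforce: each crtc
 * contributes BIT(drm_crtc_index(crtc)) to config.crtc_mask while it
 * references the PLL and to active_mask while it is running, so (ignoring
 * always-on PLLs) the PLL should be on exactly when active_mask is non-zero.
 * Hypothetical helper expressing that invariant.
 */
static bool sketch_dpll_state_consistent(unsigned int crtc_mask,
					 unsigned int active_mask, bool on)
{
	/* Every active user must also hold a reference... */
	if (active_mask & ~crtc_mask)
		return false;

	/* ...and the PLL is on exactly when someone is actively using it. */
	return on == (active_mask != 0);
}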
static void
-intel_modeset_check_state(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state)
{
- check_wm_state(dev);
- check_connector_state(dev, old_state);
- check_encoder_state(dev);
- check_crtc_state(dev, old_state);
- check_shared_dpll_state(dev);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
+ struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
+
+ if (new_state->shared_dpll)
+ verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
+
+ if (old_state->shared_dpll &&
+ old_state->shared_dpll != new_state->shared_dpll) {
+ unsigned crtc_mask = 1 << drm_crtc_index(crtc);
+ struct intel_shared_dpll *pll = old_state->shared_dpll;
+
+ I915_STATE_WARN(pll->active_mask & crtc_mask,
+ "pll active mismatch (didn't expect pipe %c in active mask)\n",
+ pipe_name(drm_crtc_index(crtc)));
+ I915_STATE_WARN(pll->config.crtc_mask & crtc_mask,
+ "pll enabled crtcs mismatch (found %x in enabled mask)\n",
+ pipe_name(drm_crtc_index(crtc)));
+ }
}
-void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
- int dotclock)
+static void
+intel_modeset_verify_crtc(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state,
+ struct drm_crtc_state *new_state)
{
- /*
- * FDI already provided one idea for the dotclock.
- * Yell if the encoder disagrees.
- */
- WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock),
- "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
- pipe_config->base.adjusted_mode.crtc_clock, dotclock);
+ if (!needs_modeset(new_state) &&
+ !to_intel_crtc_state(new_state)->update_pipe)
+ return;
+
+ verify_wm_state(crtc, new_state);
+ verify_connector_state(crtc->dev, crtc);
+ verify_crtc_state(crtc, old_state, new_state);
+ verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
+}
+
+static void
+verify_disabled_dpll_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++)
+ verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
+}
+
+static void
+intel_modeset_verify_disabled(struct drm_device *dev)
+{
+ verify_encoder_state(dev);
+ verify_connector_state(dev, NULL);
+ verify_disabled_dpll_state(dev);
}
static void update_scanline_offset(struct intel_crtc *crtc)
@@ -13071,20 +13173,21 @@ static void intel_modeset_clear_plls(struct drm_atomic_state *state)
for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int old_dpll = to_intel_crtc_state(crtc->state)->shared_dpll;
+ struct intel_shared_dpll *old_dpll =
+ to_intel_crtc_state(crtc->state)->shared_dpll;
if (!needs_modeset(crtc_state))
continue;
- to_intel_crtc_state(crtc_state)->shared_dpll = DPLL_ID_PRIVATE;
+ to_intel_crtc_state(crtc_state)->shared_dpll = NULL;
- if (old_dpll == DPLL_ID_PRIVATE)
+ if (!old_dpll)
continue;
if (!shared_dpll)
shared_dpll = intel_atomic_get_shared_dpll_state(state);
- shared_dpll[old_dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
+ intel_shared_dpll_config_put(shared_dpll, old_dpll, intel_crtc);
}
}
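
The hunk above switches from integer DPLL ids to pointers and delegates the mask update to intel_shared_dpll_config_put(); its implementation is not part of this diff. As a rough sketch of what such a helper boils down to, with hypothetical names and types:

struct shared_dpll_config {
	unsigned int crtc_mask;   /* which CRTCs use this PLL in the new state */
};

/* hypothetical stand-in for intel_shared_dpll_config_put() */
static void shared_dpll_config_put(struct shared_dpll_config *cfg,
				   unsigned int crtc_index)
{
	cfg->crtc_mask &= ~(1u << crtc_index);
}
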
@@ -13296,9 +13399,6 @@ static int intel_atomic_check(struct drm_device *dev,
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc_state);
- memset(&to_intel_crtc(crtc)->atomic, 0,
- sizeof(struct intel_crtc_atomic_commit));
-
/* Catch I915_MODE_FLAG_INHERITED */
if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
crtc_state->mode_changed = true;
@@ -13364,7 +13464,7 @@ static int intel_atomic_check(struct drm_device *dev,
static int intel_atomic_prepare_commit(struct drm_device *dev,
struct drm_atomic_state *state,
- bool async)
+ bool nonblock)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_plane_state *plane_state;
@@ -13373,8 +13473,8 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
struct drm_crtc *crtc;
int i, ret;
- if (async) {
- DRM_DEBUG_KMS("i915 does not yet support async commit\n");
+ if (nonblock) {
+ DRM_DEBUG_KMS("i915 does not yet support nonblocking commit\n");
return -EINVAL;
}
@@ -13395,12 +13495,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
return ret;
ret = drm_atomic_helper_prepare_planes(dev, state);
- if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) {
- u32 reset_counter;
-
- reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
+ if (!ret && !nonblock) {
for_each_plane_in_state(state, plane, plane_state, i) {
struct intel_plane_state *intel_plane_state =
to_intel_plane_state(plane_state);
@@ -13409,25 +13506,18 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
continue;
ret = __i915_wait_request(intel_plane_state->wait_req,
- reset_counter, true,
- NULL, NULL);
-
- /* Swallow -EIO errors to allow updates during hw lockup. */
- if (ret == -EIO)
- ret = 0;
-
- if (ret)
+ true, NULL, NULL);
+ if (ret) {
+ /* Any hang should be swallowed by the wait */
+ WARN_ON(ret == -EIO);
+ mutex_lock(&dev->struct_mutex);
+ drm_atomic_helper_cleanup_planes(dev, state);
+ mutex_unlock(&dev->struct_mutex);
break;
+ }
}
-
- if (!ret)
- return 0;
-
- mutex_lock(&dev->struct_mutex);
- drm_atomic_helper_cleanup_planes(dev, state);
}
- mutex_unlock(&dev->struct_mutex);
return ret;
}
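
The reworked prepare path now drops struct_mutex right after preparing the planes and, for blocking commits, waits on each plane's outstanding request; a GPU hang is expected to be swallowed by the wait itself, so any remaining error cleans up the planes and aborts. Schematically, with stand-in callbacks rather than the driver's types:

/* first failing wait cleans up and aborts; zero means every wait passed */
static int wait_for_all(int n, int (*wait_one)(int idx), void (*cleanup)(void))
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		ret = wait_one(i);
		if (ret) {
			cleanup();
			break;
		}
	}
	return ret;
}
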
@@ -13469,7 +13559,7 @@ static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
drm_crtc_vblank_count(crtc),
msecs_to_jiffies(50));
- WARN_ON(!lret);
+ WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));
drm_crtc_vblank_put(crtc);
}
@@ -13500,39 +13590,41 @@ static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
* intel_atomic_commit - commit validated state object
* @dev: DRM device
* @state: the top-level driver state object
- * @async: asynchronous commit
+ * @nonblock: nonblocking commit
*
* This function commits a top-level state object that has been validated
* with drm_atomic_helper_check().
*
* FIXME: Atomic modeset support for i915 is not yet complete. At the moment
* we can only handle plane-related operations and do not yet support
- * asynchronous commit.
+ * nonblocking commit.
*
* RETURNS
* Zero for success or -errno.
*/
static int intel_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state,
- bool async)
+ bool nonblock)
{
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc_state *crtc_state;
+ struct drm_crtc_state *old_crtc_state;
struct drm_crtc *crtc;
+ struct intel_crtc_state *intel_cstate;
int ret = 0, i;
bool hw_check = intel_state->modeset;
unsigned long put_domains[I915_MAX_PIPES] = {};
unsigned crtc_vblank_mask = 0;
- ret = intel_atomic_prepare_commit(dev, state, async);
+ ret = intel_atomic_prepare_commit(dev, state, nonblock);
if (ret) {
DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
return ret;
}
drm_atomic_helper_swap_state(dev, state);
- dev_priv->wm.config = to_intel_atomic_state(state)->wm_config;
+ dev_priv->wm.config = intel_state->wm_config;
+ intel_shared_dpll_commit(state);
if (intel_state->modeset) {
memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
@@ -13543,7 +13635,7 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
}
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
if (needs_modeset(crtc->state) ||
@@ -13558,10 +13650,10 @@ static int intel_atomic_commit(struct drm_device *dev,
if (!needs_modeset(crtc->state))
continue;
- intel_pre_plane_update(to_intel_crtc_state(crtc_state));
+ intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
- if (crtc_state->active) {
- intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
+ if (old_crtc_state->active) {
+ intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
dev_priv->display.crtc_disable(crtc);
intel_crtc->active = false;
intel_fbc_disable(intel_crtc);
@@ -13584,17 +13676,17 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_modeset_update_crtc_state(state);
if (intel_state->modeset) {
- intel_shared_dpll_commit(state);
-
drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
if (dev_priv->display.modeset_commit_cdclk &&
intel_state->dev_cdclk != dev_priv->cdclk_freq)
dev_priv->display.modeset_commit_cdclk(state);
+
+ intel_modeset_verify_disabled(dev);
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
bool modeset = needs_modeset(crtc->state);
struct intel_crtc_state *pipe_config =
@@ -13607,14 +13699,15 @@ static int intel_atomic_commit(struct drm_device *dev,
}
if (!modeset)
- intel_pre_plane_update(to_intel_crtc_state(crtc_state));
+ intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
- if (crtc->state->active && intel_crtc->atomic.update_fbc)
+ if (crtc->state->active &&
+ drm_atomic_get_existing_plane_state(state, crtc->primary))
intel_fbc_enable(intel_crtc);
if (crtc->state->active &&
(crtc->state->planes_changed || update_pipe))
- drm_atomic_helper_commit_planes_on_crtc(crtc_state);
+ drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
if (pipe_config->base.active && needs_vblank_wait(pipe_config))
crtc_vblank_mask |= 1 << i;
@@ -13625,11 +13718,27 @@ static int intel_atomic_commit(struct drm_device *dev,
if (!state->legacy_cursor_update)
intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- intel_post_plane_update(to_intel_crtc(crtc));
+ /*
+ * Now that the vblank has passed, we can go ahead and program the
+ * optimal watermarks on platforms that need two-step watermark
+ * programming.
+ *
+ * TODO: Move this (and other cleanup) to an async worker eventually.
+ */
+ for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+ intel_cstate = to_intel_crtc_state(crtc->state);
+
+ if (dev_priv->display.optimize_watermarks)
+ dev_priv->display.optimize_watermarks(intel_cstate);
+ }
+
+ for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+ intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
if (put_domains[i])
modeset_put_power_domains(dev_priv, put_domains[i]);
+
+ intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
}
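
The optimize_watermarks loop above exists because of two-step watermark programming: before the plane update the hardware has to run with values that are safe for both the old and the new configuration, and only after the vblank can the optimal values for the new configuration be written. A simplified illustration of that split, using made-up levels rather than real register fields:

struct wm_levels {
	unsigned int old_wm;   /* safe for the old plane configuration */
	unsigned int new_wm;   /* safe for the new plane configuration */
};

/* programmed before the update: must cover both configurations */
static unsigned int intermediate_wm(const struct wm_levels *wm)
{
	return wm->old_wm > wm->new_wm ? wm->old_wm : wm->new_wm;
}

/* programmed after the vblank: only the new configuration matters */
static unsigned int optimal_wm(const struct wm_levels *wm)
{
	return wm->new_wm;
}
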
if (intel_state->modeset)
@@ -13639,9 +13748,6 @@ static int intel_atomic_commit(struct drm_device *dev,
drm_atomic_helper_cleanup_planes(dev, state);
mutex_unlock(&dev->struct_mutex);
- if (hw_check)
- intel_modeset_check_state(dev, state);
-
drm_atomic_state_free(state);
/* As one of the primary mmio accessors, KMS has a high likelihood
@@ -13701,116 +13807,15 @@ out:
#undef for_each_intel_crtc_masked
static const struct drm_crtc_funcs intel_crtc_funcs = {
- .gamma_set = intel_crtc_gamma_set,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
.set_config = drm_atomic_helper_set_config,
+ .set_property = drm_atomic_helper_crtc_set_property,
.destroy = intel_crtc_destroy,
.page_flip = intel_crtc_page_flip,
.atomic_duplicate_state = intel_crtc_duplicate_state,
.atomic_destroy_state = intel_crtc_destroy_state,
};
-static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
-{
- uint32_t val;
-
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
- return false;
-
- val = I915_READ(PCH_DPLL(pll->id));
- hw_state->dpll = val;
- hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
- hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
-
- return val & DPLL_VCO_ENABLE;
-}
-
-static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
-{
- I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
- I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
-}
-
-static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
-{
- /* PCH refclock must be enabled first */
- ibx_assert_pch_refclk_enabled(dev_priv);
-
- I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(PCH_DPLL(pll->id));
- udelay(150);
-
- /* The pixel multiplier can only be updated once the
- * DPLL is enabled and the clocks are stable.
- *
- * So write it again.
- */
- I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
- POSTING_READ(PCH_DPLL(pll->id));
- udelay(200);
-}
-
-static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
-{
- struct drm_device *dev = dev_priv->dev;
- struct intel_crtc *crtc;
-
- /* Make sure no transcoder isn't still depending on us. */
- for_each_intel_crtc(dev, crtc) {
- if (intel_crtc_to_shared_dpll(crtc) == pll)
- assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
- }
-
- I915_WRITE(PCH_DPLL(pll->id), 0);
- POSTING_READ(PCH_DPLL(pll->id));
- udelay(200);
-}
-
-static char *ibx_pch_dpll_names[] = {
- "PCH DPLL A",
- "PCH DPLL B",
-};
-
-static void ibx_pch_dpll_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
-
- dev_priv->num_shared_dpll = 2;
-
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- dev_priv->shared_dplls[i].id = i;
- dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
- dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
- dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
- dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
- dev_priv->shared_dplls[i].get_hw_state =
- ibx_pch_dpll_get_hw_state;
- }
-}
-
-static void intel_shared_dpll_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (HAS_DDI(dev))
- intel_ddi_pll_init(dev);
- else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
- ibx_pch_dpll_init(dev);
- else
- dev_priv->num_shared_dpll = 0;
-
- BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
-}
-
/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @plane: drm plane to prepare for
@@ -13856,10 +13861,11 @@ intel_prepare_plane_fb(struct drm_plane *plane,
*/
if (needs_modeset(crtc_state))
ret = i915_gem_object_wait_rendering(old_obj, true);
-
- /* Swallow -EIO errors to allow updates during hw lockup. */
- if (ret && ret != -EIO)
+ if (ret) {
+ /* GPU hangs should have been swallowed by the wait */
+ WARN_ON(ret == -EIO);
return ret;
+ }
}
/* For framebuffer backed by dmabuf, wait for fence */
@@ -13884,7 +13890,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (ret)
DRM_DEBUG_KMS("failed to attach phys object\n");
} else {
- ret = intel_pin_and_fence_fb_obj(plane, fb, new_state);
+ ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
}
if (ret == 0) {
@@ -13928,7 +13934,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
!INTEL_INFO(dev)->cursor_needs_physical))
- intel_unpin_fb_obj(old_state->fb, old_state);
+ intel_unpin_fb_obj(old_state->fb, old_state->rotation);
/* prepare_fb aborted? */
if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
@@ -13936,7 +13942,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
i915_gem_request_assign(&old_intel_state->wait_req, NULL);
-
}
int
@@ -14011,6 +14016,11 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
if (modeset)
return;
+ if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
+ intel_color_set_csc(crtc->state);
+ intel_color_load_luts(crtc->state);
+ }
+
if (to_intel_crtc_state(crtc->state)->update_pipe)
intel_update_pipe_config(intel_crtc, old_intel_state);
else if (INTEL_INFO(dev)->gen >= 9)
@@ -14054,20 +14064,19 @@ const struct drm_plane_funcs intel_plane_funcs = {
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
int pipe)
{
- struct intel_plane *primary;
- struct intel_plane_state *state;
+ struct intel_plane *primary = NULL;
+ struct intel_plane_state *state = NULL;
const uint32_t *intel_primary_formats;
unsigned int num_formats;
+ int ret;
primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (primary == NULL)
- return NULL;
+ if (!primary)
+ goto fail;
state = intel_create_plane_state(&primary->base);
- if (!state) {
- kfree(primary);
- return NULL;
- }
+ if (!state)
+ goto fail;
primary->base.state = &state->base;
primary->can_scale = false;
@@ -14109,10 +14118,12 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
primary->disable_plane = i9xx_disable_primary_plane;
}
- drm_universal_plane_init(dev, &primary->base, 0,
- &intel_plane_funcs,
- intel_primary_formats, num_formats,
- DRM_PLANE_TYPE_PRIMARY, NULL);
+ ret = drm_universal_plane_init(dev, &primary->base, 0,
+ &intel_plane_funcs,
+ intel_primary_formats, num_formats,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ goto fail;
if (INTEL_INFO(dev)->gen >= 4)
intel_create_rotation_property(dev, primary);
@@ -14120,6 +14131,12 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
return &primary->base;
+
+fail:
+ kfree(state);
+ kfree(primary);
+
+ return NULL;
}
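
The plane constructors now funnel every error through a single fail label; since kfree(NULL) is a no-op, both allocations can be freed there unconditionally. The same idiom in standalone form, with calloc/free standing in for kzalloc/kfree:

#include <stdlib.h>

struct plane_state { int dummy; };
struct plane { struct plane_state *state; };

static struct plane *plane_create(void)
{
	struct plane *plane = NULL;
	struct plane_state *state = NULL;

	plane = calloc(1, sizeof(*plane));
	if (!plane)
		goto fail;

	state = calloc(1, sizeof(*state));
	if (!state)
		goto fail;

	plane->state = state;
	/* ... further init steps may also 'goto fail' ... */
	return plane;

fail:
	free(state);   /* free(NULL) is a no-op, just like kfree(NULL) */
	free(plane);
	return NULL;
}

int main(void)
{
	struct plane *p = plane_create();

	if (p)
		free(p->state);
	free(p);
	return 0;
}
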
void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
@@ -14236,18 +14253,17 @@ intel_update_cursor_plane(struct drm_plane *plane,
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
int pipe)
{
- struct intel_plane *cursor;
- struct intel_plane_state *state;
+ struct intel_plane *cursor = NULL;
+ struct intel_plane_state *state = NULL;
+ int ret;
cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
- if (cursor == NULL)
- return NULL;
+ if (!cursor)
+ goto fail;
state = intel_create_plane_state(&cursor->base);
- if (!state) {
- kfree(cursor);
- return NULL;
- }
+ if (!state)
+ goto fail;
cursor->base.state = &state->base;
cursor->can_scale = false;
@@ -14259,11 +14275,13 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
cursor->update_plane = intel_update_cursor_plane;
cursor->disable_plane = intel_disable_cursor_plane;
- drm_universal_plane_init(dev, &cursor->base, 0,
- &intel_plane_funcs,
- intel_cursor_formats,
- ARRAY_SIZE(intel_cursor_formats),
- DRM_PLANE_TYPE_CURSOR, NULL);
+ ret = drm_universal_plane_init(dev, &cursor->base, 0,
+ &intel_plane_funcs,
+ intel_cursor_formats,
+ ARRAY_SIZE(intel_cursor_formats),
+ DRM_PLANE_TYPE_CURSOR, NULL);
+ if (ret)
+ goto fail;
if (INTEL_INFO(dev)->gen >= 4) {
if (!dev->mode_config.rotation_property)
@@ -14283,6 +14301,12 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
return &cursor->base;
+
+fail:
+ kfree(state);
+ kfree(cursor);
+
+ return NULL;
}
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
@@ -14308,7 +14332,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
struct intel_crtc_state *crtc_state = NULL;
struct drm_plane *primary = NULL;
struct drm_plane *cursor = NULL;
- int i, ret;
+ int ret;
intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
if (intel_crtc == NULL)
@@ -14344,13 +14368,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
if (ret)
goto fail;
- drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
- for (i = 0; i < 256; i++) {
- intel_crtc->lut_r[i] = i;
- intel_crtc->lut_g[i] = i;
- intel_crtc->lut_b[i] = i;
- }
-
/*
* On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
* is hooked to pipe B. Hence we want plane A feeding pipe B.
@@ -14375,6 +14392,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
+ intel_color_init(&intel_crtc->base);
+
WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
return;
@@ -14499,6 +14518,8 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_ddi_init(dev, PORT_A);
intel_ddi_init(dev, PORT_B);
intel_ddi_init(dev, PORT_C);
+
+ intel_dsi_init(dev);
} else if (HAS_DDI(dev)) {
int found;
@@ -14559,6 +14580,8 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ bool has_edp, has_port;
+
/*
* The DP_DETECTED bit is the latched state of the DDC
* SDA pin at boot. However since eDP doesn't require DDC
@@ -14567,27 +14590,37 @@ static void intel_setup_outputs(struct drm_device *dev)
* Thus we can't rely on the DP_DETECTED bit alone to detect
* eDP ports. Consult the VBT as well as DP_DETECTED to
* detect eDP ports.
+ *
+ * Sadly the straps seem to be missing sometimes even for HDMI
+ * ports (e.g. on the Voyo V3 - CHT x7-Z8700), so check both the
+ * strap and the VBT for the presence of the port. Additionally we
+ * can't trust the port type the VBT declares, as we've seen HDMI
+ * ports that the VBT claims are DP or eDP.
*/
- if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
- !intel_dp_is_edp(dev, PORT_B))
+ has_edp = intel_dp_is_edp(dev, PORT_B);
+ has_port = intel_bios_is_port_present(dev_priv, PORT_B);
+ if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
+ has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
+ if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
- if (I915_READ(VLV_DP_B) & DP_DETECTED ||
- intel_dp_is_edp(dev, PORT_B))
- intel_dp_init(dev, VLV_DP_B, PORT_B);
- if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
- !intel_dp_is_edp(dev, PORT_C))
+ has_edp = intel_dp_is_edp(dev, PORT_C);
+ has_port = intel_bios_is_port_present(dev_priv, PORT_C);
+ if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
+ has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
+ if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
- if (I915_READ(VLV_DP_C) & DP_DETECTED ||
- intel_dp_is_edp(dev, PORT_C))
- intel_dp_init(dev, VLV_DP_C, PORT_C);
if (IS_CHERRYVIEW(dev)) {
- /* eDP not supported on port D, so don't check VBT */
- if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
- intel_hdmi_init(dev, CHV_HDMID, PORT_D);
- if (I915_READ(CHV_DP_D) & DP_DETECTED)
+ /*
+ * eDP not supported on port D,
+ * so no need to worry about it
+ */
+ has_port = intel_bios_is_port_present(dev_priv, PORT_D);
+ if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
intel_dp_init(dev, CHV_DP_D, PORT_D);
+ if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
+ intel_hdmi_init(dev, CHV_HDMID, PORT_D);
}
intel_dsi_init(dev);
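
The VLV/CHV output setup above now treats a port as present when either the detection strap or the VBT says so, and only falls back to HDMI when DP/eDP init did not claim the port. The decision logic in isolation, with stand-in booleans and callbacks (the real code reads DP_DETECTED/SDVO_DETECTED and intel_bios_is_port_present()):

#include <stdbool.h>

struct port_hints {
	bool dp_strap;      /* DP_DETECTED latched at boot */
	bool hdmi_strap;    /* SDVO_DETECTED latched at boot */
	bool vbt_present;   /* VBT claims the port exists */
	bool vbt_edp;       /* VBT claims the port is eDP */
};

void probe_port(const struct port_hints *p,
		bool (*dp_init)(void), void (*hdmi_init)(void))
{
	bool has_edp = p->vbt_edp;

	if (p->dp_strap || p->vbt_present)
		has_edp &= dp_init();   /* the eDP claim only holds if DP init succeeds */

	if ((p->hdmi_strap || p->vbt_present) && !has_edp)
		hdmi_init();            /* HDMI only when eDP did not take the port */
}
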
@@ -14868,6 +14901,8 @@ static int intel_framebuffer_init(struct drm_device *dev,
drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
intel_fb->obj = obj;
+ intel_fill_fb_info(dev_priv, &intel_fb->base);
+
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
@@ -14888,8 +14923,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
- obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
- mode_cmd.handles[0]));
+ obj = to_intel_bo(drm_gem_object_lookup(filp, mode_cmd.handles[0]));
if (&obj->base == NULL)
return ERR_PTR(-ENOENT);
@@ -14915,23 +14949,13 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.atomic_state_clear = intel_atomic_state_clear,
};
-/* Set up chip specific display functions */
-static void intel_init_display(struct drm_device *dev)
+/**
+ * intel_init_display_hooks - initialize the display modesetting hooks
+ * @dev_priv: device private
+ */
+void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
- dev_priv->display.find_dpll = g4x_find_best_dpll;
- else if (IS_CHERRYVIEW(dev))
- dev_priv->display.find_dpll = chv_find_best_dpll;
- else if (IS_VALLEYVIEW(dev))
- dev_priv->display.find_dpll = vlv_find_best_dpll;
- else if (IS_PINEVIEW(dev))
- dev_priv->display.find_dpll = pnv_find_best_dpll;
- else
- dev_priv->display.find_dpll = i9xx_find_best_dpll;
-
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_INFO(dev_priv)->gen >= 9) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
dev_priv->display.get_initial_plane_config =
skylake_get_initial_plane_config;
@@ -14939,7 +14963,7 @@ static void intel_init_display(struct drm_device *dev)
haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
- } else if (HAS_DDI(dev)) {
+ } else if (HAS_DDI(dev_priv)) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
dev_priv->display.get_initial_plane_config =
ironlake_get_initial_plane_config;
@@ -14947,7 +14971,7 @@ static void intel_init_display(struct drm_device *dev)
haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
dev_priv->display.get_initial_plane_config =
ironlake_get_initial_plane_config;
@@ -14955,106 +14979,134 @@ static void intel_init_display(struct drm_device *dev)
ironlake_crtc_compute_clock;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_initial_plane_config =
i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
+ dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
- } else {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+ dev_priv->display.get_initial_plane_config =
+ i9xx_get_initial_plane_config;
+ dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
+ dev_priv->display.crtc_enable = valleyview_crtc_enable;
+ dev_priv->display.crtc_disable = i9xx_crtc_disable;
+ } else if (IS_G4X(dev_priv)) {
+ dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+ dev_priv->display.get_initial_plane_config =
+ i9xx_get_initial_plane_config;
+ dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
+ dev_priv->display.crtc_enable = i9xx_crtc_enable;
+ dev_priv->display.crtc_disable = i9xx_crtc_disable;
+ } else if (IS_PINEVIEW(dev_priv)) {
+ dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+ dev_priv->display.get_initial_plane_config =
+ i9xx_get_initial_plane_config;
+ dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
+ dev_priv->display.crtc_enable = i9xx_crtc_enable;
+ dev_priv->display.crtc_disable = i9xx_crtc_disable;
+ } else if (!IS_GEN2(dev_priv)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_initial_plane_config =
i9xx_get_initial_plane_config;
dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
+ } else {
+ dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+ dev_priv->display.get_initial_plane_config =
+ i9xx_get_initial_plane_config;
+ dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
+ dev_priv->display.crtc_enable = i9xx_crtc_enable;
+ dev_priv->display.crtc_disable = i9xx_crtc_disable;
}
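
intel_init_display_hooks() follows the driver's usual pattern of selecting a per-platform set of function pointers once at init and calling through them afterwards. The shape of that pattern, reduced to a standalone sketch with hypothetical hook names:

struct display_hooks {
	int  (*crtc_compute_clock)(int pipe);
	void (*crtc_enable)(int pipe);
	void (*crtc_disable)(int pipe);
};

static int  chv_compute_clock(int pipe) { (void)pipe; return 0; }
static void gen_crtc_enable(int pipe)   { (void)pipe; }
static void gen_crtc_disable(int pipe)  { (void)pipe; }

static const struct display_hooks chv_hooks = {
	.crtc_compute_clock = chv_compute_clock,
	.crtc_enable        = gen_crtc_enable,
	.crtc_disable       = gen_crtc_disable,
};

/* pick the table once at init, e.g. hooks = is_chv ? &chv_hooks : &other_hooks;
 * later code then just calls hooks->crtc_enable(pipe); */
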
/* Returns the core display clock speed */
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
dev_priv->display.get_display_clock_speed =
skylake_get_display_clock_speed;
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
dev_priv->display.get_display_clock_speed =
broxton_get_display_clock_speed;
- else if (IS_BROADWELL(dev))
+ else if (IS_BROADWELL(dev_priv))
dev_priv->display.get_display_clock_speed =
broadwell_get_display_clock_speed;
- else if (IS_HASWELL(dev))
+ else if (IS_HASWELL(dev_priv))
dev_priv->display.get_display_clock_speed =
haswell_get_display_clock_speed;
- else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
dev_priv->display.get_display_clock_speed =
valleyview_get_display_clock_speed;
- else if (IS_GEN5(dev))
+ else if (IS_GEN5(dev_priv))
dev_priv->display.get_display_clock_speed =
ilk_get_display_clock_speed;
- else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
- IS_GEN6(dev) || IS_IVYBRIDGE(dev))
+ else if (IS_I945G(dev_priv) || IS_BROADWATER(dev_priv) ||
+ IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
dev_priv->display.get_display_clock_speed =
i945_get_display_clock_speed;
- else if (IS_GM45(dev))
+ else if (IS_GM45(dev_priv))
dev_priv->display.get_display_clock_speed =
gm45_get_display_clock_speed;
- else if (IS_CRESTLINE(dev))
+ else if (IS_CRESTLINE(dev_priv))
dev_priv->display.get_display_clock_speed =
i965gm_get_display_clock_speed;
- else if (IS_PINEVIEW(dev))
+ else if (IS_PINEVIEW(dev_priv))
dev_priv->display.get_display_clock_speed =
pnv_get_display_clock_speed;
- else if (IS_G33(dev) || IS_G4X(dev))
+ else if (IS_G33(dev_priv) || IS_G4X(dev_priv))
dev_priv->display.get_display_clock_speed =
g33_get_display_clock_speed;
- else if (IS_I915G(dev))
+ else if (IS_I915G(dev_priv))
dev_priv->display.get_display_clock_speed =
i915_get_display_clock_speed;
- else if (IS_I945GM(dev) || IS_845G(dev))
+ else if (IS_I945GM(dev_priv) || IS_845G(dev_priv))
dev_priv->display.get_display_clock_speed =
i9xx_misc_get_display_clock_speed;
- else if (IS_I915GM(dev))
+ else if (IS_I915GM(dev_priv))
dev_priv->display.get_display_clock_speed =
i915gm_get_display_clock_speed;
- else if (IS_I865G(dev))
+ else if (IS_I865G(dev_priv))
dev_priv->display.get_display_clock_speed =
i865_get_display_clock_speed;
- else if (IS_I85X(dev))
+ else if (IS_I85X(dev_priv))
dev_priv->display.get_display_clock_speed =
i85x_get_display_clock_speed;
else { /* 830 */
- WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n");
+ WARN(!IS_I830(dev_priv), "Unknown platform. Assuming 133 MHz CDCLK\n");
dev_priv->display.get_display_clock_speed =
i830_get_display_clock_speed;
}
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv)) {
dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
- } else if (IS_GEN6(dev)) {
+ } else if (IS_GEN6(dev_priv)) {
dev_priv->display.fdi_link_train = gen6_fdi_link_train;
- } else if (IS_IVYBRIDGE(dev)) {
+ } else if (IS_IVYBRIDGE(dev_priv)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
- } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
- if (IS_BROADWELL(dev)) {
+ if (IS_BROADWELL(dev_priv)) {
dev_priv->display.modeset_commit_cdclk =
broadwell_modeset_commit_cdclk;
dev_priv->display.modeset_calc_cdclk =
broadwell_modeset_calc_cdclk;
}
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.modeset_commit_cdclk =
valleyview_modeset_commit_cdclk;
dev_priv->display.modeset_calc_cdclk =
valleyview_modeset_calc_cdclk;
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
dev_priv->display.modeset_commit_cdclk =
broxton_modeset_commit_cdclk;
dev_priv->display.modeset_calc_cdclk =
broxton_modeset_calc_cdclk;
}
- switch (INTEL_INFO(dev)->gen) {
+ switch (INTEL_INFO(dev_priv)->gen) {
case 2:
dev_priv->display.queue_flip = intel_gen2_queue_flip;
break;
@@ -15081,8 +15133,6 @@ static void intel_init_display(struct drm_device *dev)
/* Default just returns -ENODEV to indicate unsupported */
dev_priv->display.queue_flip = intel_default_queue_flip;
}
-
- mutex_init(&dev_priv->pps_mutex);
}
/*
@@ -15305,7 +15355,7 @@ static void sanitize_watermarks(struct drm_device *dev)
int i;
/* Only supported on platforms that use atomic watermark design */
- if (!dev_priv->display.program_watermarks)
+ if (!dev_priv->display.optimize_watermarks)
return;
/*
@@ -15326,6 +15376,13 @@ retry:
if (WARN_ON(IS_ERR(state)))
goto fail;
+ /*
+ * Hardware readout is the only time we don't want to calculate
+ * intermediate watermarks (since we don't trust the current
+ * watermarks).
+ */
+ to_intel_atomic_state(state)->skip_intermediate_wm = true;
+
ret = intel_atomic_check(dev, state);
if (ret) {
/*
@@ -15348,7 +15405,8 @@ retry:
for_each_crtc_in_state(state, crtc, cstate, i) {
struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
- dev_priv->display.program_watermarks(cs);
+ cs->wm.need_postvbl_update = true;
+ dev_priv->display.optimize_watermarks(cs);
}
drm_atomic_state_free(state);
@@ -15359,7 +15417,8 @@ fail:
void intel_modeset_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
int sprite, ret;
enum pipe pipe;
struct intel_crtc *crtc;
@@ -15401,9 +15460,6 @@ void intel_modeset_init(struct drm_device *dev)
}
}
- intel_init_display(dev);
- intel_init_audio(dev);
-
if (IS_GEN2(dev)) {
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
@@ -15426,7 +15482,7 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
}
- dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
+ dev->mode_config.fb_base = ggtt->mappable_base;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
INTEL_INFO(dev)->num_pipes,
@@ -15443,6 +15499,7 @@ void intel_modeset_init(struct drm_device *dev)
}
intel_update_czclk(dev_priv);
+ intel_update_rawclk(dev_priv);
intel_update_cdclk(dev);
intel_shared_dpll_init(dev);
@@ -15555,10 +15612,15 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- i915_reg_t reg = PIPECONF(crtc->config->cpu_transcoder);
+ enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
/* Clear any frame start delays used for debugging left by the BIOS */
- I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+ if (!transcoder_is_dsi(cpu_transcoder)) {
+ i915_reg_t reg = PIPECONF(cpu_transcoder);
+
+ I915_WRITE(reg,
+ I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+ }
/* restore vblank interrupts to correct state */
drm_crtc_vblank_reset(&crtc->base);
@@ -15606,38 +15668,9 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
/* Adjust the state of the output pipe according to whether we
* have active connectors/encoders. */
- if (!intel_crtc_has_encoders(crtc))
+ if (crtc->active && !intel_crtc_has_encoders(crtc))
intel_crtc_disable_noatomic(&crtc->base);
- if (crtc->active != crtc->base.state->active) {
- struct intel_encoder *encoder;
-
- /* This can happen either due to bugs in the get_hw_state
- * functions or because of calls to intel_crtc_disable_noatomic,
- * or because the pipe is force-enabled due to the
- * pipe A quirk. */
- DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
- crtc->base.base.id,
- crtc->base.state->enable ? "enabled" : "disabled",
- crtc->active ? "enabled" : "disabled");
-
- WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
- crtc->base.state->active = crtc->active;
- crtc->base.enabled = crtc->active;
- crtc->base.state->connector_mask = 0;
- crtc->base.state->encoder_mask = 0;
-
- /* Because we only establish the connector -> encoder ->
- * crtc links if something is active, this means the
- * crtc is now deactivated. Break the links. connector
- * -> encoder links are only establish when things are
- * actually up, hence no need to break them. */
- WARN_ON(crtc->active);
-
- for_each_encoder_on_crtc(dev, &crtc->base, encoder)
- encoder->base.crtc = NULL;
- }
-
if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
/*
* We start out with underrun reporting disabled to avoid races.
@@ -15767,7 +15800,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct intel_crtc_state *crtc_state = crtc->config;
int pixclk = 0;
- __drm_atomic_helper_crtc_destroy_state(&crtc->base, &crtc_state->base);
+ __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
memset(crtc_state, 0, sizeof(*crtc_state));
crtc_state->base.crtc = &crtc->base;
@@ -15806,22 +15839,17 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
- pll->on = pll->get_hw_state(dev_priv, pll,
- &pll->config.hw_state);
- pll->active = 0;
+ pll->on = pll->funcs.get_hw_state(dev_priv, pll,
+ &pll->config.hw_state);
pll->config.crtc_mask = 0;
for_each_intel_crtc(dev, crtc) {
- if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
- pll->active++;
+ if (crtc->active && crtc->config->shared_dpll == pll)
pll->config.crtc_mask |= 1 << crtc->pipe;
- }
}
+ pll->active_mask = pll->config.crtc_mask;
DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
pll->name, pll->config.crtc_mask, pll->on);
-
- if (pll->config.crtc_mask)
- intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
}
for_each_intel_encoder(dev, encoder) {
@@ -15903,6 +15931,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
update_scanline_offset(crtc);
}
+
+ intel_pipe_config_sanity_check(dev_priv, crtc->config);
}
}
@@ -15937,12 +15967,12 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
- if (!pll->on || pll->active)
+ if (!pll->on || pll->active_mask)
continue;
DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
- pll->disable(dev_priv, pll);
+ pll->funcs.disable(dev_priv, pll);
pll->on = false;
}
@@ -15987,18 +16017,6 @@ void intel_display_resume(struct drm_device *dev)
retry:
ret = drm_modeset_lock_all_ctx(dev, &ctx);
- /*
- * With MST, the number of connectors can change between suspend and
- * resume, which means that the state we want to restore might now be
- * impossible to use since it'll be pointing to non-existant
- * connectors.
- */
- if (ret == 0 && state &&
- state->num_connector != dev->mode_config.num_connector) {
- drm_atomic_state_free(state);
- state = NULL;
- }
-
if (ret == 0 && !setup) {
setup = true;
@@ -16013,6 +16031,9 @@ retry:
state->acquire_ctx = &ctx;
+ /* ignore any reset values/BIOS leftovers in the WM registers */
+ to_intel_atomic_state(state)->skip_intermediate_wm = true;
+
for_each_crtc_in_state(state, crtc, crtc_state, i) {
/*
* Force recalculation even if we restore
@@ -16063,9 +16084,8 @@ void intel_modeset_gem_init(struct drm_device *dev)
continue;
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(c->primary,
- c->primary->fb,
- c->primary->state);
+ ret = intel_pin_and_fence_fb_obj(c->primary->fb,
+ c->primary->state->rotation);
mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
@@ -16274,8 +16294,9 @@ intel_display_capture_error_state(struct drm_device *dev)
error->pipe[i].stat = I915_READ(PIPESTAT(i));
}
+ /* Note: this does not include DSI transcoders. */
error->num_transcoders = INTEL_INFO(dev)->num_pipes;
- if (HAS_DDI(dev_priv->dev))
+ if (HAS_DDI(dev_priv))
error->num_transcoders++; /* Account for eDP. */
for (i = 0; i < error->num_transcoders; i++) {
@@ -16346,7 +16367,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
}
for (i = 0; i < error->num_transcoders; i++) {
- err_printf(m, "CPU transcoder: %c\n",
+ err_printf(m, "CPU transcoder: %s\n",
transcoder_name(error->transcoder[i].cpu_transcoder));
err_printf(m, " Power: %s\n",
onoff(error->transcoder[i].power_domain_on));
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 69054ef97..891107f92 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -129,6 +129,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
enum pipe pipe);
+static void intel_dp_unset_edid(struct intel_dp *intel_dp);
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
@@ -662,7 +663,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
msecs_to_jiffies_timeout(10));
else
- done = wait_for_atomic(C, 10) == 0;
+ done = wait_for(C, 10) == 0;
if (!done)
DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
has_aux_irq);
@@ -671,60 +672,55 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
return status;
}
-static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+
+ if (index)
+ return 0;
/*
* The clock divider is based off the hrawclk, and would like to run at
- * 2MHz. So, take the hrawclk value and divide by 2 and use that
+ * 2MHz. So, take the hrawclk value and divide by 2000 and use that
*/
- return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
+ return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
if (index)
return 0;
- if (intel_dig_port->port == PORT_A) {
+ /*
+ * The clock divider is based on the cdclk or PCH rawclk and wants
+ * to run at 2MHz. So take the cdclk or PCH rawclk value, divide it
+ * by 2000, and use that.
+ */
+ if (intel_dig_port->port == PORT_A)
return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
-
- } else {
- return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
- }
+ else
+ return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}
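
The AUX clock divider helpers all reduce to the same arithmetic: the source clock value is in kHz, so dividing by 2000 gives the divider needed for a roughly 2 MHz AUX clock. A quick standalone check of that math, with a simplified DIV_ROUND_CLOSEST for positive values only:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))   /* positive values only */

int main(void)
{
	unsigned int rawclk_khz = 24000;   /* e.g. a 24 MHz PCH rawclk */
	unsigned int div = DIV_ROUND_CLOSEST(rawclk_khz, 2000);

	printf("divider %u -> AUX clock %u kHz\n", div, rawclk_khz / div);
	return 0;
}
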
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
- if (intel_dig_port->port == PORT_A) {
- if (index)
- return 0;
- return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
- } else if (HAS_PCH_LPT_H(dev_priv)) {
+ if (intel_dig_port->port != PORT_A && HAS_PCH_LPT_H(dev_priv)) {
/* Workaround for non-ULT HSW */
switch (index) {
case 0: return 63;
case 1: return 72;
default: return 0;
}
- } else {
- return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
}
-}
-static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
-{
- return index ? 0 : 100;
+ return ilk_get_aux_clock_divider(intel_dp, index);
}
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
@@ -737,10 +733,10 @@ static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
return index ? 0 : 1;
}
-static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
- bool has_aux_irq,
- int send_bytes,
- uint32_t aux_clock_divider)
+static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
+ bool has_aux_irq,
+ int send_bytes,
+ uint32_t aux_clock_divider)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
@@ -1229,71 +1225,6 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
intel_connector_unregister(intel_connector);
}
-static void
-skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
-{
- u32 ctrl1;
-
- memset(&pipe_config->dpll_hw_state, 0,
- sizeof(pipe_config->dpll_hw_state));
-
- pipe_config->ddi_pll_sel = SKL_DPLL0;
- pipe_config->dpll_hw_state.cfgcr1 = 0;
- pipe_config->dpll_hw_state.cfgcr2 = 0;
-
- ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
- switch (pipe_config->port_clock / 2) {
- case 81000:
- ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
- SKL_DPLL0);
- break;
- case 135000:
- ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
- SKL_DPLL0);
- break;
- case 270000:
- ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
- SKL_DPLL0);
- break;
- case 162000:
- ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
- SKL_DPLL0);
- break;
- /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
- results in CDCLK change. Need to handle the change of CDCLK by
- disabling pipes and re-enabling them */
- case 108000:
- ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
- SKL_DPLL0);
- break;
- case 216000:
- ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
- SKL_DPLL0);
- break;
-
- }
- pipe_config->dpll_hw_state.ctrl1 = ctrl1;
-}
-
-void
-hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
-{
- memset(&pipe_config->dpll_hw_state, 0,
- sizeof(pipe_config->dpll_hw_state));
-
- switch (pipe_config->port_clock / 2) {
- case 81000:
- pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
- break;
- case 135000:
- pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
- break;
- case 270000:
- pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
- break;
- }
-}
-
static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
@@ -1570,10 +1501,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
/* Get bpp from vbt only for panels that dont have bpp in edid */
if (intel_connector->base.display_info.bpc == 0 &&
- (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
+ (dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp)) {
DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
- dev_priv->vbt.edp_bpp);
- bpp = dev_priv->vbt.edp_bpp;
+ dev_priv->vbt.edp.bpp);
+ bpp = dev_priv->vbt.edp.bpp;
}
/*
@@ -1651,13 +1582,7 @@ found:
&pipe_config->dp_m2_n2);
}
- if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
- skl_edp_set_pll_config(pipe_config);
- else if (IS_BROXTON(dev))
- /* handled in ddi */;
- else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- hsw_dp_set_ddi_pll_sel(pipe_config);
- else
+ if (!HAS_DDI(dev))
intel_dp_set_clock(encoder, pipe_config);
return true;
@@ -1779,11 +1704,11 @@ static void wait_panel_status(struct intel_dp *intel_dp,
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
- if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
+ if (_wait_for((I915_READ(pp_stat_reg) & mask) == value,
+ 5 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
DRM_ERROR("Panel status timeout: status %08x control %08x\n",
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
- }
DRM_DEBUG_KMS("Wait complete\n");
}
@@ -2290,6 +2215,15 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
POSTING_READ(DP_A);
udelay(500);
+ /*
+ * [DevILK] Work around required when enabling DP PLL
+ * while a pipe is enabled going to FDI:
+ * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
+ * 2. Program DP PLL enable
+ */
+ if (IS_GEN5(dev_priv))
+ intel_wait_for_vblank_if_active(dev_priv->dev, !crtc->pipe);
+
intel_dp->DP |= DP_PLL_ENABLE;
I915_WRITE(DP_A, intel_dp->DP);
@@ -2409,7 +2343,6 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- int dotclock;
tmp = I915_READ(intel_dp->output_reg);
@@ -2459,16 +2392,12 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->port_clock = 270000;
}
- dotclock = intel_dotclock_calculate(pipe_config->port_clock,
- &pipe_config->dp_m_n);
+ pipe_config->base.adjusted_mode.crtc_clock =
+ intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->dp_m_n);
- if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
- ironlake_check_encoder_dotclock(pipe_config, dotclock);
-
- pipe_config->base.adjusted_mode.crtc_clock = dotclock;
-
- if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
- pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
+ if (is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
+ pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
/*
* This is a big fat ugly hack.
*
@@ -2483,8 +2412,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
* load.
*/
DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
- pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
- dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
+ pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
+ dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
}
}
@@ -2710,7 +2639,6 @@ static void intel_enable_dp(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
- enum port port = dp_to_dig_port(intel_dp)->port;
enum pipe pipe = crtc->pipe;
if (WARN_ON(dp_reg & DP_PORT_EN))
@@ -2721,35 +2649,12 @@ static void intel_enable_dp(struct intel_encoder *encoder)
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
vlv_init_panel_power_sequencer(intel_dp);
- /*
- * We get an occasional spurious underrun between the port
- * enable and vdd enable, when enabling port A eDP.
- *
- * FIXME: Not sure if this applies to (PCH) port D eDP as well
- */
- if (port == PORT_A)
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
intel_dp_enable_port(intel_dp);
- if (port == PORT_A && IS_GEN5(dev_priv)) {
- /*
- * Underrun reporting for the other pipe was disabled in
- * g4x_pre_enable_dp(). The eDP PLL and port have now been
- * enabled, so it's now safe to re-enable underrun reporting.
- */
- intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
- intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
- }
-
edp_panel_vdd_on(intel_dp);
edp_panel_on(intel_dp);
edp_panel_vdd_off(intel_dp, true);
- if (port == PORT_A)
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
pps_unlock(intel_dp);
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
@@ -2791,26 +2696,11 @@ static void vlv_enable_dp(struct intel_encoder *encoder)
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
- enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
intel_dp_prepare(encoder);
- if (port == PORT_A && IS_GEN5(dev_priv)) {
- /*
- * We get FIFO underruns on the other pipe when
- * enabling the CPU eDP PLL, and when enabling CPU
- * eDP port. We could potentially avoid the PLL
- * underrun with a vblank wait just prior to enabling
- * the PLL, but that doesn't appear to help the port
- * enable case. Just sweep it all under the rug.
- */
- intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
- intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
- }
-
/* Only ilk+ has port A */
if (port == PORT_A)
ironlake_edp_pll_on(intel_dp);
@@ -3184,47 +3074,14 @@ static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
}
/*
- * Native read with retry for link status and receiver capability reads for
- * cases where the sink may still be asleep.
- *
- * Sinks are *supposed* to come up within 1ms from an off state, but we're also
- * supposed to retry 3 times per the spec.
- */
-static ssize_t
-intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
- void *buffer, size_t size)
-{
- ssize_t ret;
- int i;
-
- /*
- * Sometime we just get the same incorrect byte repeated
- * over the entire buffer. Doing just one throw away read
- * initially seems to "solve" it.
- */
- drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
-
- for (i = 0; i < 3; i++) {
- ret = drm_dp_dpcd_read(aux, offset, buffer, size);
- if (ret == size)
- return ret;
- msleep(1);
- }
-
- return ret;
-}
-
-/*
* Fetch AUX CH registers 0x202 - 0x207 which contain
* link status information
*/
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
- return intel_dp_dpcd_read_wake(&intel_dp->aux,
- DP_LANE0_1_STATUS,
- link_status,
- DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
+ return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
+ DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
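
intel_dp_dpcd_read_wake() and its wake-up retries are dropped in favour of plain drm_dp_dpcd_read(), on the understanding that the DRM core helper already retries enough for a sink that is still waking up. For reference, the pattern the removed helper implemented looks roughly like this (generic stand-in, not the DRM API):

#include <errno.h>
#include <stddef.h>

typedef long (*dpcd_read_fn)(unsigned int offset, void *buf, size_t size);

static long read_with_retries(dpcd_read_fn read, unsigned int offset,
			      void *buf, size_t size, int tries)
{
	long ret = -EIO;

	while (tries--) {
		ret = read(offset, buf, size);
		if (ret == (long)size)
			break;
		/* the real helper slept ~1 ms between attempts */
	}
	return ret;
}
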
/* These are source-specific values. */
@@ -3238,7 +3095,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
if (IS_BROXTON(dev))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (INTEL_INFO(dev)->gen >= 9) {
- if (dev_priv->edp_low_vswing && port == PORT_A)
+ if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
@@ -3859,8 +3716,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
uint8_t rev;
- if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
- sizeof(intel_dp->dpcd)) < 0)
+ if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
+ sizeof(intel_dp->dpcd)) < 0)
return false; /* aux transfer failed */
DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
@@ -3868,12 +3725,33 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
if (intel_dp->dpcd[DP_DPCD_REV] == 0)
return false; /* DPCD not present */
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
+ &intel_dp->sink_count, 1) < 0)
+ return false;
+
+ /*
+ * The sink count can change between short pulse HPD interrupts,
+ * so cache it in intel_dp and compare it across interrupts to
+ * spot any change.
+ */
+ intel_dp->sink_count = DP_GET_SINK_COUNT(intel_dp->sink_count);
+
+ /*
+ * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 means a dongle
+ * is present but no display is attached. Unless we need to know
+ * whether a dongle is present, there is no point in updating the
+ * downstream port information, so return early and skip the rest
+ * of the work.
+ */
+ if (!is_edp(intel_dp) && !intel_dp->sink_count)
+ return false;
+
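
DP_GET_SINK_COUNT() folds the split SINK_COUNT field back together, with bits 5:0 holding the low part and bit 7 acting as the most significant bit. A standalone decode under that assumed layout:

#include <stdint.h>
#include <stdio.h>

/* mirrors DP_GET_SINK_COUNT(): bits 5:0 plus bit 7 as the MSB (assumed layout) */
static unsigned int get_sink_count(uint8_t sink_count_reg)
{
	return ((sink_count_reg & 0x80) >> 1) | (sink_count_reg & 0x3f);
}

int main(void)
{
	printf("%u\n", get_sink_count(0x01));   /* one sink attached */
	printf("%u\n", get_sink_count(0x00));   /* dongle present, no display */
	return 0;
}
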
/* Check if the panel supports PSR */
memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
if (is_edp(intel_dp)) {
- intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
- intel_dp->psr_dpcd,
- sizeof(intel_dp->psr_dpcd));
+ drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
+ intel_dp->psr_dpcd,
+ sizeof(intel_dp->psr_dpcd));
if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
dev_priv->psr.sink_support = true;
DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
@@ -3884,9 +3762,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
uint8_t frame_sync_cap;
dev_priv->psr.sink_support = true;
- intel_dp_dpcd_read_wake(&intel_dp->aux,
- DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
- &frame_sync_cap, 1);
+ drm_dp_dpcd_read(&intel_dp->aux,
+ DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
+ &frame_sync_cap, 1);
dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
/* PSR2 needs frame sync as well */
dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
@@ -3902,15 +3780,13 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
/* Intermediate frequency support */
if (is_edp(intel_dp) &&
(intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
- (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
+ (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
(rev >= 0x03)) { /* eDp v1.4 or higher */
__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
int i;
- intel_dp_dpcd_read_wake(&intel_dp->aux,
- DP_SUPPORTED_LINK_RATES,
- sink_rates,
- sizeof(sink_rates));
+ drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
+ sink_rates, sizeof(sink_rates));
for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
int val = le16_to_cpu(sink_rates[i]);
@@ -3933,9 +3809,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
return true; /* no per-port downstream info */
- if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
- intel_dp->downstream_ports,
- DP_MAX_DOWNSTREAM_PORTS) < 0)
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
+ intel_dp->downstream_ports,
+ DP_MAX_DOWNSTREAM_PORTS) < 0)
return false; /* downstream port status fetch failed */
return true;
@@ -3949,11 +3825,11 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
return;
- if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
- if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
}
@@ -3963,13 +3839,16 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
{
u8 buf[1];
+ if (!i915.enable_dp_mst)
+ return false;
+
if (!intel_dp->can_mst)
return false;
if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
return false;
- if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
if (buf[0] & DP_MST_CAP) {
DRM_DEBUG_KMS("Sink is MST capable\n");
intel_dp->is_mst = true;
@@ -4106,7 +3985,7 @@ stop:
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
- return intel_dp_dpcd_read_wake(&intel_dp->aux,
+ return drm_dp_dpcd_read(&intel_dp->aux,
DP_DEVICE_SERVICE_IRQ_VECTOR,
sink_irq_vector, 1) == 1;
}
@@ -4116,7 +3995,7 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
int ret;
- ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
+ ret = drm_dp_dpcd_read(&intel_dp->aux,
DP_SINK_COUNT_ESI,
sink_irq_vector, 14);
if (ret != 14)
@@ -4292,6 +4171,36 @@ go_again:
return -EINVAL;
}
+static void
+intel_dp_check_link_status(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ u8 link_status[DP_LINK_STATUS_SIZE];
+
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+ if (!intel_dp_get_link_status(intel_dp, link_status)) {
+ DRM_ERROR("Failed to get link status\n");
+ return;
+ }
+
+ if (!intel_encoder->base.crtc)
+ return;
+
+ if (!to_intel_crtc(intel_encoder->base.crtc)->active)
+ return;
+
+ /* if link training is requested we should perform it always */
+ if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
+ (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
+ DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
+ intel_encoder->base.name);
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_stop_link_train(intel_dp);
+ }
+}
+
/*
* According to DP spec
* 5.1.2:
@@ -4299,16 +4208,19 @@ go_again:
* 2. Configure link according to Receiver Capabilities
* 3. Use Link Training from 2.5.3.3 and 3.5.1.3
* 4. Check link status on receipt of hot-plug interrupt
+ *
+ * intel_dp_short_pulse - handles short pulse interrupts
+ * when full detection is not required.
+ * Returns %true if the short pulse was handled and full detection
+ * is not required, %false otherwise.
*/
-static void
-intel_dp_check_link_status(struct intel_dp *intel_dp)
+static bool
+intel_dp_short_pulse(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
u8 sink_irq_vector;
- u8 link_status[DP_LINK_STATUS_SIZE];
-
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ u8 old_sink_count = intel_dp->sink_count;
+ bool ret;
/*
* Clearing compliance test variables to allow capturing
@@ -4318,20 +4230,17 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
intel_dp->compliance_test_type = 0;
intel_dp->compliance_test_data = 0;
- if (!intel_encoder->base.crtc)
- return;
-
- if (!to_intel_crtc(intel_encoder->base.crtc)->active)
- return;
-
- /* Try to read receiver status if the link appears to be up */
- if (!intel_dp_get_link_status(intel_dp, link_status)) {
- return;
- }
+ /*
+ * Now read the DPCD to see if it's actually running.
+ * If the current sink count doesn't match the value that was
+ * stored earlier, or the DPCD read failed, we need to do
+ * full detection.
+ */
+ ret = intel_dp_get_dpcd(intel_dp);
- /* Now read the DPCD to see if it's actually running */
- if (!intel_dp_get_dpcd(intel_dp)) {
- return;
+ if ((old_sink_count != intel_dp->sink_count) || !ret) {
+ /* No need to proceed if we are going to do full detect */
+ return false;
}
/* Try to read the source of the interrupt */
@@ -4348,14 +4257,11 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
- /* if link training is requested we should perform it always */
- if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
- (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
- DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
- intel_encoder->base.name);
- intel_dp_start_link_train(intel_dp);
- intel_dp_stop_link_train(intel_dp);
- }
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ intel_dp_check_link_status(intel_dp);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ return true;
}
/* XXX this is probably wrong for multiple downstream ports */
@@ -4368,6 +4274,9 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
if (!intel_dp_get_dpcd(intel_dp))
return connector_status_disconnected;
+ if (is_edp(intel_dp))
+ return connector_status_connected;
+
/* if there's no downstream port, we're done */
if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
return connector_status_connected;
@@ -4375,14 +4284,9 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
/* If we're HPD-aware, SINK_COUNT changes dynamically */
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
- uint8_t reg;
-
- if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
- &reg, 1) < 0)
- return connector_status_unknown;
- return DP_GET_SINK_COUNT(reg) ? connector_status_connected
- : connector_status_disconnected;
+ return intel_dp->sink_count ?
+ connector_status_connected : connector_status_disconnected;
}
/* If no HPD, poke DDC gently */
@@ -4591,6 +4495,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct edid *edid;
+ intel_dp_unset_edid(intel_dp);
edid = intel_dp_get_edid(intel_dp);
intel_connector->detect_edid = edid;
@@ -4611,9 +4516,10 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
intel_dp->has_audio = false;
}
-static enum drm_connector_status
-intel_dp_detect(struct drm_connector *connector, bool force)
+static void
+intel_dp_long_pulse(struct intel_connector *intel_connector)
{
+ struct drm_connector *connector = &intel_connector->base;
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
@@ -4623,17 +4529,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
bool ret;
u8 sink_irq_vector;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
- intel_dp_unset_edid(intel_dp);
-
- if (intel_dp->is_mst) {
- /* MST devices are disconnected from a monitor POV */
- if (intel_encoder->type != INTEL_OUTPUT_EDP)
- intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
- return connector_status_disconnected;
- }
-
power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_get(to_i915(dev), power_domain);
@@ -4651,19 +4546,42 @@ intel_dp_detect(struct drm_connector *connector, bool force)
intel_dp->compliance_test_type = 0;
intel_dp->compliance_test_data = 0;
+ if (intel_dp->is_mst) {
+ DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
+ intel_dp->is_mst,
+ intel_dp->mst_mgr.mst_state);
+ intel_dp->is_mst = false;
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
+ intel_dp->is_mst);
+ }
+
goto out;
}
+ if (intel_encoder->type != INTEL_OUTPUT_EDP)
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+
intel_dp_probe_oui(intel_dp);
ret = intel_dp_probe_mst(intel_dp);
if (ret) {
- /* if we are in MST mode then this connector
- won't appear connected or have anything with EDID on it */
- if (intel_encoder->type != INTEL_OUTPUT_EDP)
- intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ /*
+ * If we are in MST mode then this connector
+ * won't appear connected or have anything
+ * with EDID on it
+ */
status = connector_status_disconnected;
goto out;
+ } else if (connector->status == connector_status_connected) {
+ /*
+ * If the display was already connected and is still connected,
+ * check the link status; there have been known issues of
+ * link loss triggering a long pulse.
+ */
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ intel_dp_check_link_status(intel_dp);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ goto out;
}
/*
@@ -4676,9 +4594,8 @@ intel_dp_detect(struct drm_connector *connector, bool force)
intel_dp_set_edid(intel_dp);
- if (intel_encoder->type != INTEL_OUTPUT_EDP)
- intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
status = connector_status_connected;
+ intel_dp->detect_done = true;
/* Try to read the source of the interrupt */
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
@@ -4695,8 +4612,43 @@ intel_dp_detect(struct drm_connector *connector, bool force)
}
out:
+ if ((status != connector_status_connected) &&
+ (intel_dp->is_mst == false))
+ intel_dp_unset_edid(intel_dp);
+
intel_display_power_put(to_i915(dev), power_domain);
- return status;
+ return;
+}
+
+static enum drm_connector_status
+intel_dp_detect(struct drm_connector *connector, bool force)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+
+ if (intel_dp->is_mst) {
+ /* MST devices are disconnected from a monitor POV */
+ intel_dp_unset_edid(intel_dp);
+ if (intel_encoder->type != INTEL_OUTPUT_EDP)
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ return connector_status_disconnected;
+ }
+
+ /* If a full detect has not been performed yet, do one now */
+ if (!intel_dp->detect_done)
+ intel_dp_long_pulse(intel_dp->attached_connector);
+
+ intel_dp->detect_done = false;
+
+ if (is_edp(intel_dp) || intel_connector->detect_edid)
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
}
static void
@@ -4835,6 +4787,11 @@ intel_dp_set_property(struct drm_connector *connector,
DRM_DEBUG_KMS("no scaling not supported\n");
return -EINVAL;
}
+ if (HAS_GMCH_DISPLAY(dev_priv) &&
+ val == DRM_MODE_SCALE_CENTER) {
+ DRM_DEBUG_KMS("centering not supported\n");
+ return -EINVAL;
+ }
if (intel_connector->panel.fitting_mode == val) {
/* the eDP scaling property is not changed */
@@ -5022,44 +4979,37 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
intel_display_power_get(dev_priv, power_domain);
if (long_hpd) {
- if (!intel_digital_port_connected(dev_priv, intel_dig_port))
- goto mst_fail;
+ intel_dp_long_pulse(intel_dp->attached_connector);
+ if (intel_dp->is_mst)
+ ret = IRQ_HANDLED;
+ goto put_power;
- if (!intel_dp_get_dpcd(intel_dp)) {
- goto mst_fail;
- }
-
- intel_dp_probe_oui(intel_dp);
-
- if (!intel_dp_probe_mst(intel_dp)) {
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- intel_dp_check_link_status(intel_dp);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
- goto mst_fail;
- }
} else {
if (intel_dp->is_mst) {
- if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
- goto mst_fail;
+ if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
+ /*
+ * If we were in MST mode, and device is not
+ * there, get out of MST mode
+ */
+ DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
+ intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+ intel_dp->is_mst = false;
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
+ intel_dp->is_mst);
+ goto put_power;
+ }
}
if (!intel_dp->is_mst) {
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- intel_dp_check_link_status(intel_dp);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ if (!intel_dp_short_pulse(intel_dp)) {
+ intel_dp_long_pulse(intel_dp->attached_connector);
+ goto put_power;
+ }
}
}
ret = IRQ_HANDLED;
- goto put_power;
-mst_fail:
- /* if we were in MST mode, and device is not there get out of MST mode */
- if (intel_dp->is_mst) {
- DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
- intel_dp->is_mst = false;
- drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
- }
put_power:
intel_display_power_put(dev_priv, power_domain);
@@ -5070,14 +5020,6 @@ put_power:
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- union child_device_config *p_child;
- int i;
- static const short port_mapping[] = {
- [PORT_B] = DVO_PORT_DPB,
- [PORT_C] = DVO_PORT_DPC,
- [PORT_D] = DVO_PORT_DPD,
- [PORT_E] = DVO_PORT_DPE,
- };
/*
* eDP not supported on g4x. so bail out early just
@@ -5089,18 +5031,7 @@ bool intel_dp_is_edp(struct drm_device *dev, enum port port)
if (port == PORT_A)
return true;
- if (!dev_priv->vbt.child_dev_num)
- return false;
-
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- p_child = dev_priv->vbt.child_dev + i;
-
- if (p_child->common.dvo_port == port_mapping[port] &&
- (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
- (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
- return true;
- }
- return false;
+ return intel_bios_is_port_edp(dev_priv, port);
}
void
@@ -5207,7 +5138,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
- vbt = dev_priv->vbt.edp_pps;
+ vbt = dev_priv->vbt.edp.pps;
/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
* our hw here, which are all in 100usec. */
@@ -5258,7 +5189,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_on, pp_off, pp_div, port_sel = 0;
- int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
+ int div = dev_priv->rawclk_freq / 1000;
i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
enum port port = dp_to_dig_port(intel_dp)->port;
const struct edp_power_seq *seq = &intel_dp->pps_delays;
@@ -5793,8 +5724,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
fixed_mode = drm_mode_duplicate(dev,
dev_priv->vbt.lfp_lvds_vbt_mode);
- if (fixed_mode)
+ if (fixed_mode) {
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = fixed_mode->width_mm;
+ connector->display_info.height_mm = fixed_mode->height_mm;
+ }
}
mutex_unlock(&dev->mode_config.mutex);
@@ -5851,19 +5785,17 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
/* intel_dp vfuncs */
if (INTEL_INFO(dev)->gen >= 9)
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
- else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
- intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
else if (HAS_PCH_SPLIT(dev))
intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
else
- intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
+ intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
if (INTEL_INFO(dev)->gen >= 9)
intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
else
- intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
+ intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
if (HAS_DDI(dev))
intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
@@ -5993,9 +5925,9 @@ fail:
return false;
}
-void
-intel_dp_init(struct drm_device *dev,
- i915_reg_t output_reg, enum port port)
+bool intel_dp_init(struct drm_device *dev,
+ i915_reg_t output_reg,
+ enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port;
@@ -6005,7 +5937,7 @@ intel_dp_init(struct drm_device *dev,
intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
if (!intel_dig_port)
- return;
+ return false;
intel_connector = intel_connector_alloc();
if (!intel_connector)
@@ -6062,7 +5994,7 @@ intel_dp_init(struct drm_device *dev,
if (!intel_dp_init_connector(intel_dig_port, intel_connector))
goto err_init_connector;
- return;
+ return true;
err_init_connector:
drm_encoder_cleanup(encoder);
@@ -6070,8 +6002,7 @@ err_encoder_init:
kfree(intel_connector);
err_connector_alloc:
kfree(intel_dig_port);
-
- return;
+ return false;
}
void intel_dp_mst_suspend(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index 926a1e6ea..60fb39cd2 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -160,7 +160,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
break;
}
-
/* Check to see if we've tried the max voltage */
for (i = 0; i < intel_dp->lane_count; i++)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 2c999725b..7a34090ce 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -33,7 +33,6 @@
static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
@@ -90,9 +89,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->dp_m_n.tu = slots;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- hsw_dp_set_ddi_pll_sel(pipe_config);
-
return true;
}
@@ -106,7 +102,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
- drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->port);
+ drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->connector->port);
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
if (ret) {
@@ -127,10 +123,11 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
/* and this can also fail */
drm_dp_update_payload_part2(&intel_dp->mst_mgr);
- drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->port);
+ drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->connector->port);
intel_dp->active_mst_links--;
- intel_mst->port = NULL;
+
+ intel_mst->connector = NULL;
if (intel_dp->active_mst_links == 0) {
intel_dig_port->base.post_disable(&intel_dig_port->base);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
@@ -170,7 +167,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
found->encoder = encoder;
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
- intel_mst->port = found->port;
+
+ intel_mst->connector = found;
if (intel_dp->active_mst_links == 0) {
intel_prepare_ddi_buffer(&intel_dig_port->base);
@@ -188,7 +186,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
}
ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
- intel_mst->port,
+ intel_mst->connector->port,
intel_crtc->config->pbn, &slots);
if (ret == false) {
DRM_ERROR("failed to allocate vcpi\n");
@@ -229,7 +227,7 @@ static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
*pipe = intel_mst->pipe;
- if (intel_mst->port)
+ if (intel_mst->connector)
return true;
return false;
}
@@ -290,10 +288,11 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
struct edid *edid;
int ret;
- edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
- if (!edid)
- return 0;
+ if (!intel_dp) {
+ return intel_connector_update_modes(connector, NULL);
+ }
+ edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
ret = intel_connector_update_modes(connector, edid);
kfree(edid);
@@ -306,6 +305,8 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
+ if (!intel_dp)
+ return connector_status_disconnected;
return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port);
}
@@ -371,6 +372,8 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c
struct intel_dp *intel_dp = intel_connector->mst_port;
struct intel_crtc *crtc = to_intel_crtc(state->crtc);
+ if (!intel_dp)
+ return NULL;
return &intel_dp->mst_encoders[crtc->pipe]->base.base;
}
@@ -378,6 +381,8 @@ static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connecto
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
+ if (!intel_dp)
+ return NULL;
return &intel_dp->mst_encoders[0]->base.base;
}
@@ -488,23 +493,11 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
/* need to nuke the connector */
drm_modeset_lock_all(dev);
- if (connector->state->crtc) {
- struct drm_mode_set set;
- int ret;
-
- memset(&set, 0, sizeof(set));
- set.crtc = connector->state->crtc,
-
- ret = drm_atomic_helper_set_config(&set);
-
- WARN(ret, "Disabling mst crtc failed with %i\n", ret);
- }
-
intel_connector_remove_from_fbdev(intel_connector);
- drm_connector_cleanup(connector);
+ intel_connector->mst_port = NULL;
drm_modeset_unlock_all(dev);
- kfree(intel_connector);
+ drm_connector_unreference(&intel_connector->base);
DRM_DEBUG_KMS("\n");
}
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
new file mode 100644
index 000000000..58f60b278
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -0,0 +1,1786 @@
+/*
+ * Copyright © 2006-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "intel_drv.h"
+
+struct intel_shared_dpll *
+intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
+ enum intel_dpll_id id)
+{
+ return &dev_priv->shared_dplls[id];
+}
+
+enum intel_dpll_id
+intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ if (WARN_ON(pll < dev_priv->shared_dplls ||
+ pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
+ return -1;
+
+ return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
+}
+
+void
+intel_shared_dpll_config_get(struct intel_shared_dpll_config *config,
+ struct intel_shared_dpll *pll,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum intel_dpll_id id = intel_get_shared_dpll_id(dev_priv, pll);
+
+ config[id].crtc_mask |= 1 << crtc->pipe;
+}
+
+void
+intel_shared_dpll_config_put(struct intel_shared_dpll_config *config,
+ struct intel_shared_dpll *pll,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum intel_dpll_id id = intel_get_shared_dpll_id(dev_priv, pll);
+
+ config[id].crtc_mask &= ~(1 << crtc->pipe);
+}
+
+/* For ILK+ */
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ bool state)
+{
+ bool cur_state;
+ struct intel_dpll_hw_state hw_state;
+
+ if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
+ return;
+
+ cur_state = pll->funcs.get_hw_state(dev_priv, pll, &hw_state);
+ I915_STATE_WARN(cur_state != state,
+ "%s assertion failure (expected %s, current %s)\n",
+ pll->name, onoff(state), onoff(cur_state));
+}
+
+void intel_prepare_shared_dpll(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+
+ if (WARN_ON(pll == NULL))
+ return;
+
+ mutex_lock(&dev_priv->dpll_lock);
+ WARN_ON(!pll->config.crtc_mask);
+ if (!pll->active_mask) {
+ DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
+ WARN_ON(pll->on);
+ assert_shared_dpll_disabled(dev_priv, pll);
+
+ pll->funcs.mode_set(dev_priv, pll);
+ }
+ mutex_unlock(&dev_priv->dpll_lock);
+}
+
+/**
+ * intel_enable_shared_dpll - enable a CRTC's shared DPLL
+ * @crtc: CRTC whose shared DPLL should be enabled
+ *
+ * The PCH PLL needs to be enabled before the PCH transcoder, since it
+ * drives the transcoder clock.
+ */
+void intel_enable_shared_dpll(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+ unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
+ unsigned old_mask;
+
+ if (WARN_ON(pll == NULL))
+ return;
+
+ mutex_lock(&dev_priv->dpll_lock);
+ old_mask = pll->active_mask;
+
+ if (WARN_ON(!(pll->config.crtc_mask & crtc_mask)) ||
+ WARN_ON(pll->active_mask & crtc_mask))
+ goto out;
+
+ pll->active_mask |= crtc_mask;
+
+ DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
+ pll->name, pll->active_mask, pll->on,
+ crtc->base.base.id);
+
+ if (old_mask) {
+ WARN_ON(!pll->on);
+ assert_shared_dpll_enabled(dev_priv, pll);
+ goto out;
+ }
+ WARN_ON(pll->on);
+
+ DRM_DEBUG_KMS("enabling %s\n", pll->name);
+ pll->funcs.enable(dev_priv, pll);
+ pll->on = true;
+
+out:
+ mutex_unlock(&dev_priv->dpll_lock);
+}
+
+void intel_disable_shared_dpll(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+ unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
+
+ /* PCH only available on ILK+ */
+ if (INTEL_INFO(dev)->gen < 5)
+ return;
+
+ if (pll == NULL)
+ return;
+
+ mutex_lock(&dev_priv->dpll_lock);
+ if (WARN_ON(!(pll->active_mask & crtc_mask)))
+ goto out;
+
+ DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
+ pll->name, pll->active_mask, pll->on,
+ crtc->base.base.id);
+
+ assert_shared_dpll_enabled(dev_priv, pll);
+ WARN_ON(!pll->on);
+
+ pll->active_mask &= ~crtc_mask;
+ if (pll->active_mask)
+ goto out;
+
+ DRM_DEBUG_KMS("disabling %s\n", pll->name);
+ pll->funcs.disable(dev_priv, pll);
+ pll->on = false;
+
+out:
+ mutex_unlock(&dev_priv->dpll_lock);
+}
+
+static struct intel_shared_dpll *
+intel_find_shared_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ enum intel_dpll_id range_min,
+ enum intel_dpll_id range_max)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct intel_shared_dpll *pll;
+ struct intel_shared_dpll_config *shared_dpll;
+ enum intel_dpll_id i;
+
+ shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
+
+ for (i = range_min; i <= range_max; i++) {
+ pll = &dev_priv->shared_dplls[i];
+
+ /* First pass: only consider DPLLs that are already in use */
+ if (shared_dpll[i].crtc_mask == 0)
+ continue;
+
+ if (memcmp(&crtc_state->dpll_hw_state,
+ &shared_dpll[i].hw_state,
+ sizeof(crtc_state->dpll_hw_state)) == 0) {
+ DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, active %x)\n",
+ crtc->base.base.id, pll->name,
+ shared_dpll[i].crtc_mask,
+ pll->active_mask);
+ return pll;
+ }
+ }
+
+ /* OK, no DPLL with matching state; maybe there's a free one? */
+ for (i = range_min; i <= range_max; i++) {
+ pll = &dev_priv->shared_dplls[i];
+ if (shared_dpll[i].crtc_mask == 0) {
+ DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
+ crtc->base.base.id, pll->name);
+ return pll;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+intel_reference_shared_dpll(struct intel_shared_dpll *pll,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_shared_dpll_config *shared_dpll;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ enum intel_dpll_id i = pll->id;
+
+ shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
+
+ if (shared_dpll[i].crtc_mask == 0)
+ shared_dpll[i].hw_state =
+ crtc_state->dpll_hw_state;
+
+ crtc_state->shared_dpll = pll;
+ DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
+ pipe_name(crtc->pipe));
+
+ intel_shared_dpll_config_get(shared_dpll, pll, crtc);
+}
+
+void intel_shared_dpll_commit(struct drm_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
+ struct intel_shared_dpll_config *shared_dpll;
+ struct intel_shared_dpll *pll;
+ enum intel_dpll_id i;
+
+ if (!to_intel_atomic_state(state)->dpll_set)
+ return;
+
+ shared_dpll = to_intel_atomic_state(state)->shared_dpll;
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ pll = &dev_priv->shared_dplls[i];
+ pll->config = shared_dpll[i];
+ }
+}
+
+static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ uint32_t val;
+
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
+ val = I915_READ(PCH_DPLL(pll->id));
+ hw_state->dpll = val;
+ hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
+ hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+ return val & DPLL_VCO_ENABLE;
+}
+
+static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
+ I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
+}
+
+static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+ bool enabled;
+
+ I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
+
+ val = I915_READ(PCH_DREF_CONTROL);
+ enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
+ DREF_SUPERSPREAD_SOURCE_MASK));
+ I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
+}
+
+static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ /* PCH refclock must be enabled first */
+ ibx_assert_pch_refclk_enabled(dev_priv);
+
+ I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(PCH_DPLL(pll->id));
+ udelay(150);
+
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
+ POSTING_READ(PCH_DPLL(pll->id));
+ udelay(200);
+}
+
+static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct intel_crtc *crtc;
+
+ /* Make sure no transcoder is still depending on us. */
+ for_each_intel_crtc(dev, crtc) {
+ if (crtc->config->shared_dpll == pll)
+ assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
+ }
+
+ I915_WRITE(PCH_DPLL(pll->id), 0);
+ POSTING_READ(PCH_DPLL(pll->id));
+ udelay(200);
+}
+
+static struct intel_shared_dpll *
+ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_shared_dpll *pll;
+ enum intel_dpll_id i;
+
+ if (HAS_PCH_IBX(dev_priv)) {
+ /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
+ i = (enum intel_dpll_id) crtc->pipe;
+ pll = &dev_priv->shared_dplls[i];
+
+ DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
+ crtc->base.base.id, pll->name);
+ } else {
+ pll = intel_find_shared_dpll(crtc, crtc_state,
+ DPLL_ID_PCH_PLL_A,
+ DPLL_ID_PCH_PLL_B);
+ }
+
+ if (!pll)
+ return NULL;
+
+ /* reference the pll */
+ intel_reference_shared_dpll(pll, crtc_state);
+
+ return pll;
+}
+
+static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
+ .mode_set = ibx_pch_dpll_mode_set,
+ .enable = ibx_pch_dpll_enable,
+ .disable = ibx_pch_dpll_disable,
+ .get_hw_state = ibx_pch_dpll_get_hw_state,
+};
+
+static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll);
+ POSTING_READ(WRPLL_CTL(pll->id));
+ udelay(20);
+}
+
+static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ I915_WRITE(SPLL_CTL, pll->config.hw_state.spll);
+ POSTING_READ(SPLL_CTL);
+ udelay(20);
+}
+
+static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ uint32_t val;
+
+ val = I915_READ(WRPLL_CTL(pll->id));
+ I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
+ POSTING_READ(WRPLL_CTL(pll->id));
+}
+
+static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ uint32_t val;
+
+ val = I915_READ(SPLL_CTL);
+ I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+ POSTING_READ(SPLL_CTL);
+}
+
+static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ uint32_t val;
+
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
+ val = I915_READ(WRPLL_CTL(pll->id));
+ hw_state->wrpll = val;
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+ return val & WRPLL_PLL_ENABLE;
+}
+
+static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ uint32_t val;
+
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
+ val = I915_READ(SPLL_CTL);
+ hw_state->spll = val;
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+ return val & SPLL_PLL_ENABLE;
+}
+
+static uint32_t hsw_pll_to_ddi_pll_sel(struct intel_shared_dpll *pll)
+{
+ switch (pll->id) {
+ case DPLL_ID_WRPLL1:
+ return PORT_CLK_SEL_WRPLL1;
+ case DPLL_ID_WRPLL2:
+ return PORT_CLK_SEL_WRPLL2;
+ case DPLL_ID_SPLL:
+ return PORT_CLK_SEL_SPLL;
+ case DPLL_ID_LCPLL_810:
+ return PORT_CLK_SEL_LCPLL_810;
+ case DPLL_ID_LCPLL_1350:
+ return PORT_CLK_SEL_LCPLL_1350;
+ case DPLL_ID_LCPLL_2700:
+ return PORT_CLK_SEL_LCPLL_2700;
+ default:
+ return PORT_CLK_SEL_NONE;
+ }
+}
+
+#define LC_FREQ 2700
+#define LC_FREQ_2K U64_C(LC_FREQ * 2000)
+
+#define P_MIN 2
+#define P_MAX 64
+#define P_INC 2
+
+/* Constraints for PLL good behavior */
+#define REF_MIN 48
+#define REF_MAX 400
+#define VCO_MIN 2400
+#define VCO_MAX 4800
+
+struct hsw_wrpll_rnp {
+ unsigned p, n2, r2;
+};
+
+static unsigned hsw_wrpll_get_budget_for_freq(int clock)
+{
+ unsigned budget;
+
+ switch (clock) {
+ case 25175000:
+ case 25200000:
+ case 27000000:
+ case 27027000:
+ case 37762500:
+ case 37800000:
+ case 40500000:
+ case 40541000:
+ case 54000000:
+ case 54054000:
+ case 59341000:
+ case 59400000:
+ case 72000000:
+ case 74176000:
+ case 74250000:
+ case 81000000:
+ case 81081000:
+ case 89012000:
+ case 89100000:
+ case 108000000:
+ case 108108000:
+ case 111264000:
+ case 111375000:
+ case 148352000:
+ case 148500000:
+ case 162000000:
+ case 162162000:
+ case 222525000:
+ case 222750000:
+ case 296703000:
+ case 297000000:
+ budget = 0;
+ break;
+ case 233500000:
+ case 245250000:
+ case 247750000:
+ case 253250000:
+ case 298000000:
+ budget = 1500;
+ break;
+ case 169128000:
+ case 169500000:
+ case 179500000:
+ case 202000000:
+ budget = 2000;
+ break;
+ case 256250000:
+ case 262500000:
+ case 270000000:
+ case 272500000:
+ case 273750000:
+ case 280750000:
+ case 281250000:
+ case 286000000:
+ case 291750000:
+ budget = 4000;
+ break;
+ case 267250000:
+ case 268500000:
+ budget = 5000;
+ break;
+ default:
+ budget = 1000;
+ break;
+ }
+
+ return budget;
+}
+
+static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
+ unsigned r2, unsigned n2, unsigned p,
+ struct hsw_wrpll_rnp *best)
+{
+ uint64_t a, b, c, d, diff, diff_best;
+
+ /* No best (r,n,p) yet */
+ if (best->p == 0) {
+ best->p = p;
+ best->n2 = n2;
+ best->r2 = r2;
+ return;
+ }
+
+ /*
+ * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
+ * freq2k.
+ *
+ * delta = 1e6 *
+ * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
+ * freq2k;
+ *
+ * and we would like delta <= budget.
+ *
+ * If the discrepancy is above the PPM-based budget, always prefer to
+ * improve upon the previous solution. However, if you're within the
+ * budget, try to maximize Ref * VCO, that is N / (P * R^2).
+ */
+ a = freq2k * budget * p * r2;
+ b = freq2k * budget * best->p * best->r2;
+ diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
+ diff_best = abs_diff(freq2k * best->p * best->r2,
+ LC_FREQ_2K * best->n2);
+ c = 1000000 * diff;
+ d = 1000000 * diff_best;
+
+ if (a < c && b < d) {
+ /* If both are above the budget, pick the closer */
+ if (best->p * best->r2 * diff < p * r2 * diff_best) {
+ best->p = p;
+ best->n2 = n2;
+ best->r2 = r2;
+ }
+ } else if (a >= c && b < d) {
+ /* The candidate is within the budget but the best so far is not, so update. */
+ best->p = p;
+ best->n2 = n2;
+ best->r2 = r2;
+ } else if (a >= c && b >= d) {
+ /* Both are below the limit, so pick the higher n2/(r2*r2) */
+ if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
+ best->p = p;
+ best->n2 = n2;
+ best->r2 = r2;
+ }
+ }
+ /* Otherwise a < c && b >= d, do nothing */
+}
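/*
 * Illustrative aside (not from this patch): the budget test described in
 * the comment above, restated as a self-contained helper.  "delta" is the
 * relative error of the candidate output clock in ppm, and a candidate is
 * within budget when delta <= budget.  The driver avoids the division by
 * comparing the cross-multiplied terms (a, b vs. c, d above); this sketch
 * does it the naive way.  All names below are made up for the example.
 */
#include <stdint.h>

#define EX_LC_FREQ_2K 5400000ULL	/* LC_FREQ (2700) * 2000, as above */

static int ex_wrpll_within_budget(uint64_t freq2k, unsigned int budget,
				  unsigned int r2, unsigned int n2,
				  unsigned int p)
{
	/* candidate output clock, in the same units as freq2k */
	uint64_t out2k = EX_LC_FREQ_2K * n2 / (p * r2);
	uint64_t diff = out2k > freq2k ? out2k - freq2k : freq2k - out2k;

	/* delta = 1e6 * |freq2k - out2k| / freq2k, i.e. the error in ppm */
	return 1000000ULL * diff / freq2k <= budget;
}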
+
+static void
+hsw_ddi_calculate_wrpll(int clock /* in Hz */,
+ unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
+{
+ uint64_t freq2k;
+ unsigned p, n2, r2;
+ struct hsw_wrpll_rnp best = { 0, 0, 0 };
+ unsigned budget;
+
+ freq2k = clock / 100;
+
+ budget = hsw_wrpll_get_budget_for_freq(clock);
+
+ /* Special case handling for 540 MHz pixel clock: bypass WR PLL entirely
+ * and directly pass the LC PLL to it. */
+ if (freq2k == 5400000) {
+ *n2_out = 2;
+ *p_out = 1;
+ *r2_out = 2;
+ return;
+ }
+
+ /*
+ * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
+ * the WR PLL.
+ *
+ * We want R so that REF_MIN <= Ref <= REF_MAX.
+ * Injecting R2 = 2 * R gives:
+ * REF_MAX * r2 > LC_FREQ * 2 and
+ * REF_MIN * r2 < LC_FREQ * 2
+ *
+ * Which means the desired boundaries for r2 are:
+ * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
+ *
+ */
+ for (r2 = LC_FREQ * 2 / REF_MAX + 1;
+ r2 <= LC_FREQ * 2 / REF_MIN;
+ r2++) {
+
+ /*
+ * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
+ *
+ * Once again we want VCO_MIN <= VCO <= VCO_MAX.
+ * Injecting R2 = 2 * R and N2 = 2 * N, we get:
+ * VCO_MAX * r2 > n2 * LC_FREQ and
+ * VCO_MIN * r2 < n2 * LC_FREQ
+ *
+ * Which means the desired boundaries for n2 are:
+ * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
+ */
+ for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
+ n2 <= VCO_MAX * r2 / LC_FREQ;
+ n2++) {
+
+ for (p = P_MIN; p <= P_MAX; p += P_INC)
+ hsw_wrpll_update_rnp(freq2k, budget,
+ r2, n2, p, &best);
+ }
+ }
+
+ *n2_out = best.n2;
+ *p_out = best.p;
+ *r2_out = best.r2;
+}
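/*
 * Worked example (not from this patch): with LC_FREQ = 2700, REF_MIN = 48
 * and REF_MAX = 400, the r2 loop above runs from 2 * 2700 / 400 + 1 = 14
 * to 2 * 2700 / 48 = 112, i.e. reference inputs from roughly
 * 5400 / 112 ~= 48.2 MHz up to 5400 / 14 ~= 385.7 MHz, both inside
 * [REF_MIN, REF_MAX].  For r2 = 14 the inner n2 loop then runs from
 * 2400 * 14 / 2700 + 1 = 13 to 4800 * 14 / 2700 = 24, keeping
 * VCO = n2 * 2700 / 14 between about 2507 and 4629 MHz, inside
 * [VCO_MIN, VCO_MAX].
 */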
+
+static struct intel_shared_dpll *
+hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_shared_dpll *pll;
+ int clock = crtc_state->port_clock;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (encoder->type == INTEL_OUTPUT_HDMI) {
+ uint32_t val;
+ unsigned p, n2, r2;
+
+ hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+
+ val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
+ WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+ WRPLL_DIVIDER_POST(p);
+
+ crtc_state->dpll_hw_state.wrpll = val;
+
+ pll = intel_find_shared_dpll(crtc, crtc_state,
+ DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
+
+ } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ encoder->type == INTEL_OUTPUT_DP_MST ||
+ encoder->type == INTEL_OUTPUT_EDP) {
+ enum intel_dpll_id pll_id;
+
+ switch (clock / 2) {
+ case 81000:
+ pll_id = DPLL_ID_LCPLL_810;
+ break;
+ case 135000:
+ pll_id = DPLL_ID_LCPLL_1350;
+ break;
+ case 270000:
+ pll_id = DPLL_ID_LCPLL_2700;
+ break;
+ default:
+ DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
+ return NULL;
+ }
+
+ pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
+
+ } else if (encoder->type == INTEL_OUTPUT_ANALOG) {
+ if (WARN_ON(crtc_state->port_clock / 2 != 135000))
+ return NULL;
+
+ crtc_state->dpll_hw_state.spll =
+ SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
+
+ pll = intel_find_shared_dpll(crtc, crtc_state,
+ DPLL_ID_SPLL, DPLL_ID_SPLL);
+ } else {
+ return NULL;
+ }
+
+ if (!pll)
+ return NULL;
+
+ crtc_state->ddi_pll_sel = hsw_pll_to_ddi_pll_sel(pll);
+
+ intel_reference_shared_dpll(pll, crtc_state);
+
+ return pll;
+}
+
+
+static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
+ .enable = hsw_ddi_wrpll_enable,
+ .disable = hsw_ddi_wrpll_disable,
+ .get_hw_state = hsw_ddi_wrpll_get_hw_state,
+};
+
+static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
+ .enable = hsw_ddi_spll_enable,
+ .disable = hsw_ddi_spll_disable,
+ .get_hw_state = hsw_ddi_spll_get_hw_state,
+};
+
+static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+}
+
+static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+}
+
+static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ return true;
+}
+
+static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
+ .enable = hsw_ddi_lcpll_enable,
+ .disable = hsw_ddi_lcpll_disable,
+ .get_hw_state = hsw_ddi_lcpll_get_hw_state,
+};
+
+struct skl_dpll_regs {
+ i915_reg_t ctl, cfgcr1, cfgcr2;
+};
+
+/* this array is indexed by the *shared* pll id */
+static const struct skl_dpll_regs skl_dpll_regs[4] = {
+ {
+ /* DPLL 0 */
+ .ctl = LCPLL1_CTL,
+ /* DPLL 0 doesn't support HDMI mode */
+ },
+ {
+ /* DPLL 1 */
+ .ctl = LCPLL2_CTL,
+ .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
+ .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
+ },
+ {
+ /* DPLL 2 */
+ .ctl = WRPLL_CTL(0),
+ .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
+ .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
+ },
+ {
+ /* DPLL 3 */
+ .ctl = WRPLL_CTL(1),
+ .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
+ .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
+ },
+};
+
+static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ uint32_t val;
+
+ val = I915_READ(DPLL_CTRL1);
+
+ val &= ~(DPLL_CTRL1_HDMI_MODE(pll->id) | DPLL_CTRL1_SSC(pll->id) |
+ DPLL_CTRL1_LINK_RATE_MASK(pll->id));
+ val |= pll->config.hw_state.ctrl1 << (pll->id * 6);
+
+ I915_WRITE(DPLL_CTRL1, val);
+ POSTING_READ(DPLL_CTRL1);
+}
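/*
 * Illustrative aside (not from this patch): per the shift used here and the
 * 0x3f mask used in skl_ddi_pll_get_hw_state() below, each DPLL owns a
 * 6-bit field in DPLL_CTRL1 at bit offset pll->id * 6 (e.g. DPLL2 -> bits
 * 17:12), covering the per-DPLL override, link rate, SSC and HDMI mode
 * bits.  A hypothetical, standalone extraction helper:
 */
#include <stdint.h>

static inline uint32_t ex_dpll_ctrl1_field(uint32_t ctrl1, int pll_id)
{
	/* isolate the 6 per-DPLL control bits */
	return (ctrl1 >> (pll_id * 6)) & 0x3f;
}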
+
+static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ const struct skl_dpll_regs *regs = skl_dpll_regs;
+
+ skl_ddi_pll_write_ctrl1(dev_priv, pll);
+
+ I915_WRITE(regs[pll->id].cfgcr1, pll->config.hw_state.cfgcr1);
+ I915_WRITE(regs[pll->id].cfgcr2, pll->config.hw_state.cfgcr2);
+ POSTING_READ(regs[pll->id].cfgcr1);
+ POSTING_READ(regs[pll->id].cfgcr2);
+
+ /* the enable bit is always bit 31 */
+ I915_WRITE(regs[pll->id].ctl,
+ I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
+
+ if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(pll->id), 5))
+ DRM_ERROR("DPLL %d not locked\n", pll->id);
+}
+
+static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ skl_ddi_pll_write_ctrl1(dev_priv, pll);
+}
+
+static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ const struct skl_dpll_regs *regs = skl_dpll_regs;
+
+ /* the enable bit is always bit 31 */
+ I915_WRITE(regs[pll->id].ctl,
+ I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE);
+ POSTING_READ(regs[pll->id].ctl);
+}
+
+static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+}
+
+static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ uint32_t val;
+ const struct skl_dpll_regs *regs = skl_dpll_regs;
+ bool ret;
+
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
+ ret = false;
+
+ val = I915_READ(regs[pll->id].ctl);
+ if (!(val & LCPLL_PLL_ENABLE))
+ goto out;
+
+ val = I915_READ(DPLL_CTRL1);
+ hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f;
+
+ /* avoid reading back stale values if HDMI mode is not enabled */
+ if (val & DPLL_CTRL1_HDMI_MODE(pll->id)) {
+ hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
+ hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
+ }
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+ return ret;
+}
+
+static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ uint32_t val;
+ const struct skl_dpll_regs *regs = skl_dpll_regs;
+ bool ret;
+
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
+ ret = false;
+
+ /* DPLL0 is always enabled since it drives CDCLK */
+ val = I915_READ(regs[pll->id].ctl);
+ if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
+ goto out;
+
+ val = I915_READ(DPLL_CTRL1);
+ hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f;
+
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+ return ret;
+}
+
+struct skl_wrpll_context {
+ uint64_t min_deviation; /* current minimal deviation */
+ uint64_t central_freq; /* chosen central freq */
+ uint64_t dco_freq; /* chosen dco freq */
+ unsigned int p; /* chosen divider */
+};
+
+static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
+{
+ memset(ctx, 0, sizeof(*ctx));
+
+ ctx->min_deviation = U64_MAX;
+}
+
+/* DCO freq must be within +1%/-6% of the DCO central freq */
+#define SKL_DCO_MAX_PDEVIATION 100
+#define SKL_DCO_MAX_NDEVIATION 600
+
+static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
+ uint64_t central_freq,
+ uint64_t dco_freq,
+ unsigned int divider)
+{
+ uint64_t deviation;
+
+ deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
+ central_freq);
+
+ /* positive deviation */
+ if (dco_freq >= central_freq) {
+ if (deviation < SKL_DCO_MAX_PDEVIATION &&
+ deviation < ctx->min_deviation) {
+ ctx->min_deviation = deviation;
+ ctx->central_freq = central_freq;
+ ctx->dco_freq = dco_freq;
+ ctx->p = divider;
+ }
+ /* negative deviation */
+ } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
+ deviation < ctx->min_deviation) {
+ ctx->min_deviation = deviation;
+ ctx->central_freq = central_freq;
+ ctx->dco_freq = dco_freq;
+ ctx->p = divider;
+ }
+}
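/*
 * Worked example (not from this patch): the deviation above is
 * 10000 * |dco - central| / central, i.e. units of 0.01%, so the +1%/-6%
 * limits become 100 and 600.  For a 148.5 MHz pixel clock the AFE clock is
 * 5 * 148.5 = 742.5 MHz; divider p = 12 gives dco = 8910 MHz, which
 * against the 9000 MHz central frequency is a negative deviation of
 * 10000 * 90 / 9000 = 100 (1.0%), below the 600 limit and therefore an
 * acceptable candidate.
 */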
+
+static void skl_wrpll_get_multipliers(unsigned int p,
+ unsigned int *p0 /* out */,
+ unsigned int *p1 /* out */,
+ unsigned int *p2 /* out */)
+{
+ /* even dividers */
+ if (p % 2 == 0) {
+ unsigned int half = p / 2;
+
+ if (half == 1 || half == 2 || half == 3 || half == 5) {
+ *p0 = 2;
+ *p1 = 1;
+ *p2 = half;
+ } else if (half % 2 == 0) {
+ *p0 = 2;
+ *p1 = half / 2;
+ *p2 = 2;
+ } else if (half % 3 == 0) {
+ *p0 = 3;
+ *p1 = half / 3;
+ *p2 = 2;
+ } else if (half % 7 == 0) {
+ *p0 = 7;
+ *p1 = half / 7;
+ *p2 = 2;
+ }
+ } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
+ *p0 = 3;
+ *p1 = 1;
+ *p2 = p / 3;
+ } else if (p == 5 || p == 7) {
+ *p0 = p;
+ *p1 = 1;
+ *p2 = 1;
+ } else if (p == 15) {
+ *p0 = 3;
+ *p1 = 1;
+ *p2 = 5;
+ } else if (p == 21) {
+ *p0 = 7;
+ *p1 = 1;
+ *p2 = 3;
+ } else if (p == 35) {
+ *p0 = 7;
+ *p1 = 1;
+ *p2 = 5;
+ }
+}
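/*
 * Worked examples (not from this patch) of the divider decomposition
 * above (p = p0 * p1 * p2):
 *   p = 20: half = 10, 10 % 2 == 0  -> p0 = 2, p1 = 5, p2 = 2
 *   p = 42: half = 21, 21 % 3 == 0  -> p0 = 3, p1 = 7, p2 = 2
 *   p = 35: odd                     -> p0 = 7, p1 = 1, p2 = 5
 */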
+
+struct skl_wrpll_params {
+ uint32_t dco_fraction;
+ uint32_t dco_integer;
+ uint32_t qdiv_ratio;
+ uint32_t qdiv_mode;
+ uint32_t kdiv;
+ uint32_t pdiv;
+ uint32_t central_freq;
+};
+
+static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
+ uint64_t afe_clock,
+ uint64_t central_freq,
+ uint32_t p0, uint32_t p1, uint32_t p2)
+{
+ uint64_t dco_freq;
+
+ switch (central_freq) {
+ case 9600000000ULL:
+ params->central_freq = 0;
+ break;
+ case 9000000000ULL:
+ params->central_freq = 1;
+ break;
+ case 8400000000ULL:
+ params->central_freq = 3;
+ }
+
+ switch (p0) {
+ case 1:
+ params->pdiv = 0;
+ break;
+ case 2:
+ params->pdiv = 1;
+ break;
+ case 3:
+ params->pdiv = 2;
+ break;
+ case 7:
+ params->pdiv = 4;
+ break;
+ default:
+ WARN(1, "Incorrect PDiv\n");
+ }
+
+ switch (p2) {
+ case 5:
+ params->kdiv = 0;
+ break;
+ case 2:
+ params->kdiv = 1;
+ break;
+ case 3:
+ params->kdiv = 2;
+ break;
+ case 1:
+ params->kdiv = 3;
+ break;
+ default:
+ WARN(1, "Incorrect KDiv\n");
+ }
+
+ params->qdiv_ratio = p1;
+ params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
+
+ dco_freq = p0 * p1 * p2 * afe_clock;
+
+ /*
+ * Intermediate values are in Hz.
+ * Divide by MHz to match bspec
+ */
+ params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
+ params->dco_fraction =
+ div_u64((div_u64(dco_freq, 24) -
+ params->dco_integer * MHz(1)) * 0x8000, MHz(1));
+}
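/*
 * Worked example (not from this patch): the two divisions above express
 * the DCO frequency in units of the 24 MHz reference with a 2^-15 binary
 * fraction.  For dco_freq = 8100 MHz:
 *   dco_integer  = 8100000000 / 24000000            = 337
 *   dco_fraction = (337500000 - 337000000) * 0x8000 / 1000000
 *                = 500000 * 32768 / 1000000         = 16384 (0x4000, i.e. 0.5)
 */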
+
+static bool
+skl_ddi_calculate_wrpll(int clock /* in Hz */,
+ struct skl_wrpll_params *wrpll_params)
+{
+ uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
+ uint64_t dco_central_freq[3] = {8400000000ULL,
+ 9000000000ULL,
+ 9600000000ULL};
+ static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
+ 24, 28, 30, 32, 36, 40, 42, 44,
+ 48, 52, 54, 56, 60, 64, 66, 68,
+ 70, 72, 76, 78, 80, 84, 88, 90,
+ 92, 96, 98 };
+ static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
+ static const struct {
+ const int *list;
+ int n_dividers;
+ } dividers[] = {
+ { even_dividers, ARRAY_SIZE(even_dividers) },
+ { odd_dividers, ARRAY_SIZE(odd_dividers) },
+ };
+ struct skl_wrpll_context ctx;
+ unsigned int dco, d, i;
+ unsigned int p0, p1, p2;
+
+ skl_wrpll_context_init(&ctx);
+
+ for (d = 0; d < ARRAY_SIZE(dividers); d++) {
+ for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
+ for (i = 0; i < dividers[d].n_dividers; i++) {
+ unsigned int p = dividers[d].list[i];
+ uint64_t dco_freq = p * afe_clock;
+
+ skl_wrpll_try_divider(&ctx,
+ dco_central_freq[dco],
+ dco_freq,
+ p);
+ /*
+ * Skip the remaining dividers if we're sure to
+ * have found the definitive divider; we can't
+ * improve on a deviation of 0.
+ */
+ if (ctx.min_deviation == 0)
+ goto skip_remaining_dividers;
+ }
+ }
+
+skip_remaining_dividers:
+ /*
+ * If a solution is found with an even divider, prefer
+ * this one.
+ */
+ if (d == 0 && ctx.p)
+ break;
+ }
+
+ if (!ctx.p) {
+ DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
+ return false;
+ }
+
+ /*
+ * gcc incorrectly analyses that these can be used without being
+ * initialized. To be fair, it's hard to guess.
+ */
+ p0 = p1 = p2 = 0;
+ skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
+ skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
+ p0, p1, p2);
+
+ return true;
+}
+
+static struct intel_shared_dpll *
+skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ struct intel_shared_dpll *pll;
+ uint32_t ctrl1, cfgcr1, cfgcr2;
+ int clock = crtc_state->port_clock;
+
+ /*
+ * See comment in intel_dpll_hw_state to understand why we always use 0
+ * as the DPLL id in this function.
+ */
+
+ ctrl1 = DPLL_CTRL1_OVERRIDE(0);
+
+ if (encoder->type == INTEL_OUTPUT_HDMI) {
+ struct skl_wrpll_params wrpll_params = { 0, };
+
+ ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
+
+ if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
+ return NULL;
+
+ cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
+ DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
+ wrpll_params.dco_integer;
+
+ cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
+ DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
+ DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
+ DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
+ wrpll_params.central_freq;
+ } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ encoder->type == INTEL_OUTPUT_DP_MST ||
+ encoder->type == INTEL_OUTPUT_EDP) {
+ switch (crtc_state->port_clock / 2) {
+ case 81000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
+ break;
+ case 135000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
+ break;
+ case 270000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
+ break;
+ /* eDP 1.4 rates */
+ case 162000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
+ break;
+ /*
+ * TBD: For DP link rates 2.16 GHz and 4.32 GHz, the VCO is 8640 MHz,
+ * which results in a CDCLK change. Need to handle the CDCLK change by
+ * disabling pipes and re-enabling them.
+ */
+ case 108000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
+ break;
+ case 216000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
+ break;
+ }
+
+ cfgcr1 = cfgcr2 = 0;
+ } else {
+ return NULL;
+ }
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+ crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
+ crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
+
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ pll = intel_find_shared_dpll(crtc, crtc_state,
+ DPLL_ID_SKL_DPLL0,
+ DPLL_ID_SKL_DPLL0);
+ else
+ pll = intel_find_shared_dpll(crtc, crtc_state,
+ DPLL_ID_SKL_DPLL1,
+ DPLL_ID_SKL_DPLL3);
+ if (!pll)
+ return NULL;
+
+ crtc_state->ddi_pll_sel = pll->id;
+
+ intel_reference_shared_dpll(pll, crtc_state);
+
+ return pll;
+}
+
+static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
+ .enable = skl_ddi_pll_enable,
+ .disable = skl_ddi_pll_disable,
+ .get_hw_state = skl_ddi_pll_get_hw_state,
+};
+
+static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
+ .enable = skl_ddi_dpll0_enable,
+ .disable = skl_ddi_dpll0_disable,
+ .get_hw_state = skl_ddi_dpll0_get_hw_state,
+};
+
+static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ uint32_t temp;
+ enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
+
+ /* Non-SSC reference */
+ temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp |= PORT_PLL_REF_SEL;
+ I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+
+ /* Disable 10 bit clock */
+ temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
+ temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
+ I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+
+ /* Write P1 & P2 */
+ temp = I915_READ(BXT_PORT_PLL_EBB_0(port));
+ temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
+ temp |= pll->config.hw_state.ebb0;
+ I915_WRITE(BXT_PORT_PLL_EBB_0(port), temp);
+
+ /* Write M2 integer */
+ temp = I915_READ(BXT_PORT_PLL(port, 0));
+ temp &= ~PORT_PLL_M2_MASK;
+ temp |= pll->config.hw_state.pll0;
+ I915_WRITE(BXT_PORT_PLL(port, 0), temp);
+
+ /* Write N */
+ temp = I915_READ(BXT_PORT_PLL(port, 1));
+ temp &= ~PORT_PLL_N_MASK;
+ temp |= pll->config.hw_state.pll1;
+ I915_WRITE(BXT_PORT_PLL(port, 1), temp);
+
+ /* Write M2 fraction */
+ temp = I915_READ(BXT_PORT_PLL(port, 2));
+ temp &= ~PORT_PLL_M2_FRAC_MASK;
+ temp |= pll->config.hw_state.pll2;
+ I915_WRITE(BXT_PORT_PLL(port, 2), temp);
+
+ /* Write M2 fraction enable */
+ temp = I915_READ(BXT_PORT_PLL(port, 3));
+ temp &= ~PORT_PLL_M2_FRAC_ENABLE;
+ temp |= pll->config.hw_state.pll3;
+ I915_WRITE(BXT_PORT_PLL(port, 3), temp);
+
+ /* Write coeff */
+ temp = I915_READ(BXT_PORT_PLL(port, 6));
+ temp &= ~PORT_PLL_PROP_COEFF_MASK;
+ temp &= ~PORT_PLL_INT_COEFF_MASK;
+ temp &= ~PORT_PLL_GAIN_CTL_MASK;
+ temp |= pll->config.hw_state.pll6;
+ I915_WRITE(BXT_PORT_PLL(port, 6), temp);
+
+ /* Write calibration val */
+ temp = I915_READ(BXT_PORT_PLL(port, 8));
+ temp &= ~PORT_PLL_TARGET_CNT_MASK;
+ temp |= pll->config.hw_state.pll8;
+ I915_WRITE(BXT_PORT_PLL(port, 8), temp);
+
+ temp = I915_READ(BXT_PORT_PLL(port, 9));
+ temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
+ temp |= pll->config.hw_state.pll9;
+ I915_WRITE(BXT_PORT_PLL(port, 9), temp);
+
+ temp = I915_READ(BXT_PORT_PLL(port, 10));
+ temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
+ temp &= ~PORT_PLL_DCO_AMP_MASK;
+ temp |= pll->config.hw_state.pll10;
+ I915_WRITE(BXT_PORT_PLL(port, 10), temp);
+
+ /* Recalibrate with new settings */
+ temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
+ temp |= PORT_PLL_RECALIBRATE;
+ I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+ temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
+ temp |= pll->config.hw_state.ebb4;
+ I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+
+ /* Enable PLL */
+ temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp |= PORT_PLL_ENABLE;
+ I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+ POSTING_READ(BXT_PORT_PLL_ENABLE(port));
+
+ if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
+ 200))
+ DRM_ERROR("PLL %d not locked\n", port);
+
+ /*
+ * While we write to the group register to program all lanes at once,
+ * we can only read back individual lane registers; we pick lanes 0/1
+ * for that.
+ */
+ temp = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
+ temp &= ~LANE_STAGGER_MASK;
+ temp &= ~LANESTAGGER_STRAP_OVRD;
+ temp |= pll->config.hw_state.pcsdw12;
+ I915_WRITE(BXT_PORT_PCS_DW12_GRP(port), temp);
+}
+
+static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
+ uint32_t temp;
+
+ temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp &= ~PORT_PLL_ENABLE;
+ I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+ POSTING_READ(BXT_PORT_PLL_ENABLE(port));
+}
+
+static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
+ uint32_t val;
+ bool ret;
+
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
+ ret = false;
+
+ val = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ if (!(val & PORT_PLL_ENABLE))
+ goto out;
+
+ hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
+ hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
+
+ hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(port));
+ hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
+
+ hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0));
+ hw_state->pll0 &= PORT_PLL_M2_MASK;
+
+ hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1));
+ hw_state->pll1 &= PORT_PLL_N_MASK;
+
+ hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2));
+ hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
+
+ hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3));
+ hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
+
+ hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6));
+ hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
+ PORT_PLL_INT_COEFF_MASK |
+ PORT_PLL_GAIN_CTL_MASK;
+
+ hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8));
+ hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
+
+ hw_state->pll9 = I915_READ(BXT_PORT_PLL(port, 9));
+ hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
+
+ hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10));
+ hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
+ PORT_PLL_DCO_AMP_MASK;
+
+ /*
+ * While we write to the group register to program all lanes at once,
+ * we can only read back individual lane registers. All lanes are
+ * configured the same way, so just read out lanes 0/1 here and print a
+ * note if lanes 2/3 differ.
+ */
+ hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
+ if (I915_READ(BXT_PORT_PCS_DW12_LN23(port)) != hw_state->pcsdw12)
+ DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
+ hw_state->pcsdw12,
+ I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
+ hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
+
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+ return ret;
+}
+
+/* bxt clock parameters */
+struct bxt_clk_div {
+ int clock;
+ uint32_t p1;
+ uint32_t p2;
+ uint32_t m2_int;
+ uint32_t m2_frac;
+ bool m2_frac_en;
+ uint32_t n;
+};
+
+/* pre-calculated values for DP linkrates */
+static const struct bxt_clk_div bxt_dp_clk_val[] = {
+ {162000, 4, 2, 32, 1677722, 1, 1},
+ {270000, 4, 1, 27, 0, 0, 1},
+ {540000, 2, 1, 27, 0, 0, 1},
+ {216000, 3, 2, 32, 1677722, 1, 1},
+ {243000, 4, 1, 24, 1258291, 1, 1},
+ {324000, 4, 1, 32, 1677722, 1, 1},
+ {432000, 3, 1, 32, 1677722, 1, 1}
+};
+
+static struct intel_shared_dpll *
+bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_shared_dpll *pll;
+ enum intel_dpll_id i;
+ struct intel_digital_port *intel_dig_port;
+ struct bxt_clk_div clk_div = {0};
+ int vco = 0;
+ uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
+ uint32_t lanestagger;
+ int clock = crtc_state->port_clock;
+
+ if (encoder->type == INTEL_OUTPUT_HDMI) {
+ intel_clock_t best_clock;
+
+ /* Calculate HDMI div */
+ /*
+ * FIXME: tie the following calculation into
+ * i9xx_crtc_compute_clock
+ */
+ if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
+ DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
+ clock, pipe_name(crtc->pipe));
+ return NULL;
+ }
+
+ clk_div.p1 = best_clock.p1;
+ clk_div.p2 = best_clock.p2;
+ WARN_ON(best_clock.m1 != 2);
+ clk_div.n = best_clock.n;
+ clk_div.m2_int = best_clock.m2 >> 22;
+ clk_div.m2_frac = best_clock.m2 & ((1 << 22) - 1);
+ clk_div.m2_frac_en = clk_div.m2_frac != 0;
+
+ vco = best_clock.vco;
+ } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ encoder->type == INTEL_OUTPUT_EDP) {
+ int i;
+
+ clk_div = bxt_dp_clk_val[0];
+ for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
+ if (bxt_dp_clk_val[i].clock == clock) {
+ clk_div = bxt_dp_clk_val[i];
+ break;
+ }
+ }
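+ /* VCO in kHz; e.g. the 270000 entry above gives 270000 * 10 / 2 * 4 * 1 = 5400000 */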
+ vco = clock * 10 / 2 * clk_div.p1 * clk_div.p2;
+ }
+
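+ /* Pick the PLL coefficient, gain control and target count values for the VCO frequency band (in kHz). */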
+ if (vco >= 6200000 && vco <= 6700000) {
+ prop_coef = 4;
+ int_coef = 9;
+ gain_ctl = 3;
+ targ_cnt = 8;
+ } else if ((vco > 5400000 && vco < 6200000) ||
+ (vco >= 4800000 && vco < 5400000)) {
+ prop_coef = 5;
+ int_coef = 11;
+ gain_ctl = 3;
+ targ_cnt = 9;
+ } else if (vco == 5400000) {
+ prop_coef = 3;
+ int_coef = 8;
+ gain_ctl = 1;
+ targ_cnt = 9;
+ } else {
+ DRM_ERROR("Invalid VCO\n");
+ return NULL;
+ }
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
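+ /* Lane stagger value keyed to the link clock; written into PCS_DW12 below. */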
+ if (clock > 270000)
+ lanestagger = 0x18;
+ else if (clock > 135000)
+ lanestagger = 0x0d;
+ else if (clock > 67000)
+ lanestagger = 0x07;
+ else if (clock > 33000)
+ lanestagger = 0x04;
+ else
+ lanestagger = 0x02;
+
+ crtc_state->dpll_hw_state.ebb0 =
+ PORT_PLL_P1(clk_div.p1) | PORT_PLL_P2(clk_div.p2);
+ crtc_state->dpll_hw_state.pll0 = clk_div.m2_int;
+ crtc_state->dpll_hw_state.pll1 = PORT_PLL_N(clk_div.n);
+ crtc_state->dpll_hw_state.pll2 = clk_div.m2_frac;
+
+ if (clk_div.m2_frac_en)
+ crtc_state->dpll_hw_state.pll3 =
+ PORT_PLL_M2_FRAC_ENABLE;
+
+ crtc_state->dpll_hw_state.pll6 =
+ prop_coef | PORT_PLL_INT_COEFF(int_coef);
+ crtc_state->dpll_hw_state.pll6 |=
+ PORT_PLL_GAIN_CTL(gain_ctl);
+
+ crtc_state->dpll_hw_state.pll8 = targ_cnt;
+
+ crtc_state->dpll_hw_state.pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
+
+ crtc_state->dpll_hw_state.pll10 =
+ PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
+ | PORT_PLL_DCO_AMP_OVR_EN_H;
+
+ crtc_state->dpll_hw_state.ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
+
+ crtc_state->dpll_hw_state.pcsdw12 =
+ LANESTAGGER_STRAP_OVRD | lanestagger;
+
+ intel_dig_port = enc_to_dig_port(&encoder->base);
+
+ /* 1:1 mapping between ports and PLLs */
+ i = (enum intel_dpll_id) intel_dig_port->port;
+ pll = intel_get_shared_dpll_by_id(dev_priv, i);
+
+ DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
+ crtc->base.base.id, pll->name);
+
+ intel_reference_shared_dpll(pll, crtc_state);
+
+ /* shared DPLL id 0 is DPLL A */
+ crtc_state->ddi_pll_sel = pll->id;
+
+ return pll;
+}
+
+static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
+ .enable = bxt_ddi_pll_enable,
+ .disable = bxt_ddi_pll_disable,
+ .get_hw_state = bxt_ddi_pll_get_hw_state,
+};
+
+static void intel_ddi_pll_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t val = I915_READ(LCPLL_CTL);
+
+ if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ int cdclk_freq;
+
+ cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+ dev_priv->skl_boot_cdclk = cdclk_freq;
+ if (skl_sanitize_cdclk(dev_priv))
+ DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n");
+ if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
+ DRM_ERROR("LCPLL1 is disabled\n");
+ } else if (!IS_BROXTON(dev_priv)) {
+ /*
+ * The LCPLL register should be turned on by the BIOS. For now
+ * let's just check its state and print errors in case
+ * something is wrong. Don't even try to turn it on.
+ */
+
+ if (val & LCPLL_CD_SOURCE_FCLK)
+ DRM_ERROR("CDCLK source is not LCPLL\n");
+
+ if (val & LCPLL_PLL_DISABLE)
+ DRM_ERROR("LCPLL is disabled\n");
+ }
+}
+
+struct dpll_info {
+ const char *name;
+ const int id;
+ const struct intel_shared_dpll_funcs *funcs;
+ uint32_t flags;
+};
+
+struct intel_dpll_mgr {
+ const struct dpll_info *dpll_info;
+
+ struct intel_shared_dpll *(*get_dpll)(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder);
+};
+
+static const struct dpll_info pch_plls[] = {
+ { "PCH DPLL A", DPLL_ID_PCH_PLL_A, &ibx_pch_dpll_funcs, 0 },
+ { "PCH DPLL B", DPLL_ID_PCH_PLL_B, &ibx_pch_dpll_funcs, 0 },
+ { NULL, -1, NULL, 0 },
+};
+
+static const struct intel_dpll_mgr pch_pll_mgr = {
+ .dpll_info = pch_plls,
+ .get_dpll = ibx_get_dpll,
+};
+
+static const struct dpll_info hsw_plls[] = {
+ { "WRPLL 1", DPLL_ID_WRPLL1, &hsw_ddi_wrpll_funcs, 0 },
+ { "WRPLL 2", DPLL_ID_WRPLL2, &hsw_ddi_wrpll_funcs, 0 },
+ { "SPLL", DPLL_ID_SPLL, &hsw_ddi_spll_funcs, 0 },
+ { "LCPLL 810", DPLL_ID_LCPLL_810, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
+ { "LCPLL 1350", DPLL_ID_LCPLL_1350, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
+ { "LCPLL 2700", DPLL_ID_LCPLL_2700, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
+ { NULL, -1, NULL, },
+};
+
+static const struct intel_dpll_mgr hsw_pll_mgr = {
+ .dpll_info = hsw_plls,
+ .get_dpll = hsw_get_dpll,
+};
+
+static const struct dpll_info skl_plls[] = {
+ { "DPLL 0", DPLL_ID_SKL_DPLL0, &skl_ddi_dpll0_funcs, INTEL_DPLL_ALWAYS_ON },
+ { "DPLL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 },
+ { "DPLL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 },
+ { "DPLL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 },
+ { NULL, -1, NULL, },
+};
+
+static const struct intel_dpll_mgr skl_pll_mgr = {
+ .dpll_info = skl_plls,
+ .get_dpll = skl_get_dpll,
+};
+
+static const struct dpll_info bxt_plls[] = {
+ { "PORT PLL A", DPLL_ID_SKL_DPLL0, &bxt_ddi_pll_funcs, 0 },
+ { "PORT PLL B", DPLL_ID_SKL_DPLL1, &bxt_ddi_pll_funcs, 0 },
+ { "PORT PLL C", DPLL_ID_SKL_DPLL2, &bxt_ddi_pll_funcs, 0 },
+ { NULL, -1, NULL, },
+};
+
+static const struct intel_dpll_mgr bxt_pll_mgr = {
+ .dpll_info = bxt_plls,
+ .get_dpll = bxt_get_dpll,
+};
+
+void intel_shared_dpll_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct intel_dpll_mgr *dpll_mgr = NULL;
+ const struct dpll_info *dpll_info;
+ int i;
+
+ if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ dpll_mgr = &skl_pll_mgr;
+ else if (IS_BROXTON(dev))
+ dpll_mgr = &bxt_pll_mgr;
+ else if (HAS_DDI(dev))
+ dpll_mgr = &hsw_pll_mgr;
+ else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ dpll_mgr = &pch_pll_mgr;
+
+ if (!dpll_mgr) {
+ dev_priv->num_shared_dpll = 0;
+ return;
+ }
+
+ dpll_info = dpll_mgr->dpll_info;
+
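+ /*
+ * The platform tables above are terminated by id == -1 and must list
+ * their ids densely from 0, matching the shared_dplls array index.
+ */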
+ for (i = 0; dpll_info[i].id >= 0; i++) {
+ WARN_ON(i != dpll_info[i].id);
+
+ dev_priv->shared_dplls[i].id = dpll_info[i].id;
+ dev_priv->shared_dplls[i].name = dpll_info[i].name;
+ dev_priv->shared_dplls[i].funcs = *dpll_info[i].funcs;
+ dev_priv->shared_dplls[i].flags = dpll_info[i].flags;
+ }
+
+ dev_priv->dpll_mgr = dpll_mgr;
+ dev_priv->num_shared_dpll = i;
+ mutex_init(&dev_priv->dpll_lock);
+
+ BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
+
+ /* FIXME: Move this to a more suitable place */
+ if (HAS_DDI(dev))
+ intel_ddi_pll_init(dev);
+}
+
+struct intel_shared_dpll *
+intel_get_shared_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+
+ if (WARN_ON(!dpll_mgr))
+ return NULL;
+
+ return dpll_mgr->get_dpll(crtc, crtc_state, encoder);
+}
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
new file mode 100644
index 000000000..89c5ada1a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright © 2012-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_DPLL_MGR_H_
+#define _INTEL_DPLL_MGR_H_
+
+/* FIXME: Move this to a more appropriate place. */
+#define abs_diff(a, b) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ (void) (&__a == &__b); \
+ __a > __b ? (__a - __b) : (__b - __a); })
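+/*
+ * e.g. abs_diff(3, 7) == 4; the (void) comparison of &__a and &__b only
+ * exists to make the compiler warn when a and b have different types.
+ */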
+
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_encoder;
+
+struct intel_shared_dpll;
+struct intel_dpll_mgr;
+
+enum intel_dpll_id {
+ DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
+ /* real shared dpll ids must be >= 0 */
+ DPLL_ID_PCH_PLL_A = 0,
+ DPLL_ID_PCH_PLL_B = 1,
+ /* hsw/bdw */
+ DPLL_ID_WRPLL1 = 0,
+ DPLL_ID_WRPLL2 = 1,
+ DPLL_ID_SPLL = 2,
+ DPLL_ID_LCPLL_810 = 3,
+ DPLL_ID_LCPLL_1350 = 4,
+ DPLL_ID_LCPLL_2700 = 5,
+
+ /* skl */
+ DPLL_ID_SKL_DPLL0 = 0,
+ DPLL_ID_SKL_DPLL1 = 1,
+ DPLL_ID_SKL_DPLL2 = 2,
+ DPLL_ID_SKL_DPLL3 = 3,
+};
+#define I915_NUM_PLLS 6
+
+/*
+ * Inform the state checker that the DPLL is kept enabled even if not
+ * in use by any CRTC.
+ */
+#define INTEL_DPLL_ALWAYS_ON (1 << 0)
+
+struct intel_dpll_hw_state {
+ /* i9xx, pch plls */
+ uint32_t dpll;
+ uint32_t dpll_md;
+ uint32_t fp0;
+ uint32_t fp1;
+
+ /* hsw, bdw */
+ uint32_t wrpll;
+ uint32_t spll;
+
+ /* skl */
+ /*
+ * DPLL_CTRL1 has 6 bits for each of these DPLLs. We store those in
+ * the lower part of ctrl1 and they get shifted into position when writing
+ * the register. This allows us to easily compare the state to share
+ * the DPLL.
+ */
+ uint32_t ctrl1;
+ /* HDMI only, 0 when used for DP */
+ uint32_t cfgcr1, cfgcr2;
+
+ /* bxt */
+ uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
+ pcsdw12;
+};
+
+struct intel_shared_dpll_config {
+ unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
+ struct intel_dpll_hw_state hw_state;
+};
+
+struct intel_shared_dpll_funcs {
+ /* The mode_set hook is optional and should be used together with the
+ * intel_prepare_shared_dpll function. */
+ void (*mode_set)(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll);
+ void (*enable)(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll);
+ void (*disable)(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll);
+ bool (*get_hw_state)(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state);
+};
+
+struct intel_shared_dpll {
+ struct intel_shared_dpll_config config;
+
+ unsigned active_mask; /* mask of active CRTCs (i.e. DPMS on) */
+ bool on; /* is the PLL actually active? Disabled during modeset */
+ const char *name;
+ /* should match the index in the dev_priv->shared_dplls array */
+ enum intel_dpll_id id;
+
+ struct intel_shared_dpll_funcs funcs;
+
+ uint32_t flags;
+};
+
+#define SKL_DPLL0 0
+#define SKL_DPLL1 1
+#define SKL_DPLL2 2
+#define SKL_DPLL3 3
+
+/* shared dpll functions */
+struct intel_shared_dpll *
+intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
+ enum intel_dpll_id id);
+enum intel_dpll_id
+intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll);
+void
+intel_shared_dpll_config_get(struct intel_shared_dpll_config *config,
+ struct intel_shared_dpll *pll,
+ struct intel_crtc *crtc);
+void
+intel_shared_dpll_config_put(struct intel_shared_dpll_config *config,
+ struct intel_shared_dpll *pll,
+ struct intel_crtc *crtc);
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ bool state);
+#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
+#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *state,
+ struct intel_encoder *encoder);
+void intel_prepare_shared_dpll(struct intel_crtc *crtc);
+void intel_enable_shared_dpll(struct intel_crtc *crtc);
+void intel_disable_shared_dpll(struct intel_crtc *crtc);
+void intel_shared_dpll_commit(struct drm_atomic_state *state);
+void intel_shared_dpll_init(struct drm_device *dev);
+
+
+#endif /* _INTEL_DPLL_MGR_H_ */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8dd2cc564..f7f0f0181 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -45,9 +45,13 @@
* contexts. Note that it's important that we check the condition again after
* having timed out, since the timeout could be due to preemption or similar and
* we've never had a chance to check the condition before the timeout.
+ *
+ * TODO: When modesetting has fully transitioned to atomic, the below
+ * drm_can_sleep() can be removed and in_atomic()/!in_atomic() asserts
+ * added.
*/
-#define _wait_for(COND, MS, W) ({ \
- unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
+#define _wait_for(COND, US, W) ({ \
+ unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1; \
int ret__ = 0; \
while (!(COND)) { \
if (time_after(jiffies, timeout__)) { \
@@ -56,7 +60,7 @@
break; \
} \
if ((W) && drm_can_sleep()) { \
- usleep_range((W)*1000, (W)*2000); \
+ usleep_range((W), (W)*2); \
} else { \
cpu_relax(); \
} \
@@ -64,10 +68,40 @@
ret__; \
})
-#define wait_for(COND, MS) _wait_for(COND, MS, 1)
-#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
-#define wait_for_atomic_us(COND, US) _wait_for((COND), \
- DIV_ROUND_UP((US), 1000), 0)
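+/*
+ * wait_for() takes its timeout in milliseconds, wait_for_us() in
+ * microseconds; the last _wait_for() argument is the sleep interval in
+ * microseconds.
+ */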
+#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 1000)
+#define wait_for_us(COND, US) _wait_for((COND), (US), 1)
+
+/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
+#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
+# define _WAIT_FOR_ATOMIC_CHECK WARN_ON_ONCE(!in_atomic())
+#else
+# define _WAIT_FOR_ATOMIC_CHECK do { } while (0)
+#endif
+
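+/*
+ * Busy-wait variant for atomic context: US is in microseconds (capped at
+ * 50 ms by the BUILD_BUG_ON below), and local_clock() returns nanoseconds,
+ * so the >> 10 shift approximates a conversion to microseconds.
+ */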
+#define _wait_for_atomic(COND, US) ({ \
+ unsigned long end__; \
+ int ret__ = 0; \
+ _WAIT_FOR_ATOMIC_CHECK; \
+ BUILD_BUG_ON((US) > 50000); \
+ end__ = (local_clock() >> 10) + (US) + 1; \
+ while (!(COND)) { \
+ if (time_after((unsigned long)(local_clock() >> 10), end__)) { \
+ /* Unlike the regular wait_for(), this atomic variant \
+ * cannot be preempted (and we'll just ignore the issue\
+ * of irq interruptions) and so we know that no time \
+ * has passed since the last check of COND and can \
+ * immediately report the timeout. \
+ */ \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ cpu_relax(); \
+ } \
+ ret__; \
+})
+
+#define wait_for_atomic(COND, MS) _wait_for_atomic((COND), (MS) * 1000)
+#define wait_for_atomic_us(COND, US) _wait_for_atomic((COND), (US))
#define KHz(x) (1000 * (x))
#define MHz(x) KHz(1000 * (x))
@@ -119,6 +153,7 @@ enum intel_output_type {
struct intel_framebuffer {
struct drm_framebuffer base;
struct drm_i915_gem_object *obj;
+ struct intel_rotation_info rot_info;
};
struct intel_fbdev {
@@ -261,6 +296,12 @@ struct intel_atomic_state {
struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
struct intel_wm_config wm_config;
+
+ /*
+ * Current watermarks can't be trusted during hardware readout, so
+ * don't bother calculating intermediate watermarks.
+ */
+ bool skip_intermediate_wm;
};
struct intel_plane_state {
@@ -350,6 +391,7 @@ struct intel_crtc_scaler_state {
struct intel_pipe_wm {
struct intel_wm_level wm[5];
+ struct intel_wm_level raw_wm[5];
uint32_t linetime;
bool fbc_wm_enabled;
bool pipe_enabled;
@@ -377,6 +419,7 @@ struct intel_crtc_state {
#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
unsigned long quirks;
+ unsigned fb_bits; /* framebuffers to flip */
bool update_pipe; /* can a fast modeset be performed? */
bool disable_cxsr;
bool update_wm_pre, update_wm_post; /* watermarks are updated */
@@ -395,7 +438,8 @@ struct intel_crtc_state {
bool has_infoframe;
/* CPU Transcoder for the pipe. Currently this can only differ from the
- * pipe on Haswell (where we have a special eDP transcoder). */
+ * pipe on Haswell and later (where we have a special eDP transcoder)
+ * and Broxton (where we have special DSI transcoders). */
enum transcoder cpu_transcoder;
/*
@@ -442,8 +486,8 @@ struct intel_crtc_state {
* haswell. */
struct dpll dpll;
- /* Selected dpll when shared or DPLL_ID_PRIVATE. */
- enum intel_dpll_id shared_dpll;
+ /* Selected dpll when shared or NULL. */
+ struct intel_shared_dpll *shared_dpll;
/*
* - PORT_CLK_SEL for DDI ports on HSW/BDW.
@@ -454,6 +498,11 @@ struct intel_crtc_state {
/* Actual register state of the dpll, for shared dpll cross-checking. */
struct intel_dpll_hw_state dpll_hw_state;
+ /* DSI PLL registers */
+ struct {
+ u32 ctrl, div;
+ } dsi_pll;
+
int pipe_bpp;
struct intel_link_m_n dp_m_n;
@@ -511,14 +560,33 @@ struct intel_crtc_state {
struct {
/*
- * optimal watermarks, programmed post-vblank when this state
- * is committed
+ * Optimal watermarks, programmed post-vblank when this state
+ * is committed.
*/
union {
struct intel_pipe_wm ilk;
struct skl_pipe_wm skl;
} optimal;
+
+ /*
+ * Intermediate watermarks; these can be programmed immediately
+ * since they satisfy both the current configuration we're
+ * switching away from and the new configuration we're switching
+ * to.
+ */
+ struct intel_pipe_wm intermediate;
+
+ /*
+ * Platforms with two-step watermark programming will need to
+ * update watermark programming post-vblank to switch from the
+ * safe intermediate watermarks to the optimal final
+ * watermarks.
+ */
+ bool need_postvbl_update;
} wm;
+
+ /* Gamma mode programmed on the pipe */
+ uint32_t gamma_mode;
};
struct vlv_wm_state {
@@ -538,23 +606,6 @@ struct intel_mmio_flip {
unsigned int rotation;
};
-/*
- * Tracking of operations that need to be performed at the beginning/end of an
- * atomic commit, outside the atomic section where interrupts are disabled.
- * These are generally operations that grab mutexes or might otherwise sleep
- * and thus can't be run with interrupts disabled.
- */
-struct intel_crtc_atomic_commit {
- /* Sleepable operations to perform before commit */
-
- /* Sleepable operations to perform after commit */
- unsigned fb_bits;
- bool post_enable_primary;
-
- /* Sleepable operations to perform before and after commit */
- bool update_fbc;
-};
-
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
@@ -601,6 +652,7 @@ struct intel_crtc {
struct intel_pipe_wm ilk;
struct skl_pipe_wm skl;
} active;
+
/* allow CxSR on this pipe */
bool cxsr_allowed;
} wm;
@@ -614,8 +666,6 @@ struct intel_crtc {
int scanline_start;
} debug;
- struct intel_crtc_atomic_commit atomic;
-
/* scalers available on this crtc */
int num_scalers;
@@ -756,7 +806,9 @@ struct intel_dp {
uint32_t DP;
int link_rate;
uint8_t lane_count;
+ uint8_t sink_count;
bool has_audio;
+ bool detect_done;
enum hdmi_force_audio force_audio;
bool limited_color_range;
bool color_range_auto;
@@ -834,7 +886,7 @@ struct intel_dp_mst_encoder {
struct intel_encoder base;
enum pipe pipe;
struct intel_digital_port *primary;
- void *port; /* store this opaque as its illegal to dereference it */
+ struct intel_connector *connector;
};
static inline enum dpio_channel
@@ -1010,7 +1062,6 @@ void hsw_fdi_link_train(struct drm_crtc *crtc);
void intel_ddi_init(struct drm_device *dev, enum port port);
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
-void intel_ddi_pll_init(struct drm_device *dev);
void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder);
@@ -1052,17 +1103,19 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
uint64_t fb_modifier, uint32_t pixel_format);
/* intel_audio.c */
-void intel_init_audio(struct drm_device *dev);
+void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
void intel_audio_codec_enable(struct intel_encoder *encoder);
void intel_audio_codec_disable(struct intel_encoder *encoder);
void i915_audio_component_init(struct drm_i915_private *dev_priv);
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
/* intel_display.c */
+int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
+ const char *name, u32 reg, int ref_freq);
extern const struct drm_plane_funcs intel_plane_funcs;
+void intel_init_display_hooks(struct drm_i915_private *dev_priv);
+unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
bool intel_has_pending_fb_unpin(struct drm_device *dev);
-int intel_pch_rawclk(struct drm_device *dev);
-int intel_hrawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
@@ -1107,9 +1160,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
-int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
- struct drm_framebuffer *fb,
- const struct drm_plane_state *plane_state);
+int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+ unsigned int rotation);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1145,19 +1197,13 @@ intel_rotation_90_or_270(unsigned int rotation)
void intel_create_rotation_property(struct drm_device *dev,
struct intel_plane *plane);
-/* shared dpll functions */
-struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
-void assert_shared_dpll(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll,
- bool state);
-#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
-#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
-struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *state);
+void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
const struct dpll *dpll);
void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);
+int lpt_get_iclkip(struct drm_i915_private *dev_priv);
/* modesetting asserts */
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
@@ -1166,6 +1212,9 @@ void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
+void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state);
+#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
+#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
@@ -1173,21 +1222,24 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv,
- int *x, int *y,
- uint64_t fb_modifier,
- unsigned int cpp,
- unsigned int pitch);
+u32 intel_compute_tile_offset(int *x, int *y,
+ const struct drm_framebuffer *fb, int plane,
+ unsigned int pitch,
+ unsigned int rotation);
void intel_prepare_reset(struct drm_device *dev);
void intel_finish_reset(struct drm_device *dev);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
-void broxton_init_cdclk(struct drm_device *dev);
-void broxton_uninit_cdclk(struct drm_device *dev);
-void broxton_ddi_phy_init(struct drm_device *dev);
-void broxton_ddi_phy_uninit(struct drm_device *dev);
+void broxton_init_cdclk(struct drm_i915_private *dev_priv);
+void broxton_uninit_cdclk(struct drm_i915_private *dev_priv);
+bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv);
+void broxton_ddi_phy_init(struct drm_i915_private *dev_priv);
+void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv);
+void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv);
+void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
+void gen9_enable_dc5(struct drm_i915_private *dev_priv);
void skl_init_cdclk(struct drm_i915_private *dev_priv);
int skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
@@ -1197,9 +1249,6 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
-void
-ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
- int dotclock);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
intel_clock_t *best_clock);
int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock);
@@ -1227,11 +1276,13 @@ u32 skl_plane_ctl_rotation(unsigned int rotation);
/* intel_csr.c */
void intel_csr_ucode_init(struct drm_i915_private *);
-bool intel_csr_load_program(struct drm_i915_private *);
+void intel_csr_load_program(struct drm_i915_private *);
void intel_csr_ucode_fini(struct drm_i915_private *);
+void intel_csr_ucode_suspend(struct drm_i915_private *);
+void intel_csr_ucode_resume(struct drm_i915_private *);
/* intel_dp.c */
-void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
+bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
@@ -1269,7 +1320,6 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port);
-void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config);
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
@@ -1427,8 +1477,8 @@ int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_fini(struct drm_i915_private *);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
void intel_power_domains_suspend(struct drm_i915_private *dev_priv);
-void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv);
-void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv);
+void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
+void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain);
@@ -1545,6 +1595,7 @@ void intel_suspend_hw(struct drm_device *dev);
int ilk_wm_max_level(const struct drm_device *dev);
void intel_update_watermarks(struct drm_crtc *crtc);
void intel_init_pm(struct drm_device *dev);
+void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
void intel_pm_setup(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
@@ -1569,6 +1620,7 @@ void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
+bool ilk_disable_lp_wm(struct drm_device *dev);
int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6);
/* intel_sdvo.c */
@@ -1610,6 +1662,18 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state,
return to_intel_crtc_state(crtc_state);
}
+
+static inline struct intel_plane_state *
+intel_atomic_get_existing_plane_state(struct drm_atomic_state *state,
+ struct intel_plane *plane)
+{
+ struct drm_plane_state *plane_state;
+
+ plane_state = drm_atomic_get_existing_plane_state(state, &plane->base);
+
+ return to_intel_plane_state(plane_state);
+}
+
int intel_atomic_setup_scalers(struct drm_device *dev,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state);
@@ -1621,4 +1685,10 @@ void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+/* intel_color.c */
+void intel_color_init(struct drm_crtc *crtc);
+int intel_color_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
+void intel_color_set_csc(struct drm_crtc_state *crtc_state);
+void intel_color_load_luts(struct drm_crtc_state *crtc_state);
+
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 01b8e9f4c..4756ef639 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -46,6 +46,40 @@ static const struct {
},
};
+/* return pixels in terms of txbyteclkhs */
+static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
+ u16 burst_mode_ratio)
+{
+ return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
+ 8 * 100), lane_count);
+}
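+/*
+ * Example: with bpp = 24, lane_count = 4 and burst_mode_ratio = 100 (no
+ * burst scaling), txbyteclkhs(100, ...) = DIV_ROUND_UP(DIV_ROUND_UP(100 *
+ * 24 * 100, 800), 4) = 75.
+ */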
+
+/* return pixels equivalent to txbyteclkhs */
+static u16 pixels_from_txbyteclkhs(u16 clk_hs, int bpp, int lane_count,
+ u16 burst_mode_ratio)
+{
+ return DIV_ROUND_UP((clk_hs * lane_count * 8 * 100),
+ (bpp * burst_mode_ratio));
+}
+
+enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
+{
+ /* It just so happens the VBT matches register contents. */
+ switch (fmt) {
+ case VID_MODE_FORMAT_RGB888:
+ return MIPI_DSI_FMT_RGB888;
+ case VID_MODE_FORMAT_RGB666:
+ return MIPI_DSI_FMT_RGB666;
+ case VID_MODE_FORMAT_RGB666_PACKED:
+ return MIPI_DSI_FMT_RGB666_PACKED;
+ case VID_MODE_FORMAT_RGB565:
+ return MIPI_DSI_FMT_RGB565;
+ default:
+ MISSING_CASE(fmt);
+ return MIPI_DSI_FMT_RGB666;
+ }
+}
+
static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
@@ -268,22 +302,47 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
static bool intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
- struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ int ret;
DRM_DEBUG_KMS("\n");
pipe_config->has_dsi_encoder = true;
- if (fixed_mode)
+ if (fixed_mode) {
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+ if (HAS_GMCH_DISPLAY(dev_priv))
+ intel_gmch_panel_fitting(crtc, pipe_config,
+ intel_connector->panel.fitting_mode);
+ else
+ intel_pch_panel_fitting(crtc, pipe_config,
+ intel_connector->panel.fitting_mode);
+ }
+
/* DSI uses short packets for sync events, so clear mode flags for DSI */
adjusted_mode->flags = 0;
+ if (IS_BROXTON(dev_priv)) {
+ /* Dual link goes to DSI transcoder A. */
+ if (intel_dsi->ports == BIT(PORT_C))
+ pipe_config->cpu_transcoder = TRANSCODER_DSI_C;
+ else
+ pipe_config->cpu_transcoder = TRANSCODER_DSI_A;
+ }
+
+ ret = intel_compute_dsi_pll(encoder, pipe_config);
+ if (ret)
+ return false;
+
+ pipe_config->clock_set = true;
+
return true;
}
@@ -403,7 +462,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
temp &= ~LANE_CONFIGURATION_MASK;
temp &= ~DUAL_LINK_MODE_MASK;
- if (intel_dsi->ports == ((1 << PORT_A) | (1 << PORT_C))) {
+ if (intel_dsi->ports == (BIT(PORT_A) | BIT(PORT_C))) {
temp |= (intel_dsi->dual_link - 1)
<< DUAL_LINK_MODE_SHIFT;
temp |= intel_crtc->pipe ?
@@ -471,14 +530,19 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
enum port port;
u32 tmp;
DRM_DEBUG_KMS("\n");
- intel_enable_dsi_pll(encoder);
+ /*
+ * The BIOS may leave the PLL in a wonky state where it doesn't
+ * lock. It needs to be fully powered down to fix it.
+ */
+ intel_disable_dsi_pll(encoder);
+ intel_enable_dsi_pll(encoder, crtc->config);
+
intel_dsi_prepare(encoder);
/* Panel Enable over CRC PMIC */
@@ -488,19 +552,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
msleep(intel_dsi->panel_on_delay);
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
- /*
- * Disable DPOunit clock gating, can stall pipe
- * and we need DPLL REFA always enabled
- */
- tmp = I915_READ(DPLL(pipe));
- tmp |= DPLL_REF_CLK_ENABLE_VLV;
- I915_WRITE(DPLL(pipe), tmp);
-
- /* update the hw state for DPLL */
- intel_crtc->config->dpll_hw_state.dpll =
- DPLL_INTEGRATED_REF_CLK_VLV |
- DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
-
+ /* Disable DPOunit clock gating, can stall pipe */
tmp = I915_READ(DSPCLK_GATE_D);
tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, tmp);
@@ -652,11 +704,16 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
drm_panel_unprepare(intel_dsi->panel);
msleep(intel_dsi->panel_off_delay);
- msleep(intel_dsi->panel_pwr_cycle_delay);
/* Panel Disable over CRC PMIC */
if (intel_dsi->gpio_panel)
gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
+
+ /*
+ * FIXME As we do with eDP, just make a note of the time here
+ * and perform the wait before the next panel power on.
+ */
+ msleep(intel_dsi->panel_pwr_cycle_delay);
}
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
@@ -667,7 +724,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
enum intel_display_power_domain power_domain;
enum port port;
- bool ret;
+ bool active = false;
DRM_DEBUG_KMS("\n");
@@ -675,55 +732,234 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = false;
+ /*
+ * On Broxton the PLL needs to be enabled with a valid divider
+ * configuration, otherwise accessing DSI registers will hang the
+ * machine. See BSpec North Display Engine registers/MIPI[BXT].
+ */
+ if (IS_BROXTON(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv))
+ goto out_put_power;
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
- u32 dpi_enabled, func;
+ bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
- func = I915_READ(MIPI_DSI_FUNC_PRG(port));
- dpi_enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
-
- /* Due to some hardware limitations on BYT, MIPI Port C DPI
- * Enable bit does not get set. To check whether DSI Port C
- * was enabled in BIOS, check the Pipe B enable bit
+ /*
+ * Due to some hardware limitations on VLV/CHV, the DPI enable
+ * bit in port C control register does not get set. As a
+ * workaround, check pipe B conf instead.
*/
- if (IS_VALLEYVIEW(dev) && port == PORT_C)
- dpi_enabled = I915_READ(PIPECONF(PIPE_B)) &
- PIPECONF_ENABLE;
+ if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && port == PORT_C)
+ enabled = I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+
+ /* Try command mode if video mode not enabled */
+ if (!enabled) {
+ u32 tmp = I915_READ(MIPI_DSI_FUNC_PRG(port));
+ enabled = tmp & CMD_MODE_DATA_WIDTH_MASK;
+ }
+
+ if (!enabled)
+ continue;
+
+ if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY))
+ continue;
+
+ if (IS_BROXTON(dev_priv)) {
+ u32 tmp = I915_READ(MIPI_CTRL(port));
+ tmp &= BXT_PIPE_SELECT_MASK;
+ tmp >>= BXT_PIPE_SELECT_SHIFT;
- if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) {
- if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) {
- *pipe = port == PORT_A ? PIPE_A : PIPE_B;
- ret = true;
+ if (WARN_ON(tmp > PIPE_C))
+ continue;
- goto out;
- }
+ *pipe = tmp;
+ } else {
+ *pipe = port == PORT_A ? PIPE_A : PIPE_B;
}
+
+ active = true;
+ break;
}
-out:
+
+out_put_power:
intel_display_power_put(dev_priv, power_domain);
- return ret;
+ return active;
+}
+
+static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode_sw;
+ struct intel_crtc *intel_crtc;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ unsigned int lane_count = intel_dsi->lane_count;
+ unsigned int bpp, fmt;
+ enum port port;
+ u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
+ u16 hfp_sw, hsync_sw, hbp_sw;
+ u16 crtc_htotal_sw, crtc_hsync_start_sw, crtc_hsync_end_sw,
+ crtc_hblank_start_sw, crtc_hblank_end_sw;
+
+ intel_crtc = to_intel_crtc(encoder->base.crtc);
+ adjusted_mode_sw = &intel_crtc->config->base.adjusted_mode;
+
+ /*
+ * At least one port is active, since encoder->get_config() is called
+ * only if encoder->get_hw_state() returns true.
+ */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ if (I915_READ(BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE)
+ break;
+ }
+
+ fmt = I915_READ(MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK;
+ pipe_config->pipe_bpp =
+ mipi_dsi_pixel_format_to_bpp(
+ pixel_format_from_register_bits(fmt));
+ bpp = pipe_config->pipe_bpp;
+
+ /* In terms of pixels */
+ adjusted_mode->crtc_hdisplay =
+ I915_READ(BXT_MIPI_TRANS_HACTIVE(port));
+ adjusted_mode->crtc_vdisplay =
+ I915_READ(BXT_MIPI_TRANS_VACTIVE(port));
+ adjusted_mode->crtc_vtotal =
+ I915_READ(BXT_MIPI_TRANS_VTOTAL(port));
+
+ hactive = adjusted_mode->crtc_hdisplay;
+ hfp = I915_READ(MIPI_HFP_COUNT(port));
+
+ /*
+ * Only meaningful in non-burst mode with sync pulses; can be zero
+ * for non-burst sync events and burst modes.
+ */
+ hsync = I915_READ(MIPI_HSYNC_PADDING_COUNT(port));
+ hbp = I915_READ(MIPI_HBP_COUNT(port));
+
+ /* horizontal values are in terms of the high speed byte clock */
+ hfp = pixels_from_txbyteclkhs(hfp, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hsync = pixels_from_txbyteclkhs(hsync, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hbp = pixels_from_txbyteclkhs(hbp, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+
+ if (intel_dsi->dual_link) {
+ hfp *= 2;
+ hsync *= 2;
+ hbp *= 2;
+ }
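+ /* Dual link splits the timings across both ports, so double the per-port values read back. */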
+
+ /* vertical values are in terms of lines */
+ vfp = I915_READ(MIPI_VFP_COUNT(port));
+ vsync = I915_READ(MIPI_VSYNC_PADDING_COUNT(port));
+ vbp = I915_READ(MIPI_VBP_COUNT(port));
+
+ adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp;
+ adjusted_mode->crtc_hsync_start = hfp + adjusted_mode->crtc_hdisplay;
+ adjusted_mode->crtc_hsync_end = hsync + adjusted_mode->crtc_hsync_start;
+ adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay;
+ adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_htotal;
+
+ adjusted_mode->crtc_vsync_start = vfp + adjusted_mode->crtc_vdisplay;
+ adjusted_mode->crtc_vsync_end = vsync + adjusted_mode->crtc_vsync_start;
+ adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay;
+ adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal;
+
+ /*
+ * On BXT some horizontal timings are programmed in txbyteclkhs rather
+ * than pixels, so converting them back to pixels introduces round-up
+ * errors. To compensate, take the software-state adjusted_mode, compute
+ * the value that would be programmed to the port and convert it back to
+ * pixels; this expected value carries the same round-up errors. If it
+ * matches the value read back from the port, replace the hardware-state
+ * horizontal timings with the software-state ones so the errors cancel
+ * out.
+ */
+ /* Calculating the value programmed to the Port register */
+ hfp_sw = adjusted_mode_sw->crtc_hsync_start -
+ adjusted_mode_sw->crtc_hdisplay;
+ hsync_sw = adjusted_mode_sw->crtc_hsync_end -
+ adjusted_mode_sw->crtc_hsync_start;
+ hbp_sw = adjusted_mode_sw->crtc_htotal -
+ adjusted_mode_sw->crtc_hsync_end;
+
+ if (intel_dsi->dual_link) {
+ hfp_sw /= 2;
+ hsync_sw /= 2;
+ hbp_sw /= 2;
+ }
+
+ hfp_sw = txbyteclkhs(hfp_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hsync_sw = txbyteclkhs(hsync_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hbp_sw = txbyteclkhs(hbp_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+
+ /* Reverse calculating the adjusted mode parameters from port reg vals*/
+ hfp_sw = pixels_from_txbyteclkhs(hfp_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hsync_sw = pixels_from_txbyteclkhs(hsync_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hbp_sw = pixels_from_txbyteclkhs(hbp_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+
+ if (intel_dsi->dual_link) {
+ hfp_sw *= 2;
+ hsync_sw *= 2;
+ hbp_sw *= 2;
+ }
+
+ crtc_htotal_sw = adjusted_mode_sw->crtc_hdisplay + hfp_sw +
+ hsync_sw + hbp_sw;
+ crtc_hsync_start_sw = hfp_sw + adjusted_mode_sw->crtc_hdisplay;
+ crtc_hsync_end_sw = hsync_sw + crtc_hsync_start_sw;
+ crtc_hblank_start_sw = adjusted_mode_sw->crtc_hdisplay;
+ crtc_hblank_end_sw = crtc_htotal_sw;
+
+ if (adjusted_mode->crtc_htotal == crtc_htotal_sw)
+ adjusted_mode->crtc_htotal = adjusted_mode_sw->crtc_htotal;
+
+ if (adjusted_mode->crtc_hsync_start == crtc_hsync_start_sw)
+ adjusted_mode->crtc_hsync_start =
+ adjusted_mode_sw->crtc_hsync_start;
+
+ if (adjusted_mode->crtc_hsync_end == crtc_hsync_end_sw)
+ adjusted_mode->crtc_hsync_end =
+ adjusted_mode_sw->crtc_hsync_end;
+
+ if (adjusted_mode->crtc_hblank_start == crtc_hblank_start_sw)
+ adjusted_mode->crtc_hblank_start =
+ adjusted_mode_sw->crtc_hblank_start;
+
+ if (adjusted_mode->crtc_hblank_end == crtc_hblank_end_sw)
+ adjusted_mode->crtc_hblank_end =
+ adjusted_mode_sw->crtc_hblank_end;
}
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct drm_device *dev = encoder->base.dev;
u32 pclk;
DRM_DEBUG_KMS("\n");
pipe_config->has_dsi_encoder = true;
- /*
- * DPLL_MD is not used in case of DSI, reading will get some default value
- * set dpll_md = 0
- */
- pipe_config->dpll_hw_state.dpll_md = 0;
+ if (IS_BROXTON(dev))
+ bxt_dsi_get_pipe_config(encoder, pipe_config);
- pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp);
+ pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
+ pipe_config);
if (!pclk)
return;
@@ -736,7 +972,7 @@ intel_dsi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
- struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+ const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
DRM_DEBUG_KMS("\n");
@@ -772,14 +1008,6 @@ static u16 txclkesc(u32 divider, unsigned int us)
}
}
-/* return pixels in terms of txbyteclkhs */
-static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
- u16 burst_mode_ratio)
-{
- return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
- 8 * 100), lane_count);
-}
-
static void set_dsi_timings(struct drm_encoder *encoder,
const struct drm_display_mode *adjusted_mode)
{
@@ -787,7 +1015,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- unsigned int bpp = dsi_pixel_format_bpp(intel_dsi->pixel_format);
+ unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
unsigned int lane_count = intel_dsi->lane_count;
u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
@@ -849,6 +1077,23 @@ static void set_dsi_timings(struct drm_encoder *encoder,
}
}
+static u32 pixel_format_to_reg(enum mipi_dsi_pixel_format fmt)
+{
+ switch (fmt) {
+ case MIPI_DSI_FMT_RGB888:
+ return VID_MODE_FORMAT_RGB888;
+ case MIPI_DSI_FMT_RGB666:
+ return VID_MODE_FORMAT_RGB666;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ return VID_MODE_FORMAT_RGB666_PACKED;
+ case MIPI_DSI_FMT_RGB565:
+ return VID_MODE_FORMAT_RGB565;
+ default:
+ MISSING_CASE(fmt);
+ return VID_MODE_FORMAT_RGB666;
+ }
+}
+
static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
@@ -858,7 +1103,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
enum port port;
- unsigned int bpp = dsi_pixel_format_bpp(intel_dsi->pixel_format);
+ unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
u32 val, tmp;
u16 mode_hdisplay;
@@ -917,9 +1162,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
val |= CMD_MODE_DATA_WIDTH_8_BIT; /* XXX */
} else {
val |= intel_dsi->channel << VID_MODE_CHANNEL_NUMBER_SHIFT;
-
- /* XXX: cross-check bpp vs. pixel format? */
- val |= intel_dsi->pixel_format;
+ val |= pixel_format_to_reg(intel_dsi->pixel_format);
}
tmp = 0;
@@ -1059,6 +1302,48 @@ static int intel_dsi_get_modes(struct drm_connector *connector)
return 1;
}
+static int intel_dsi_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_crtc *crtc;
+ int ret;
+
+ ret = drm_object_property_set_value(&connector->base, property, val);
+ if (ret)
+ return ret;
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ if (val == DRM_MODE_SCALE_NONE) {
+ DRM_DEBUG_KMS("no scaling not supported\n");
+ return -EINVAL;
+ }
+ if (HAS_GMCH_DISPLAY(dev) &&
+ val == DRM_MODE_SCALE_CENTER) {
+ DRM_DEBUG_KMS("centering not supported\n");
+ return -EINVAL;
+ }
+
+ if (intel_connector->panel.fitting_mode == val)
+ return 0;
+
+ intel_connector->panel.fitting_mode = val;
+ }
+
+ crtc = intel_attached_encoder(connector)->base.crtc;
+ if (crtc && crtc->state->enable) {
+ /*
+ * If the CRTC is enabled, the display will be changed
+ * according to the new panel fitting mode.
+ */
+ intel_crtc_restore_mode(crtc);
+ }
+
+ return 0;
+}
+
static void intel_dsi_connector_destroy(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -1101,11 +1386,25 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.detect = intel_dsi_detect,
.destroy = intel_dsi_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = intel_dsi_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
+static void intel_dsi_add_properties(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+
+ if (connector->panel.fixed_mode) {
+ drm_mode_create_scaling_mode_property(dev);
+ drm_object_attach_property(&connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_ASPECT);
+ connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
+ }
+}
+
void intel_dsi_init(struct drm_device *dev)
{
struct intel_dsi *intel_dsi;
@@ -1121,11 +1420,13 @@ void intel_dsi_init(struct drm_device *dev)
DRM_DEBUG_KMS("\n");
/* There is no detection method for MIPI so rely on VBT */
- if (!dev_priv->vbt.has_mipi)
+ if (!intel_bios_is_dsi_present(dev_priv, &port))
return;
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
+ } else if (IS_BROXTON(dev)) {
+ dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
} else {
DRM_ERROR("Unsupported Mipi device to reg base");
return;
@@ -1161,17 +1462,21 @@ void intel_dsi_init(struct drm_device *dev)
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector->unregister = intel_connector_unregister;
- /* Pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI port C */
- if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) {
- intel_encoder->crtc_mask = (1 << PIPE_A);
- intel_dsi->ports = (1 << PORT_A);
- } else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIC) {
- intel_encoder->crtc_mask = (1 << PIPE_B);
- intel_dsi->ports = (1 << PORT_C);
- }
+ /*
+ * On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
+ * port C. BXT isn't limited like this.
+ */
+ if (IS_BROXTON(dev_priv))
+ intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
+ else if (port == PORT_A)
+ intel_encoder->crtc_mask = BIT(PIPE_A);
+ else
+ intel_encoder->crtc_mask = BIT(PIPE_B);
if (dev_priv->vbt.dsi.config->dual_link)
- intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C));
+ intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
+ else
+ intel_dsi->ports = BIT(port);
/* Create a DSI host (and a device) for each port. */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -1223,8 +1528,6 @@ void intel_dsi_init(struct drm_device *dev)
intel_connector_attach_encoder(intel_connector, intel_encoder);
- drm_connector_register(connector);
-
drm_panel_attach(intel_dsi->panel, connector);
mutex_lock(&dev->mode_config.mutex);
@@ -1242,7 +1545,15 @@ void intel_dsi_init(struct drm_device *dev)
goto err;
}
+ connector->display_info.width_mm = fixed_mode->width_mm;
+ connector->display_info.height_mm = fixed_mode->height_mm;
+
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+
+ intel_dsi_add_properties(intel_connector);
+
+ drm_connector_register(connector);
+
intel_panel_setup_backlight(connector, INVALID_PIPE);
return;
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 92f39227b..61a6957fc 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -34,8 +34,6 @@
#define DSI_DUAL_LINK_FRONT_BACK 1
#define DSI_DUAL_LINK_PIXEL_ALT 2
-int dsi_pixel_format_bpp(int pixel_format);
-
struct intel_dsi_host;
struct intel_dsi {
@@ -64,8 +62,12 @@ struct intel_dsi {
/* number of DSI lanes */
unsigned int lane_count;
- /* video mode pixel format for MIPI_DSI_FUNC_PRG register */
- u32 pixel_format;
+ /*
+ * video mode pixel format
+ *
+ * XXX: consolidate on .format in struct mipi_dsi_device.
+ */
+ enum mipi_dsi_pixel_format pixel_format;
/* video mode format for MIPI_VIDEO_MODE_FORMAT register */
u32 video_mode_format;
@@ -117,21 +119,25 @@ static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h)
return container_of(h, struct intel_dsi_host, base);
}
-#define for_each_dsi_port(__port, __ports_mask) \
- for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
- for_each_if ((__ports_mask) & (1 << (__port)))
+#define for_each_dsi_port(__port, __ports_mask) for_each_port_masked(__port, __ports_mask)
static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_dsi, base.base);
}
-extern void intel_enable_dsi_pll(struct intel_encoder *encoder);
-extern void intel_disable_dsi_pll(struct intel_encoder *encoder);
-extern u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp);
-extern void intel_dsi_reset_clocks(struct intel_encoder *encoder,
- enum port port);
+bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
+int intel_compute_dsi_pll(struct intel_encoder *encoder,
+ struct intel_crtc_state *config);
+void intel_enable_dsi_pll(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config);
+void intel_disable_dsi_pll(struct intel_encoder *encoder);
+u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+ struct intel_crtc_state *config);
+void intel_dsi_reset_clocks(struct intel_encoder *encoder,
+ enum port port);
struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id);
+enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index 7f145b4fe..e498f1c32 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -58,50 +58,41 @@ static inline struct vbt_panel *to_vbt_panel(struct drm_panel *panel)
#define NS_KHZ_RATIO 1000000
-#define GPI0_NC_0_HV_DDI0_HPD 0x4130
-#define GPIO_NC_0_HV_DDI0_PAD 0x4138
-#define GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120
-#define GPIO_NC_1_HV_DDI0_DDC_SDA_PAD 0x4128
-#define GPIO_NC_2_HV_DDI0_DDC_SCL 0x4110
-#define GPIO_NC_2_HV_DDI0_DDC_SCL_PAD 0x4118
-#define GPIO_NC_3_PANEL0_VDDEN 0x4140
-#define GPIO_NC_3_PANEL0_VDDEN_PAD 0x4148
-#define GPIO_NC_4_PANEL0_BLKEN 0x4150
-#define GPIO_NC_4_PANEL0_BLKEN_PAD 0x4158
-#define GPIO_NC_5_PANEL0_BLKCTL 0x4160
-#define GPIO_NC_5_PANEL0_BLKCTL_PAD 0x4168
-#define GPIO_NC_6_PCONF0 0x4180
-#define GPIO_NC_6_PAD 0x4188
-#define GPIO_NC_7_PCONF0 0x4190
-#define GPIO_NC_7_PAD 0x4198
-#define GPIO_NC_8_PCONF0 0x4170
-#define GPIO_NC_8_PAD 0x4178
-#define GPIO_NC_9_PCONF0 0x4100
-#define GPIO_NC_9_PAD 0x4108
-#define GPIO_NC_10_PCONF0 0x40E0
-#define GPIO_NC_10_PAD 0x40E8
-#define GPIO_NC_11_PCONF0 0x40F0
-#define GPIO_NC_11_PAD 0x40F8
-
-struct gpio_table {
- u16 function_reg;
- u16 pad_reg;
- u8 init;
+/* base offsets for gpio pads */
+#define VLV_GPIO_NC_0_HV_DDI0_HPD 0x4130
+#define VLV_GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120
+#define VLV_GPIO_NC_2_HV_DDI0_DDC_SCL 0x4110
+#define VLV_GPIO_NC_3_PANEL0_VDDEN 0x4140
+#define VLV_GPIO_NC_4_PANEL0_BKLTEN 0x4150
+#define VLV_GPIO_NC_5_PANEL0_BKLTCTL 0x4160
+#define VLV_GPIO_NC_6_HV_DDI1_HPD 0x4180
+#define VLV_GPIO_NC_7_HV_DDI1_DDC_SDA 0x4190
+#define VLV_GPIO_NC_8_HV_DDI1_DDC_SCL 0x4170
+#define VLV_GPIO_NC_9_PANEL1_VDDEN 0x4100
+#define VLV_GPIO_NC_10_PANEL1_BKLTEN 0x40E0
+#define VLV_GPIO_NC_11_PANEL1_BKLTCTL 0x40F0
+
+#define VLV_GPIO_PCONF0(base_offset) (base_offset)
+#define VLV_GPIO_PAD_VAL(base_offset) ((base_offset) + 8)
+
+struct gpio_map {
+ u16 base_offset;
+ bool init;
};
-static struct gpio_table gtable[] = {
- { GPI0_NC_0_HV_DDI0_HPD, GPIO_NC_0_HV_DDI0_PAD, 0 },
- { GPIO_NC_1_HV_DDI0_DDC_SDA, GPIO_NC_1_HV_DDI0_DDC_SDA_PAD, 0 },
- { GPIO_NC_2_HV_DDI0_DDC_SCL, GPIO_NC_2_HV_DDI0_DDC_SCL_PAD, 0 },
- { GPIO_NC_3_PANEL0_VDDEN, GPIO_NC_3_PANEL0_VDDEN_PAD, 0 },
- { GPIO_NC_4_PANEL0_BLKEN, GPIO_NC_4_PANEL0_BLKEN_PAD, 0 },
- { GPIO_NC_5_PANEL0_BLKCTL, GPIO_NC_5_PANEL0_BLKCTL_PAD, 0 },
- { GPIO_NC_6_PCONF0, GPIO_NC_6_PAD, 0 },
- { GPIO_NC_7_PCONF0, GPIO_NC_7_PAD, 0 },
- { GPIO_NC_8_PCONF0, GPIO_NC_8_PAD, 0 },
- { GPIO_NC_9_PCONF0, GPIO_NC_9_PAD, 0 },
- { GPIO_NC_10_PCONF0, GPIO_NC_10_PAD, 0},
- { GPIO_NC_11_PCONF0, GPIO_NC_11_PAD, 0}
+static struct gpio_map vlv_gpio_table[] = {
+ { VLV_GPIO_NC_0_HV_DDI0_HPD },
+ { VLV_GPIO_NC_1_HV_DDI0_DDC_SDA },
+ { VLV_GPIO_NC_2_HV_DDI0_DDC_SCL },
+ { VLV_GPIO_NC_3_PANEL0_VDDEN },
+ { VLV_GPIO_NC_4_PANEL0_BKLTEN },
+ { VLV_GPIO_NC_5_PANEL0_BKLTCTL },
+ { VLV_GPIO_NC_6_HV_DDI1_HPD },
+ { VLV_GPIO_NC_7_HV_DDI1_DDC_SDA },
+ { VLV_GPIO_NC_8_HV_DDI1_DDC_SCL },
+ { VLV_GPIO_NC_9_PANEL1_VDDEN },
+ { VLV_GPIO_NC_10_PANEL1_BKLTEN },
+ { VLV_GPIO_NC_11_PANEL1_BKLTCTL },
};
static inline enum port intel_dsi_seq_port_to_port(u8 port)
@@ -196,56 +187,76 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
return data;
}
-static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
+static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
+ u8 gpio_source, u8 gpio_index, bool value)
{
- u8 gpio, action;
- u16 function, pad;
- u32 val;
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->vbt.dsi.seq_version >= 3)
- data++;
-
- gpio = *data++;
+ struct gpio_map *map;
+ u16 pconf0, padval;
+ u32 tmp;
+ u8 port;
- /* pull up/down */
- action = *data++ & 1;
-
- if (gpio >= ARRAY_SIZE(gtable)) {
- DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
- goto out;
+ if (gpio_index >= ARRAY_SIZE(vlv_gpio_table)) {
+ DRM_DEBUG_KMS("unknown gpio index %u\n", gpio_index);
+ return;
}
- if (!IS_VALLEYVIEW(dev_priv)) {
- DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
- goto out;
- }
+ map = &vlv_gpio_table[gpio_index];
if (dev_priv->vbt.dsi.seq_version >= 3) {
DRM_DEBUG_KMS("GPIO element v3 not supported\n");
- goto out;
+ return;
+ } else {
+ if (gpio_source == 0) {
+ port = IOSF_PORT_GPIO_NC;
+ } else if (gpio_source == 1) {
+ port = IOSF_PORT_GPIO_SC;
+ } else {
+ DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
+ return;
+ }
}
- function = gtable[gpio].function_reg;
- pad = gtable[gpio].pad_reg;
+ pconf0 = VLV_GPIO_PCONF0(map->base_offset);
+ padval = VLV_GPIO_PAD_VAL(map->base_offset);
mutex_lock(&dev_priv->sb_lock);
- if (!gtable[gpio].init) {
- /* program the function */
+ if (!map->init) {
/* FIXME: remove constant below */
- vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, function,
- 0x2000CC00);
- gtable[gpio].init = 1;
+ vlv_iosf_sb_write(dev_priv, port, pconf0, 0x2000CC00);
+ map->init = true;
}
- val = 0x4 | action;
+ tmp = 0x4 | value;
+ vlv_iosf_sb_write(dev_priv, port, padval, tmp);
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u8 gpio_source, gpio_index;
+ bool value;
+
+ if (dev_priv->vbt.dsi.seq_version >= 3)
+ data++;
+
+ gpio_index = *data++;
+
+ /* gpio source in sequence v2 only */
+ if (dev_priv->vbt.dsi.seq_version == 2)
+ gpio_source = (*data >> 1) & 3;
+ else
+ gpio_source = 0;
/* pull up/down */
- vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, pad, val);
- mutex_unlock(&dev_priv->sb_lock);
+ value = *data++ & 1;
+
+ if (IS_VALLEYVIEW(dev_priv))
+ vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+ else
+ DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
-out:
return data;
}
@@ -420,7 +431,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
struct vbt_panel *vbt_panel;
- u32 bits_per_pixel = 24;
+ u32 bpp;
u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
u32 ui_num, ui_den;
u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
@@ -436,12 +447,13 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
intel_dsi->lane_count = mipi_config->lane_cnt + 1;
- intel_dsi->pixel_format = mipi_config->videomode_color_format << 7;
+ intel_dsi->pixel_format =
+ pixel_format_from_register_bits(
+ mipi_config->videomode_color_format << 7);
+ bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
intel_dsi->dual_link = mipi_config->dual_link;
intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
-
- bits_per_pixel = dsi_pixel_format_bpp(intel_dsi->pixel_format);
-
intel_dsi->operation_mode = mipi_config->is_cmd_mode;
intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
@@ -475,8 +487,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
*/
if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
if (mipi_config->target_burst_mode_freq) {
- computed_ddr =
- (pclk * bits_per_pixel) / intel_dsi->lane_count;
+ computed_ddr = (pclk * bpp) / intel_dsi->lane_count;
if (mipi_config->target_burst_mode_freq <
computed_ddr) {
@@ -499,7 +510,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->burst_mode_ratio = burst_mode_ratio;
intel_dsi->pclk = pclk;
- bitrate = (pclk * bits_per_pixel) / intel_dsi->lane_count;
+ bitrate = (pclk * bpp) / intel_dsi->lane_count;
switch (intel_dsi->escape_clk_div) {
case 0:
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index 70883c54c..1765e6e18 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -30,33 +30,7 @@
#include "i915_drv.h"
#include "intel_dsi.h"
-int dsi_pixel_format_bpp(int pixel_format)
-{
- int bpp;
-
- switch (pixel_format) {
- default:
- case VID_MODE_FORMAT_RGB888:
- case VID_MODE_FORMAT_RGB666_LOOSE:
- bpp = 24;
- break;
- case VID_MODE_FORMAT_RGB666:
- bpp = 18;
- break;
- case VID_MODE_FORMAT_RGB565:
- bpp = 16;
- break;
- }
-
- return bpp;
-}
-
-struct dsi_mnp {
- u32 dsi_pll_ctrl;
- u32 dsi_pll_div;
-};
-
-static const u32 lfsr_converts[] = {
+static const u16 lfsr_converts[] = {
426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */
461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
106, 53, 282, 397, 454, 227, 113, 56, 284, 142, /* 81 - 90 */
@@ -64,10 +38,11 @@ static const u32 lfsr_converts[] = {
};
/* Get DSI clock from pixel clock */
-static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
+static u32 dsi_clk_from_pclk(u32 pclk, enum mipi_dsi_pixel_format fmt,
+ int lane_count)
{
u32 dsi_clk_khz;
- u32 bpp = dsi_pixel_format_bpp(pixel_format);
+ u32 bpp = mipi_dsi_pixel_format_to_bpp(fmt);
/* DSI data rate = pixel clock * bits per pixel / lane count
pixel clock is converted from KHz to Hz */
@@ -77,7 +52,8 @@ static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
}
static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
- struct dsi_mnp *dsi_mnp, int target_dsi_clk)
+ struct intel_crtc_state *config,
+ int target_dsi_clk)
{
unsigned int calc_m = 0, calc_p = 0;
unsigned int m_min, m_max, p_min = 2, p_max = 6;
@@ -123,8 +99,8 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
/* register has log2(N1), this works fine for powers of two */
n = ffs(n) - 1;
m_seed = lfsr_converts[calc_m - 62];
- dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
- dsi_mnp->dsi_pll_div = n << DSI_PLL_N1_DIV_SHIFT |
+ config->dsi_pll.ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
+ config->dsi_pll.div = n << DSI_PLL_N1_DIV_SHIFT |
m_seed << DSI_PLL_M1_DIV_SHIFT;
return 0;
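
As a sanity check on dsi_clk_from_pclk() above, the relationship is simply pixel clock times bits per pixel divided by lane count. The standalone sketch below uses made-up example numbers (a hypothetical 148500 kHz pixel clock, RGB888, 4 lanes); rounding details in the real helper may differ.

/* Illustration only: per-lane DSI clock from pclk, bpp and lane count. */
#include <stdio.h>

static unsigned int example_dsi_clk_khz(unsigned int pclk_khz,
					unsigned int bpp,
					unsigned int lane_count)
{
	/* DSI data rate = pixel clock * bits per pixel / lane count */
	return (pclk_khz * bpp + lane_count / 2) / lane_count;
}

int main(void)
{
	/* e.g. 148500 kHz * 24 bpp / 4 lanes = 891000 kHz per lane */
	printf("%u kHz\n", example_dsi_clk_khz(148500, 24, 4));
	return 0;
}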
@@ -134,54 +110,55 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
* XXX: The muxing and gating is hard coded for now. Need to add support for
* sharing PLLs with two DSI outputs.
*/
-static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
+static int vlv_compute_dsi_pll(struct intel_encoder *encoder,
+ struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
int ret;
- struct dsi_mnp dsi_mnp;
u32 dsi_clk;
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
intel_dsi->lane_count);
- ret = dsi_calc_mnp(dev_priv, &dsi_mnp, dsi_clk);
+ ret = dsi_calc_mnp(dev_priv, config, dsi_clk);
if (ret) {
DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
- return;
+ return ret;
}
if (intel_dsi->ports & (1 << PORT_A))
- dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
+ config->dsi_pll.ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
if (intel_dsi->ports & (1 << PORT_C))
- dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI1_DSIPLL;
+ config->dsi_pll.ctrl |= DSI_PLL_CLK_GATE_DSI1_DSIPLL;
+
+ config->dsi_pll.ctrl |= DSI_PLL_VCO_EN;
DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
- dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl);
+ config->dsi_pll.div, config->dsi_pll.ctrl);
- vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
- vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, dsi_mnp.dsi_pll_div);
- vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl);
+ return 0;
}
-static void vlv_enable_dsi_pll(struct intel_encoder *encoder)
+static void vlv_enable_dsi_pll(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- u32 tmp;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
DRM_DEBUG_KMS("\n");
mutex_lock(&dev_priv->sb_lock);
- vlv_configure_dsi_pll(encoder);
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, config->dsi_pll.div);
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL,
+ config->dsi_pll.ctrl & ~DSI_PLL_VCO_EN);
/* wait at least 0.5 us after ungating before enabling VCO */
usleep_range(1, 10);
- tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
- tmp |= DSI_PLL_VCO_EN;
- vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl);
if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) &
DSI_PLL_LOCK, 20)) {
@@ -197,7 +174,7 @@ static void vlv_enable_dsi_pll(struct intel_encoder *encoder)
static void vlv_disable_dsi_pll(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp;
DRM_DEBUG_KMS("\n");
@@ -212,9 +189,39 @@ static void vlv_disable_dsi_pll(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->sb_lock);
}
+static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+{
+ bool enabled;
+ u32 val;
+ u32 mask;
+
+ mask = BXT_DSI_PLL_DO_ENABLE | BXT_DSI_PLL_LOCKED;
+ val = I915_READ(BXT_DSI_PLL_ENABLE);
+ enabled = (val & mask) == mask;
+
+ if (!enabled)
+ return false;
+
+ /*
+ * Both dividers must be programmed with valid values even if only one
+ * of the PLL is used, see BSpec/Broxton Clocks. Check this here for
+ * paranoia, since BIOS is known to misconfigure PLLs in this way at
+ * times, and since accessing DSI registers with invalid dividers
+ * causes a system hang.
+ */
+ val = I915_READ(BXT_DSI_PLL_CTL);
+ if (!(val & BXT_DSIA_16X_MASK) || !(val & BXT_DSIC_16X_MASK)) {
+ DRM_DEBUG_DRIVER("PLL is enabled with invalid divider settings (%08x)\n",
+ val);
+ enabled = false;
+ }
+
+ return enabled;
+}
+
static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val;
DRM_DEBUG_KMS("\n");
@@ -232,23 +239,24 @@ static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
}
-static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
+static void assert_bpp_mismatch(enum mipi_dsi_pixel_format fmt, int pipe_bpp)
{
- int bpp = dsi_pixel_format_bpp(pixel_format);
+ int bpp = mipi_dsi_pixel_format_to_bpp(fmt);
WARN(bpp != pipe_bpp,
"bpp match assertion failure (expected %d, current %d)\n",
bpp, pipe_bpp);
}
-static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
+static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+ struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 dsi_clock, pclk;
u32 pll_ctl, pll_div;
u32 m = 0, p = 0, n;
- int refclk = 25000;
+ int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000;
int i;
DRM_DEBUG_KMS("\n");
@@ -258,6 +266,9 @@ static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER);
mutex_unlock(&dev_priv->sb_lock);
+ config->dsi_pll.ctrl = pll_ctl & ~DSI_PLL_LOCK;
+ config->dsi_pll.div = pll_div;
+
/* mask out other bits and extract the P1 divisor */
pll_ctl &= DSI_PLL_P1_POST_DIV_MASK;
pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2);
@@ -303,7 +314,8 @@ static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
return pclk;
}
-static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
+static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+ struct intel_crtc_state *config)
{
u32 pclk;
u32 dsi_clk;
@@ -317,15 +329,9 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
return 0;
}
- dsi_ratio = I915_READ(BXT_DSI_PLL_CTL) &
- BXT_DSI_PLL_RATIO_MASK;
+ config->dsi_pll.ctrl = I915_READ(BXT_DSI_PLL_CTL);
- /* Invalid DSI ratio ? */
- if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN ||
- dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
- DRM_ERROR("Invalid DSI pll ratio(%u) programmed\n", dsi_ratio);
- return 0;
- }
+ dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2;
@@ -338,12 +344,13 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
return pclk;
}
-u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
+u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+ struct intel_crtc_state *config)
{
if (IS_BROXTON(encoder->base.dev))
- return bxt_dsi_get_pclk(encoder, pipe_bpp);
+ return bxt_dsi_get_pclk(encoder, pipe_bpp, config);
else
- return vlv_dsi_get_pclk(encoder, pipe_bpp);
+ return vlv_dsi_get_pclk(encoder, pipe_bpp, config);
}
static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
@@ -360,51 +367,72 @@ static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
}
/* Program BXT Mipi clocks and dividers */
-static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port)
+static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
+ const struct intel_crtc_state *config)
{
- u32 tmp;
- u32 divider;
- u32 dsi_rate;
- u32 pll_ratio;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
+ u32 dsi_rate = 0;
+ u32 pll_ratio = 0;
+ u32 rx_div;
+ u32 tx_div;
+ u32 rx_div_upper;
+ u32 rx_div_lower;
+ u32 mipi_8by3_divider;
/* Clear old configurations */
tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
- tmp &= ~(BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port));
- tmp &= ~(BXT_MIPI_ESCLK_VAR_DIV_MASK(port));
- tmp &= ~(BXT_MIPI_DPHY_DIVIDER_MASK(port));
+ tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
+ tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
+ tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
/* Get the current DSI rate(actual) */
- pll_ratio = I915_READ(BXT_DSI_PLL_CTL) &
- BXT_DSI_PLL_RATIO_MASK;
+ pll_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
dsi_rate = (BXT_REF_CLOCK_KHZ * pll_ratio) / 2;
- /* Max possible output of clock is 39.5 MHz, program value -1 */
- divider = (dsi_rate / BXT_MAX_VAR_OUTPUT_KHZ) - 1;
- tmp |= BXT_MIPI_ESCLK_VAR_DIV(port, divider);
+ /*
+ * tx clock should be <= 20MHz and the div value must be
+ * subtracted by 1 as per bspec
+ */
+ tx_div = DIV_ROUND_UP(dsi_rate, 20000) - 1;
+ /*
+ * rx clock should be <= 150MHz and the div value must be
+ * subtracted by 1 as per bspec
+ */
+ rx_div = DIV_ROUND_UP(dsi_rate, 150000) - 1;
/*
- * Tx escape clock must be as close to 20MHz possible, but should
- * not exceed it. Hence select divide by 2
+ * rx divider value needs to be updated in the
+	 * two different bit fields in the register, hence splitting the
+ * rx divider value accordingly
*/
- tmp |= BXT_MIPI_TX_ESCLK_8XDIV_BY2(port);
+ rx_div_lower = rx_div & RX_DIVIDER_BIT_1_2;
+ rx_div_upper = (rx_div & RX_DIVIDER_BIT_3_4) >> 2;
- tmp |= BXT_MIPI_RX_ESCLK_8X_BY3(port);
+	/* As per bspec, program the 8/3X clock divider to the value below */
+ if (dev_priv->vbt.dsi.config->is_cmd_mode)
+ mipi_8by3_divider = 0x2;
+ else
+ mipi_8by3_divider = 0x3;
+
+ tmp |= BXT_MIPI_8X_BY3_DIVIDER(port, mipi_8by3_divider);
+ tmp |= BXT_MIPI_TX_ESCLK_DIVIDER(port, tx_div);
+ tmp |= BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, rx_div_lower);
+ tmp |= BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, rx_div_upper);
I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
}
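
The divider math added to bxt_dsi_program_clocks() above is easy to check by hand: the tx escape clock divider keeps the output at or below 20 MHz, the rx divider at or below 150 MHz, and the rx value is then split across two register fields, assumed here (from the >> 2 shift) to hold bits 1:0 and 3:2 of the divider. The standalone sketch below uses a made-up 891000 kHz DSI rate.

/* Illustration of the tx/rx escape-clock divider computation above. */
#include <stdio.h>

#define DIV_ROUND_UP_EX(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int dsi_rate = 891000;	/* kHz, example value */
	unsigned int tx_div, rx_div, rx_div_lower, rx_div_upper;

	tx_div = DIV_ROUND_UP_EX(dsi_rate, 20000) - 1;	/* <= 20 MHz tx clock */
	rx_div = DIV_ROUND_UP_EX(dsi_rate, 150000) - 1;	/* <= 150 MHz rx clock */

	rx_div_lower = rx_div & 0x3;		/* assumed lower field, bits 1:0 */
	rx_div_upper = (rx_div & 0xc) >> 2;	/* assumed upper field, bits 3:2 */

	printf("tx_div=%u rx_div=%u (lower=%u upper=%u)\n",
	       tx_div, rx_div, rx_div_lower, rx_div_upper);
	return 0;
}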
-static bool bxt_configure_dsi_pll(struct intel_encoder *encoder)
+static int bxt_compute_dsi_pll(struct intel_encoder *encoder,
+ struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u8 dsi_ratio;
u32 dsi_clk;
- u32 val;
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
- intel_dsi->lane_count);
+ intel_dsi->lane_count);
/*
* From clock diagram, to get PLL ratio divider, divide double of DSI
@@ -413,9 +441,9 @@ static bool bxt_configure_dsi_pll(struct intel_encoder *encoder)
*/
dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ);
if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN ||
- dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
+ dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
DRM_ERROR("Cant get a suitable ratio from DSI PLL ratios\n");
- return false;
+ return -ECHRNG;
}
/*
@@ -423,27 +451,19 @@ static bool bxt_configure_dsi_pll(struct intel_encoder *encoder)
* Spec says both have to be programmed, even if one is not getting
* used. Configure MIPI_CLOCK_CTL dividers in modeset
*/
- val = I915_READ(BXT_DSI_PLL_CTL);
- val &= ~BXT_DSI_PLL_PVD_RATIO_MASK;
- val &= ~BXT_DSI_FREQ_SEL_MASK;
- val &= ~BXT_DSI_PLL_RATIO_MASK;
- val |= (dsi_ratio | BXT_DSIA_16X_BY2 | BXT_DSIC_16X_BY2);
+ config->dsi_pll.ctrl = dsi_ratio | BXT_DSIA_16X_BY2 | BXT_DSIC_16X_BY2;
/* As per recommendation from hardware team,
* Prog PVD ratio =1 if dsi ratio <= 50
*/
- if (dsi_ratio <= 50) {
- val &= ~BXT_DSI_PLL_PVD_RATIO_MASK;
- val |= BXT_DSI_PLL_PVD_RATIO_1;
- }
+ if (dsi_ratio <= 50)
+ config->dsi_pll.ctrl |= BXT_DSI_PLL_PVD_RATIO_1;
- I915_WRITE(BXT_DSI_PLL_CTL, val);
- POSTING_READ(BXT_DSI_PLL_CTL);
-
- return true;
+ return 0;
}
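
A quick worked example of the ratio computation in bxt_compute_dsi_pll() above: the PLL ratio is the DSI clock doubled, divided by the reference clock with rounding up. The sketch below assumes a 19200 kHz reference for BXT_REF_CLOCK_KHZ and a made-up 891000 kHz DSI clock.

/* Illustration of the BXT DSI PLL ratio computation above. */
#include <stdio.h>

#define DIV_ROUND_UP_EX(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int ref_khz = 19200;	/* assumed value of BXT_REF_CLOCK_KHZ */
	unsigned int dsi_clk = 891000;	/* kHz, example value */
	unsigned int dsi_ratio = DIV_ROUND_UP_EX(dsi_clk * 2, ref_khz);

	/* 1782000 / 19200 = 92.8..., rounded up to 93 */
	printf("dsi_ratio=%u\n", dsi_ratio);
	return 0;
}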
-static void bxt_enable_dsi_pll(struct intel_encoder *encoder)
+static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -452,23 +472,13 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
- val = I915_READ(BXT_DSI_PLL_ENABLE);
-
- if (val & BXT_DSI_PLL_DO_ENABLE) {
- WARN(1, "DSI PLL already enabled. Disabling it.\n");
- val &= ~BXT_DSI_PLL_DO_ENABLE;
- I915_WRITE(BXT_DSI_PLL_ENABLE, val);
- }
-
	/* Configure PLL values */
- if (!bxt_configure_dsi_pll(encoder)) {
- DRM_ERROR("Configure DSI PLL failed, abort PLL enable\n");
- return;
- }
+ I915_WRITE(BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
+ POSTING_READ(BXT_DSI_PLL_CTL);
/* Program TX, RX, Dphy clocks */
for_each_dsi_port(port, intel_dsi->ports)
- bxt_dsi_program_clocks(encoder->base.dev, port);
+ bxt_dsi_program_clocks(encoder->base.dev, port, config);
/* Enable DSI PLL */
val = I915_READ(BXT_DSI_PLL_ENABLE);
@@ -484,14 +494,38 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder)
DRM_DEBUG_KMS("DSI PLL locked\n");
}
-void intel_enable_dsi_pll(struct intel_encoder *encoder)
+bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+{
+ if (IS_BROXTON(dev_priv))
+ return bxt_dsi_pll_is_enabled(dev_priv);
+
+ MISSING_CASE(INTEL_DEVID(dev_priv));
+
+ return false;
+}
+
+int intel_compute_dsi_pll(struct intel_encoder *encoder,
+ struct intel_crtc_state *config)
+{
+ struct drm_device *dev = encoder->base.dev;
+
+ if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ return vlv_compute_dsi_pll(encoder, config);
+ else if (IS_BROXTON(dev))
+ return bxt_compute_dsi_pll(encoder, config);
+
+ return -ENODEV;
+}
+
+void intel_enable_dsi_pll(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config)
{
struct drm_device *dev = encoder->base.dev;
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
- vlv_enable_dsi_pll(encoder);
+ vlv_enable_dsi_pll(encoder, config);
else if (IS_BROXTON(dev))
- bxt_enable_dsi_pll(encoder);
+ bxt_enable_dsi_pll(encoder, config);
}
void intel_disable_dsi_pll(struct intel_encoder *encoder)
@@ -513,9 +547,9 @@ static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
/* Clear old configurations */
tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
- tmp &= ~(BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port));
- tmp &= ~(BXT_MIPI_ESCLK_VAR_DIV_MASK(port));
- tmp &= ~(BXT_MIPI_DPHY_DIVIDER_MASK(port));
+ tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
+ tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
+ tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
}
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 28f440772..647127f3a 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -506,6 +506,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
int size,
int fb_cpp)
{
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
int compression_threshold = 1;
int ret;
u64 end;
@@ -516,9 +517,9 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
* underruns, even if that range is not reserved by the BIOS. */
if (IS_BROADWELL(dev_priv) ||
IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
- end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
+ end = ggtt->stolen_size - 8 * 1024 * 1024;
else
- end = dev_priv->gtt.stolen_usable_size;
+ end = ggtt->stolen_usable_size;
/* HACK: This code depends on what we will do in *_enable_fbc. If that
* code changes, this code needs to change as well.
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index c607217c1..ab8d09a81 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -122,6 +122,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
struct drm_framebuffer *fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj = NULL;
int size, ret;
@@ -146,7 +147,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
/* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features. */
- if (size * 2 < dev_priv->gtt.stolen_usable_size)
+ if (size * 2 < ggtt->stolen_usable_size)
obj = i915_gem_object_create_stolen(dev, size);
if (obj == NULL)
obj = i915_gem_alloc_object(dev, size);
@@ -181,7 +182,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
container_of(helper, struct intel_fbdev, helper);
struct intel_framebuffer *intel_fb = ifbdev->fb;
struct drm_device *dev = helper->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
@@ -220,7 +222,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
* This also validates that any existing fb inherited from the
* BIOS is suitable for own access.
*/
- ret = intel_pin_and_fence_fb_obj(NULL, &ifbdev->fb->base, NULL);
+ ret = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
if (ret)
goto out_unlock;
@@ -244,13 +246,13 @@ static int intelfb_create(struct drm_fb_helper *helper,
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = dev->mode_config.fb_base;
- info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
+ info->apertures->ranges[0].size = ggtt->mappable_end;
info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
info->fix.smem_len = size;
info->screen_base =
- ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
+ ioremap_wc(ggtt->mappable_base + i915_gem_obj_ggtt_offset(obj),
size);
if (!info->screen_base) {
DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
@@ -379,6 +381,7 @@ retry:
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_fb_helper_crtc *new_crtc;
+ struct intel_crtc *intel_crtc;
fb_conn = fb_helper->connector_info[i];
connector = fb_conn->connector;
@@ -420,6 +423,13 @@ retry:
num_connectors_enabled++;
+ intel_crtc = to_intel_crtc(connector->state->crtc);
+ for (j = 0; j < 256; j++) {
+ intel_crtc->lut_r[j] = j;
+ intel_crtc->lut_g[j] = j;
+ intel_crtc->lut_b[j] = j;
+ }
+
new_crtc = intel_fb_helper_crtc(fb_helper, connector->state->crtc);
/*
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index bda526660..9be839a24 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -212,7 +212,7 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
I915_WRITE(SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
POSTING_READ(SERR_INT);
- DRM_ERROR("pch fifo underrun on pch transcoder %c\n",
+ DRM_ERROR("pch fifo underrun on pch transcoder %s\n",
transcoder_name(pch_transcoder));
}
@@ -235,7 +235,7 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
if (old && I915_READ(SERR_INT) &
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
- DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
+ DRM_ERROR("uncleared pch fifo underrun on pch transcoder %s\n",
transcoder_name(pch_transcoder));
}
}
@@ -333,7 +333,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
old = !intel_crtc->pch_fifo_underrun_disabled;
intel_crtc->pch_fifo_underrun_disabled = !enable;
- if (HAS_PCH_IBX(dev_priv->dev))
+ if (HAS_PCH_IBX(dev_priv))
ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
enable);
else
@@ -363,7 +363,7 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
return;
/* GMCH can't disable fifo underruns, filter them. */
- if (HAS_GMCH_DISPLAY(dev_priv->dev) &&
+ if (HAS_GMCH_DISPLAY(dev_priv) &&
to_intel_crtc(crtc)->cpu_fifo_underrun_disabled)
return;
@@ -386,7 +386,7 @@ void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
{
if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
false))
- DRM_ERROR("PCH transcoder %c FIFO underrun\n",
+ DRM_ERROR("PCH transcoder %s FIFO underrun\n",
transcoder_name(pch_transcoder));
}
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 73002e901..9d79c4c3e 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -27,8 +27,34 @@
#include "intel_guc_fwif.h"
#include "i915_guc_reg.h"
+struct drm_i915_gem_request;
+
+/*
+ * This structure primarily describes the GEM object shared with the GuC.
+ * The GEM object is held for the entire lifetime of our interaction with
+ * the GuC, being allocated before the GuC is loaded with its firmware.
+ * Because there's no way to update the address used by the GuC after
+ * initialisation, the shared object must stay pinned into the GGTT as
+ * long as the GuC is in use. We also keep the first page (only) mapped
+ * into kernel address space, as it includes shared data that must be
+ * updated on every request submission.
+ *
+ * The single GEM object described here is actually made up of several
+ * separate areas, as far as the GuC is concerned. The first page (kept
+ * kmap'd) includes the "process descriptor" which holds sequence data for
+ * the doorbell, and one cacheline which actually *is* the doorbell; a
+ * write to this will "ring the doorbell" (i.e. send an interrupt to the
+ * GuC). The subsequent pages of the client object constitute the work
+ * queue (a circular array of work items), again described in the process
+ * descriptor. Work queue pages are mapped momentarily as required.
+ *
+ * Finally, we also keep a few statistics here, including the number of
+ * submissions to each engine, and a record of the last submission failure
+ * (if any).
+ */
struct i915_guc_client {
struct drm_i915_gem_object *client_obj;
+ void *client_base; /* first page (only) of above */
struct intel_context *owner;
struct intel_guc *guc;
uint32_t priority;
@@ -43,13 +69,14 @@ struct i915_guc_client {
uint32_t wq_offset;
uint32_t wq_size;
uint32_t wq_tail;
- uint32_t wq_head;
+ uint32_t unused; /* Was 'wq_head' */
/* GuC submission statistics & status */
uint64_t submissions[GUC_MAX_ENGINES_NUM];
uint32_t q_fail;
uint32_t b_fail;
int retcode;
+ int spare; /* pad to 32 DWords */
};
enum intel_guc_fw_status {
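
The comment block added above describes the layout of the shared client object: the first, permanently kmap'd page holds the process descriptor plus the doorbell cacheline, and the remaining pages form a circular work queue. The sketch below only illustrates that layout with hypothetical sizes and field names; it is not the driver's actual structure.

/* Hypothetical layout sketch of the GuC client object described above. */
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE	4096u
#define EXAMPLE_CACHELINE	64u

struct example_guc_first_page {
	uint8_t process_desc[EXAMPLE_CACHELINE];	/* doorbell sequence data */
	uint8_t doorbell[EXAMPLE_CACHELINE];		/* a write here rings the doorbell */
	/* remainder of the first page unused in this sketch */
};

/* Work items live in a circular queue starting at the second page. */
static inline uint32_t example_wq_next_tail(uint32_t tail, uint32_t item_size,
					    uint32_t wq_size)
{
	return (tail + item_size) % wq_size;	/* wrap within the work queue */
}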
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index ef46f976f..6021d5872 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -81,14 +81,14 @@ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
{
- struct intel_engine_cs *ring;
- int i, irqs;
+ struct intel_engine_cs *engine;
+ int irqs;
/* tell all command streamers NOT to forward interrupts and vblank to GuC */
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
- for_each_ring(ring, dev_priv, i)
- I915_WRITE(RING_MODE_GEN7(ring), irqs);
+ for_each_engine(engine, dev_priv)
+ I915_WRITE(RING_MODE_GEN7(engine), irqs);
/* route all GT interrupts to the host */
I915_WRITE(GUC_BCS_RCS_IER, 0);
@@ -98,14 +98,14 @@ static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
{
- struct intel_engine_cs *ring;
- int i, irqs;
+ struct intel_engine_cs *engine;
+ int irqs;
/* tell all command streamers to forward interrupts and vblank to GuC */
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
- for_each_ring(ring, dev_priv, i)
- I915_WRITE(RING_MODE_GEN7(ring), irqs);
+ for_each_engine(engine, dev_priv)
+ I915_WRITE(RING_MODE_GEN7(engine), irqs);
/* route USER_INTERRUPT to Host, all others are sent to GuC. */
irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
@@ -353,6 +353,24 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
return ret;
}
+static int i915_reset_guc(struct drm_i915_private *dev_priv)
+{
+ int ret;
+ u32 guc_status;
+
+ ret = intel_guc_reset(dev_priv);
+ if (ret) {
+ DRM_ERROR("GuC reset failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ guc_status = I915_READ(GUC_STATUS);
+ WARN(!(guc_status & GS_MIA_IN_RESET),
+ "GuC status: 0x%x, MIA core expected to be in reset\n", guc_status);
+
+ return ret;
+}
+
/**
* intel_guc_ucode_load() - load GuC uCode into the device
* @dev: drm device
@@ -369,7 +387,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
- int err = 0;
+ int retries, err = 0;
if (!i915.enable_guc_submission)
return 0;
@@ -417,9 +435,33 @@ int intel_guc_ucode_load(struct drm_device *dev)
if (err)
goto fail;
- err = guc_ucode_xfer(dev_priv);
- if (err)
- goto fail;
+ /*
+ * WaEnableuKernelHeaderValidFix:skl,bxt
+	 * For BXT, this is documented only up to B0, but the WA below is required
+	 * for later steppings as well, so it is extended to cover them too.
+ */
+ /* WaEnableGuCBootHashCheckNotSet:skl,bxt */
+ for (retries = 3; ; ) {
+ /*
+ * Always reset the GuC just before (re)loading, so
+ * that the state and timing are fairly predictable
+ */
+ err = i915_reset_guc(dev_priv);
+ if (err) {
+ DRM_ERROR("GuC reset failed, err %d\n", err);
+ goto fail;
+ }
+
+ err = guc_ucode_xfer(dev_priv);
+ if (!err)
+ break;
+
+ if (--retries == 0)
+ goto fail;
+
+ DRM_INFO("GuC fw load failed, err %d; will reset and "
+ "retry %d more time(s)\n", err, retries);
+ }
guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
@@ -440,6 +482,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
return 0;
fail:
+ DRM_ERROR("GuC firmware load failed, err %d\n", err);
if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
@@ -595,8 +638,8 @@ void intel_guc_ucode_init(struct drm_device *dev)
fw_path = NULL;
} else if (IS_SKYLAKE(dev)) {
fw_path = I915_SKL_GUC_UCODE;
- guc_fw->guc_fw_major_wanted = 4;
- guc_fw->guc_fw_minor_wanted = 3;
+ guc_fw->guc_fw_major_wanted = 6;
+ guc_fw->guc_fw_minor_wanted = 1;
} else {
i915.enable_guc_submission = false;
fw_path = ""; /* unknown device */
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 3ddb4fac5..a8844702d 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -638,7 +638,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
- else if (HAS_PCH_SPLIT(dev_priv->dev))
+ else if (HAS_PCH_SPLIT(dev_priv))
reg = TVIDEO_DIP_GCP(crtc->pipe);
else
return false;
@@ -970,10 +970,9 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (pipe_config->pixel_multiplier)
dotclock /= pipe_config->pixel_multiplier;
- if (HAS_PCH_SPLIT(dev_priv->dev))
- ironlake_check_encoder_dotclock(pipe_config, dotclock);
-
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+
+ pipe_config->lane_count = 4;
}
static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
@@ -1375,6 +1374,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
/* Set user selected PAR to incoming mode's member */
adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio;
+ pipe_config->lane_count = 4;
+
return true;
}
@@ -1395,16 +1396,38 @@ intel_hdmi_unset_edid(struct drm_connector *connector)
}
static void
-intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
+intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
+ enum port port = hdmi_to_dig_port(hdmi)->port;
struct i2c_adapter *adapter =
intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(adapter);
- if (type == DRM_DP_DUAL_MODE_NONE ||
- type == DRM_DP_DUAL_MODE_UNKNOWN)
+ /*
+ * Type 1 DVI adaptors are not required to implement any
+ * registers, so we can't always detect their presence.
+ * Ideally we should be able to check the state of the
+ * CONFIG1 pin, but no such luck on our hardware.
+ *
+ * The only method left to us is to check the VBT to see
+ * if the port is a dual mode capable DP port. But let's
+ * only do that when we successfully read the EDID, to avoid
+ * confusing log messages about DP dual mode adaptors when
+ * there's nothing connected to the port.
+ */
+ if (type == DRM_DP_DUAL_MODE_UNKNOWN) {
+ if (has_edid &&
+ intel_bios_is_port_dp_dual_mode(dev_priv, port)) {
+ DRM_DEBUG_KMS("Assuming DP dual mode adaptor presence based on VBT\n");
+ type = DRM_DP_DUAL_MODE_TYPE1_DVI;
+ } else {
+ type = DRM_DP_DUAL_MODE_NONE;
+ }
+ }
+
+ if (type == DRM_DP_DUAL_MODE_NONE)
return;
hdmi->dp_dual_mode.type = type;
@@ -1431,7 +1454,7 @@ intel_hdmi_set_edid(struct drm_connector *connector, bool force)
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
- intel_hdmi_dp_dual_mode_detect(connector);
+ intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
}
@@ -2119,6 +2142,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
enum port port = intel_dig_port->port;
uint8_t alternate_ddc_pin;
+ DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
+ port_name(port));
+
if (WARN(intel_dig_port->max_lanes < 4,
"Not enough lanes (%d) for HDMI on port %c\n",
intel_dig_port->max_lanes, port_name(port)))
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 52fbe530f..81de23098 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -124,7 +124,7 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
u32 val;
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- if (!IS_PINEVIEW(dev_priv->dev))
+ if (!IS_PINEVIEW(dev_priv))
return;
val = I915_READ(DSPCLK_GATE_D);
@@ -264,7 +264,7 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
u32 gmbus2 = 0;
DEFINE_WAIT(wait);
- if (!HAS_GMBUS_IRQ(dev_priv->dev))
+ if (!HAS_GMBUS_IRQ(dev_priv))
gmbus4_irq_en = 0;
/* Important: The hw handles only the first bit, so set only one! Since
@@ -300,7 +300,7 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
#define C ((I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0)
- if (!HAS_GMBUS_IRQ(dev_priv->dev))
+ if (!HAS_GMBUS_IRQ(dev_priv))
return wait_for(C, 10);
/* Important: The hw handles only the first bit, so set only one! */
@@ -571,15 +571,14 @@ clear_err:
goto out;
timeout:
- DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
- bus->adapter.name, bus->reg0 & 0xff);
+ DRM_DEBUG_KMS("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
+ bus->adapter.name, bus->reg0 & 0xff);
I915_WRITE(GMBUS0, 0);
/*
* Hardware may not support GMBUS over these pins? Try GPIO bitbanging
* instead. Use EAGAIN to have i2c core retry.
*/
- bus->force_bit = 1;
ret = -EAGAIN;
out:
@@ -597,10 +596,15 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
mutex_lock(&dev_priv->gmbus_mutex);
- if (bus->force_bit)
+ if (bus->force_bit) {
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
- else
+ if (ret < 0)
+ bus->force_bit &= ~GMBUS_FORCE_BIT_RETRY;
+ } else {
ret = do_gmbus_xfer(adapter, msgs, num);
+ if (ret == -EAGAIN)
+ bus->force_bit |= GMBUS_FORCE_BIT_RETRY;
+ }
mutex_unlock(&dev_priv->gmbus_mutex);
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
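
The change above makes GMBUS timeouts recoverable: a failed GMBUS transfer no longer forces bit-banging permanently, it only sets a retry flag so the next transfer uses bit-banging, and if bit-banging then fails the flag is cleared so GMBUS gets another chance. The standalone sketch below mimics that flag handling; the flag value and error codes are illustrative stand-ins.

/* Illustration of the GMBUS/bit-banging retry flag handling above. */
#include <stdio.h>

#define EXAMPLE_FORCE_BIT_RETRY	(1u << 31)	/* stand-in for GMBUS_FORCE_BIT_RETRY */
#define EXAMPLE_EAGAIN		11

static unsigned int force_bit;

static void do_xfer(int gmbus_ret, int bitbang_ret)
{
	if (force_bit) {
		if (bitbang_ret < 0)
			force_bit &= ~EXAMPLE_FORCE_BIT_RETRY;	/* give GMBUS another go */
	} else {
		if (gmbus_ret == -EXAMPLE_EAGAIN)
			force_bit |= EXAMPLE_FORCE_BIT_RETRY;	/* bit-bang next transfer */
	}
	printf("force_bit=%#x\n", force_bit);
}

int main(void)
{
	do_xfer(-EXAMPLE_EAGAIN, 0);	/* GMBUS timed out: retry flag set */
	do_xfer(0, -5);			/* bit-banging failed: flag cleared */
	return 0;
}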
@@ -718,11 +722,16 @@ void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+
+ mutex_lock(&dev_priv->gmbus_mutex);
bus->force_bit += force_bit ? 1 : -1;
DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
force_bit ? "en" : "dis", adapter->name,
bus->force_bit);
+
+ mutex_unlock(&dev_priv->gmbus_mutex);
}
void intel_teardown_gmbus(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 5c6080fd0..7f2d8415e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -131,6 +131,7 @@
* preemption, but just sampling the new tail pointer).
*
*/
+#include <linux/interrupt.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
@@ -228,9 +229,6 @@ enum {
static int intel_lr_context_pin(struct intel_context *ctx,
struct intel_engine_cs *engine);
-static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
- struct drm_i915_gem_object *default_ctx_obj);
-
/**
* intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@@ -266,20 +264,23 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
}
static void
-logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
+logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
+
+ if (IS_GEN8(dev) || IS_GEN9(dev))
+ engine->idle_lite_restore_wa = ~0;
- ring->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+ engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
- (ring->id == VCS || ring->id == VCS2);
+ (engine->id == VCS || engine->id == VCS2);
- ring->ctx_desc_template = GEN8_CTX_VALID;
- ring->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
+ engine->ctx_desc_template = GEN8_CTX_VALID;
+ engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
if (IS_GEN8(dev))
- ring->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
- ring->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
+ engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
+ engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
/* TODO: WaDisableLiteRestore when we start using semaphore
* signalling between Command Streamers */
@@ -287,8 +288,8 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
/* WaEnableForceRestoreInCtxtDescForVCS:skl */
/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
- if (ring->disable_lite_restore_wa)
- ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
+ if (engine->disable_lite_restore_wa)
+ engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
}
/**
@@ -311,24 +312,24 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
*/
static void
intel_lr_context_descriptor_update(struct intel_context *ctx,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *engine)
{
uint64_t lrca, desc;
- lrca = ctx->engine[ring->id].lrc_vma->node.start +
+ lrca = ctx->engine[engine->id].lrc_vma->node.start +
LRC_PPHWSP_PN * PAGE_SIZE;
- desc = ring->ctx_desc_template; /* bits 0-11 */
+ desc = engine->ctx_desc_template; /* bits 0-11 */
desc |= lrca; /* bits 12-31 */
desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */
- ctx->engine[ring->id].lrc_desc = desc;
+ ctx->engine[engine->id].lrc_desc = desc;
}
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *engine)
{
- return ctx->engine[ring->id].lrc_desc;
+ return ctx->engine[engine->id].lrc_desc;
}
/**
@@ -348,98 +349,103 @@ uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
* Return: 20-bits globally unique context ID.
*/
u32 intel_execlists_ctx_id(struct intel_context *ctx,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *engine)
{
- return intel_lr_context_descriptor(ctx, ring) >> GEN8_CTX_ID_SHIFT;
+ return intel_lr_context_descriptor(ctx, engine) >> GEN8_CTX_ID_SHIFT;
}
static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
struct drm_i915_gem_request *rq1)
{
- struct intel_engine_cs *ring = rq0->ring;
- struct drm_device *dev = ring->dev;
+ struct intel_engine_cs *engine = rq0->engine;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint64_t desc[2];
if (rq1) {
- desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->ring);
+ desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine);
rq1->elsp_submitted++;
} else {
desc[1] = 0;
}
- desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->ring);
+ desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine);
rq0->elsp_submitted++;
/* You must always write both descriptors in the order below. */
- spin_lock(&dev_priv->uncore.lock);
- intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
- I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[1]));
- I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[1]));
+ I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
+ I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));
- I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[0]));
+ I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
/* The context is automatically loaded after the following */
- I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0]));
+ I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));
/* ELSP is a wo register, use another nearby reg for posting */
- POSTING_READ_FW(RING_EXECLIST_STATUS_LO(ring));
- intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
- spin_unlock(&dev_priv->uncore.lock);
+ POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
}
-static int execlists_update_context(struct drm_i915_gem_request *rq)
+static void
+execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
{
- struct intel_engine_cs *ring = rq->ring;
+ ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
+ ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
+ ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
+ ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
+}
+
+static void execlists_update_context(struct drm_i915_gem_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
- uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state;
+ uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
reg_state[CTX_RING_TAIL+1] = rq->tail;
- if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
- /* True 32b PPGTT with dynamic page allocation: update PDP
- * registers and point the unallocated PDPs to scratch page.
- * PML4 is allocated during ppgtt init, so this is not needed
- * in 48-bit mode.
- */
- ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
- ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
- ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
- ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
- }
-
- return 0;
+ /* True 32b PPGTT with dynamic page allocation: update PDP
+ * registers and point the unallocated PDPs to scratch page.
+ * PML4 is allocated during ppgtt init, so this is not needed
+ * in 48-bit mode.
+ */
+ if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
+ execlists_update_context_pdps(ppgtt, reg_state);
}
static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
struct drm_i915_gem_request *rq1)
{
+ struct drm_i915_private *dev_priv = rq0->i915;
+ unsigned int fw_domains = rq0->engine->fw_domains;
+
execlists_update_context(rq0);
if (rq1)
execlists_update_context(rq1);
+ spin_lock_irq(&dev_priv->uncore.lock);
+ intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
+
execlists_elsp_write(rq0, rq1);
+
+ intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
+ spin_unlock_irq(&dev_priv->uncore.lock);
}
-static void execlists_context_unqueue(struct intel_engine_cs *ring)
+static void execlists_context_unqueue(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
- struct drm_i915_gem_request *cursor = NULL, *tmp = NULL;
+ struct drm_i915_gem_request *cursor, *tmp;
- assert_spin_locked(&ring->execlist_lock);
+ assert_spin_locked(&engine->execlist_lock);
/*
* If irqs are not active generate a warning as batches that finish
* without the irqs may get lost and a GPU Hang may occur.
*/
- WARN_ON(!intel_irqs_enabled(ring->dev->dev_private));
-
- if (list_empty(&ring->execlist_queue))
- return;
+ WARN_ON(!intel_irqs_enabled(engine->dev->dev_private));
/* Try to read in pairs */
- list_for_each_entry_safe(cursor, tmp, &ring->execlist_queue,
+ list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
execlist_link) {
if (!req0) {
req0 = cursor;
@@ -448,172 +454,179 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
* will update tail past first request's workload */
cursor->elsp_submitted = req0->elsp_submitted;
list_move_tail(&req0->execlist_link,
- &ring->execlist_retired_req_list);
+ &engine->execlist_retired_req_list);
req0 = cursor;
} else {
req1 = cursor;
+ WARN_ON(req1->elsp_submitted);
break;
}
}
- if (IS_GEN8(ring->dev) || IS_GEN9(ring->dev)) {
+ if (unlikely(!req0))
+ return;
+
+ if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
/*
- * WaIdleLiteRestore: make sure we never cause a lite
- * restore with HEAD==TAIL
+ * WaIdleLiteRestore: make sure we never cause a lite restore
+ * with HEAD==TAIL.
+ *
+ * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL as we
+ * resubmit the request. See gen8_emit_request() for where we
+ * prepare the padding after the end of the request.
*/
- if (req0->elsp_submitted) {
- /*
- * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL
- * as we resubmit the request. See gen8_emit_request()
- * for where we prepare the padding after the end of the
- * request.
- */
- struct intel_ringbuffer *ringbuf;
-
- ringbuf = req0->ctx->engine[ring->id].ringbuf;
- req0->tail += 8;
- req0->tail &= ringbuf->size - 1;
- }
- }
+ struct intel_ringbuffer *ringbuf;
- WARN_ON(req1 && req1->elsp_submitted);
+ ringbuf = req0->ctx->engine[engine->id].ringbuf;
+ req0->tail += 8;
+ req0->tail &= ringbuf->size - 1;
+ }
execlists_submit_requests(req0, req1);
}
-static bool execlists_check_remove_request(struct intel_engine_cs *ring,
- u32 request_id)
+static unsigned int
+execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id)
{
struct drm_i915_gem_request *head_req;
- assert_spin_locked(&ring->execlist_lock);
+ assert_spin_locked(&engine->execlist_lock);
- head_req = list_first_entry_or_null(&ring->execlist_queue,
+ head_req = list_first_entry_or_null(&engine->execlist_queue,
struct drm_i915_gem_request,
execlist_link);
- if (head_req != NULL) {
- if (intel_execlists_ctx_id(head_req->ctx, ring) == request_id) {
- WARN(head_req->elsp_submitted == 0,
- "Never submitted head request\n");
+ if (!head_req)
+ return 0;
- if (--head_req->elsp_submitted <= 0) {
- list_move_tail(&head_req->execlist_link,
- &ring->execlist_retired_req_list);
- return true;
- }
- }
- }
+ if (unlikely(intel_execlists_ctx_id(head_req->ctx, engine) != request_id))
+ return 0;
+
+ WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
+
+ if (--head_req->elsp_submitted > 0)
+ return 0;
+
+ list_move_tail(&head_req->execlist_link,
+ &engine->execlist_retired_req_list);
- return false;
+ return 1;
}
-static void get_context_status(struct intel_engine_cs *ring,
- u8 read_pointer,
- u32 *status, u32 *context_id)
+static u32
+get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
+ u32 *context_id)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ u32 status;
- if (WARN_ON(read_pointer >= GEN8_CSB_ENTRIES))
- return;
+ read_pointer %= GEN8_CSB_ENTRIES;
+
+ status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));
+
+ if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
+ return 0;
+
+ *context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
+ read_pointer));
- *status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer));
- *context_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer));
+ return status;
}
/**
* intel_lrc_irq_handler() - handle Context Switch interrupts
- * @ring: Engine Command Streamer to handle.
+ * @engine: Engine Command Streamer to handle.
*
* Check the unread Context Status Buffers and manage the submission of new
* contexts to the ELSP accordingly.
*/
-void intel_lrc_irq_handler(struct intel_engine_cs *ring)
+static void intel_lrc_irq_handler(unsigned long data)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
u32 status_pointer;
- u8 read_pointer;
- u8 write_pointer;
- u32 status = 0;
- u32 status_id;
- u32 submit_contexts = 0;
+ unsigned int read_pointer, write_pointer;
+ u32 csb[GEN8_CSB_ENTRIES][2];
+ unsigned int csb_read = 0, i;
+ unsigned int submit_contexts = 0;
+
+ intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
- status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
+ status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));
- read_pointer = ring->next_context_status_buffer;
+ read_pointer = engine->next_context_status_buffer;
write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
if (read_pointer > write_pointer)
write_pointer += GEN8_CSB_ENTRIES;
- spin_lock(&ring->execlist_lock);
-
while (read_pointer < write_pointer) {
+ if (WARN_ON_ONCE(csb_read == GEN8_CSB_ENTRIES))
+ break;
+ csb[csb_read][0] = get_context_status(engine, ++read_pointer,
+ &csb[csb_read][1]);
+ csb_read++;
+ }
- get_context_status(ring, ++read_pointer % GEN8_CSB_ENTRIES,
- &status, &status_id);
+ engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
- if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
- continue;
+ /* Update the read pointer to the old write pointer. Manual ringbuffer
+ * management ftw </sarcasm> */
+ I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
+ _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
+ engine->next_context_status_buffer << 8));
- if (status & GEN8_CTX_STATUS_PREEMPTED) {
- if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
- if (execlists_check_remove_request(ring, status_id))
+ intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
+
+ spin_lock(&engine->execlist_lock);
+
+ for (i = 0; i < csb_read; i++) {
+ if (unlikely(csb[i][0] & GEN8_CTX_STATUS_PREEMPTED)) {
+ if (csb[i][0] & GEN8_CTX_STATUS_LITE_RESTORE) {
+ if (execlists_check_remove_request(engine, csb[i][1]))
WARN(1, "Lite Restored request removed from queue\n");
} else
WARN(1, "Preemption without Lite Restore\n");
}
- if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
- (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
- if (execlists_check_remove_request(ring, status_id))
- submit_contexts++;
- }
+ if (csb[i][0] & (GEN8_CTX_STATUS_ACTIVE_IDLE |
+ GEN8_CTX_STATUS_ELEMENT_SWITCH))
+ submit_contexts +=
+ execlists_check_remove_request(engine, csb[i][1]);
}
- if (ring->disable_lite_restore_wa) {
- /* Prevent a ctx to preempt itself */
- if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
- (submit_contexts != 0))
- execlists_context_unqueue(ring);
- } else if (submit_contexts != 0) {
- execlists_context_unqueue(ring);
+ if (submit_contexts) {
+ if (!engine->disable_lite_restore_wa ||
+ (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE))
+ execlists_context_unqueue(engine);
}
- spin_unlock(&ring->execlist_lock);
+ spin_unlock(&engine->execlist_lock);
if (unlikely(submit_contexts > 2))
DRM_ERROR("More than two context complete events?\n");
-
- ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
-
- /* Update the read pointer to the old write pointer. Manual ringbuffer
- * management ftw </sarcasm> */
- I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
- _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
- ring->next_context_status_buffer << 8));
}
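
The rewritten handler above first drains the context status buffer into a local array under forcewake and only then takes the execlist lock. The pointer arithmetic is circular: if the hardware write pointer has wrapped past the software read pointer, the write pointer is bumped by the buffer size so a simple less-than loop visits exactly the new entries. The standalone sketch below assumes a 6-entry buffer for GEN8_CSB_ENTRIES and made-up pointer values.

/* Illustration of the circular CSB read/write pointer handling above. */
#include <stdio.h>

#define EXAMPLE_CSB_ENTRIES	6u	/* assumed value of GEN8_CSB_ENTRIES */

int main(void)
{
	unsigned int read_pointer = 4, write_pointer = 1;	/* writer has wrapped */

	if (read_pointer > write_pointer)
		write_pointer += EXAMPLE_CSB_ENTRIES;

	while (read_pointer < write_pointer) {
		unsigned int idx = ++read_pointer % EXAMPLE_CSB_ENTRIES;
		printf("consume entry %u\n", idx);	/* prints 5, 0, 1 */
	}

	/* store the wrapped value back as the next read pointer */
	printf("next read pointer = %u\n", write_pointer % EXAMPLE_CSB_ENTRIES);
	return 0;
}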
-static int execlists_context_queue(struct drm_i915_gem_request *request)
+static void execlists_context_queue(struct drm_i915_gem_request *request)
{
- struct intel_engine_cs *ring = request->ring;
+ struct intel_engine_cs *engine = request->engine;
struct drm_i915_gem_request *cursor;
int num_elements = 0;
if (request->ctx != request->i915->kernel_context)
- intel_lr_context_pin(request->ctx, ring);
+ intel_lr_context_pin(request->ctx, engine);
i915_gem_request_reference(request);
- spin_lock_irq(&ring->execlist_lock);
+ spin_lock_bh(&engine->execlist_lock);
- list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
+ list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
if (++num_elements > 2)
break;
if (num_elements > 2) {
struct drm_i915_gem_request *tail_req;
- tail_req = list_last_entry(&ring->execlist_queue,
+ tail_req = list_last_entry(&engine->execlist_queue,
struct drm_i915_gem_request,
execlist_link);
@@ -621,41 +634,39 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
WARN(tail_req->elsp_submitted != 0,
"More than 2 already-submitted reqs queued\n");
list_move_tail(&tail_req->execlist_link,
- &ring->execlist_retired_req_list);
+ &engine->execlist_retired_req_list);
}
}
- list_add_tail(&request->execlist_link, &ring->execlist_queue);
+ list_add_tail(&request->execlist_link, &engine->execlist_queue);
if (num_elements == 0)
- execlists_context_unqueue(ring);
-
- spin_unlock_irq(&ring->execlist_lock);
+ execlists_context_unqueue(engine);
- return 0;
+ spin_unlock_bh(&engine->execlist_lock);
}
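
The queueing path above reflects that the ELSP has only two submission ports: once more than two requests are outstanding, the new request is expected to coalesce with the previous tail of the same context, and the old tail is moved to the retired list early. Note also that the execlist lock is now taken with the _bh variant because completion handling moves into a tasklet later in this patch. A toy model of the queue-depth decision (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model of the depth check in execlists_context_queue(): with only
 * two ELSP ports, a queue already holding more than two requests means
 * the new request should coalesce with the previous tail rather than
 * grow the queue further.
 */
static bool should_coalesce_tail(int num_elements_before_add)
{
	return num_elements_before_add > 2;
}

int main(void)
{
	for (int n = 0; n <= 3; n++)
		printf("queue depth %d -> %s\n", n,
		       should_coalesce_tail(n) ? "coalesce with tail"
						: "just append");
	return 0;
}
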
static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
uint32_t flush_domains;
int ret;
flush_domains = 0;
- if (ring->gpu_caches_dirty)
+ if (engine->gpu_caches_dirty)
flush_domains = I915_GEM_GPU_DOMAINS;
- ret = ring->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
+ ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
if (ret)
return ret;
- ring->gpu_caches_dirty = false;
+ engine->gpu_caches_dirty = false;
return 0;
}
static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
- const unsigned other_rings = ~intel_ring_flag(req->ring);
+ const unsigned other_rings = ~intel_engine_flag(req->engine);
struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
@@ -665,7 +676,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
struct drm_i915_gem_object *obj = vma->obj;
if (obj->active & other_rings) {
- ret = i915_gem_object_sync(obj, req->ring, &req);
+ ret = i915_gem_object_sync(obj, req->engine, &req);
if (ret)
return ret;
}
@@ -689,7 +700,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
{
int ret = 0;
- request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
+ request->ringbuf = request->ctx->engine[request->engine->id].ringbuf;
if (i915.enable_guc_submission) {
/*
@@ -705,53 +716,11 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
}
if (request->ctx != request->i915->kernel_context)
- ret = intel_lr_context_pin(request->ctx, request->ring);
+ ret = intel_lr_context_pin(request->ctx, request->engine);
return ret;
}
-static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
- int bytes)
-{
- struct intel_ringbuffer *ringbuf = req->ringbuf;
- struct intel_engine_cs *ring = req->ring;
- struct drm_i915_gem_request *target;
- unsigned space;
- int ret;
-
- if (intel_ring_space(ringbuf) >= bytes)
- return 0;
-
- /* The whole point of reserving space is to not wait! */
- WARN_ON(ringbuf->reserved_in_use);
-
- list_for_each_entry(target, &ring->request_list, list) {
- /*
- * The request queue is per-engine, so can contain requests
- * from multiple ringbuffers. Here, we must ignore any that
- * aren't from the ringbuffer we're considering.
- */
- if (target->ringbuf != ringbuf)
- continue;
-
- /* Would completion of this request free enough space? */
- space = __intel_ring_space(target->postfix, ringbuf->tail,
- ringbuf->size);
- if (space >= bytes)
- break;
- }
-
- if (WARN_ON(&target->list == &ring->request_list))
- return -ENOSPC;
-
- ret = i915_wait_request(target);
- if (ret)
- return ret;
-
- ringbuf->space = space;
- return 0;
-}
-
/*
* intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
* @request: Request to advance the logical ringbuffer of.
@@ -766,7 +735,7 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
struct drm_i915_private *dev_priv = request->i915;
- struct intel_engine_cs *engine = request->ring;
+ struct intel_engine_cs *engine = request->engine;
intel_logical_ring_advance(ringbuf);
request->tail = ringbuf->tail;
@@ -781,7 +750,7 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance(ringbuf);
- if (intel_ring_stopped(engine))
+ if (intel_engine_stopped(engine))
return 0;
if (engine->last_context != request->ctx) {
@@ -803,101 +772,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
return 0;
}
-static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
-{
- uint32_t __iomem *virt;
- int rem = ringbuf->size - ringbuf->tail;
-
- virt = ringbuf->virtual_start + ringbuf->tail;
- rem /= 4;
- while (rem--)
- iowrite32(MI_NOOP, virt++);
-
- ringbuf->tail = 0;
- intel_ring_update_space(ringbuf);
-}
-
-static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
-{
- struct intel_ringbuffer *ringbuf = req->ringbuf;
- int remain_usable = ringbuf->effective_size - ringbuf->tail;
- int remain_actual = ringbuf->size - ringbuf->tail;
- int ret, total_bytes, wait_bytes = 0;
- bool need_wrap = false;
-
- if (ringbuf->reserved_in_use)
- total_bytes = bytes;
- else
- total_bytes = bytes + ringbuf->reserved_size;
-
- if (unlikely(bytes > remain_usable)) {
- /*
- * Not enough space for the basic request. So need to flush
- * out the remainder and then wait for base + reserved.
- */
- wait_bytes = remain_actual + total_bytes;
- need_wrap = true;
- } else {
- if (unlikely(total_bytes > remain_usable)) {
- /*
- * The base request will fit but the reserved space
- * falls off the end. So don't need an immediate wrap
- * and only need to effectively wait for the reserved
- * size space from the start of ringbuffer.
- */
- wait_bytes = remain_actual + ringbuf->reserved_size;
- } else if (total_bytes > ringbuf->space) {
- /* No wrapping required, just waiting. */
- wait_bytes = total_bytes;
- }
- }
-
- if (wait_bytes) {
- ret = logical_ring_wait_for_space(req, wait_bytes);
- if (unlikely(ret))
- return ret;
-
- if (need_wrap)
- __wrap_ring_buffer(ringbuf);
- }
-
- return 0;
-}
-
-/**
- * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
- *
- * @req: The request to start some new work for
- * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
- *
- * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
- * be wrapped, or wait a bit for the tail to be updated). This function takes care of that
- * and also preallocates a request (every workload submission is still mediated through
- * requests, same as it did with legacy ringbuffer submission).
- *
- * Return: non-zero if the ringbuffer is not ready to be written to.
- */
-int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
-{
- struct drm_i915_private *dev_priv;
- int ret;
-
- WARN_ON(req == NULL);
- dev_priv = req->ring->dev->dev_private;
-
- ret = i915_gem_check_wedge(&dev_priv->gpu_error,
- dev_priv->mm.interruptible);
- if (ret)
- return ret;
-
- ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
- if (ret)
- return ret;
-
- req->ringbuf->space -= num_dwords * sizeof(uint32_t);
- return 0;
-}
-
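
The removed logical_ring_prepare()/intel_logical_ring_begin() pair duplicated the legacy ring's space accounting, which is why this patch drops them in favour of the common intel_ring_begin(). A self-contained model of the wait/wrap arithmetic those removed helpers performed (struct and function names here are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct ring_model {
	int size;		/* total ring size */
	int effective_size;	/* size minus the gap kept free at the end */
	int tail;		/* current write offset */
	int space;		/* bytes currently free */
	int reserved_size;	/* space held back for emitting the request tail */
	bool reserved_in_use;
};

/*
 * Decide how many bytes must drain before a write of 'bytes' can
 * proceed, and whether the tail must wrap to the start first.
 */
static int bytes_to_wait_for(const struct ring_model *r, int bytes,
			     bool *need_wrap)
{
	int remain_usable = r->effective_size - r->tail;
	int remain_actual = r->size - r->tail;
	int total_bytes = r->reserved_in_use ? bytes : bytes + r->reserved_size;
	int wait_bytes = 0;

	*need_wrap = false;
	if (bytes > remain_usable) {
		/* request itself doesn't fit: flush the remainder and wrap */
		wait_bytes = remain_actual + total_bytes;
		*need_wrap = true;
	} else if (total_bytes > remain_usable) {
		/* request fits, but the reserved space falls off the end */
		wait_bytes = remain_actual + r->reserved_size;
	} else if (total_bytes > r->space) {
		/* no wrap needed, just wait for space to drain */
		wait_bytes = total_bytes;
	}
	return wait_bytes;
}

int main(void)
{
	struct ring_model r = { .size = 4096, .effective_size = 4096 - 16,
				.tail = 4000, .space = 64,
				.reserved_size = 160 };
	bool wrap;
	int wait = bytes_to_wait_for(&r, 128, &wrap);

	printf("wait for %d bytes, wrap=%d\n", wait, wrap);
	return 0;
}
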
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
{
/*
@@ -910,7 +784,7 @@ int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
*/
intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
- return intel_logical_ring_begin(request, 0);
+ return intel_ring_begin(request, 0);
}
/**
@@ -935,9 +809,9 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
struct list_head *vmas)
{
struct drm_device *dev = params->dev;
- struct intel_engine_cs *ring = params->ring;
+ struct intel_engine_cs *engine = params->engine;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf;
+ struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
u64 exec_start;
int instp_mode;
u32 instp_mask;
@@ -949,7 +823,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
- if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+ if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
return -EINVAL;
}
@@ -978,9 +852,9 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
if (ret)
return ret;
- if (ring == &dev_priv->ring[RCS] &&
+ if (engine == &dev_priv->engine[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
- ret = intel_logical_ring_begin(params->request, 4);
+ ret = intel_ring_begin(params->request, 4);
if (ret)
return ret;
@@ -996,116 +870,116 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
exec_start = params->batch_obj_vm_offset +
args->batch_start_offset;
- ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags);
+ ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
if (ret)
return ret;
trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
i915_gem_execbuffer_move_to_active(vmas, params->request);
- i915_gem_execbuffer_retire_commands(params);
return 0;
}
-void intel_execlists_retire_requests(struct intel_engine_cs *ring)
+void intel_execlists_retire_requests(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *req, *tmp;
struct list_head retired_list;
- WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
- if (list_empty(&ring->execlist_retired_req_list))
+ WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
+ if (list_empty(&engine->execlist_retired_req_list))
return;
INIT_LIST_HEAD(&retired_list);
- spin_lock_irq(&ring->execlist_lock);
- list_replace_init(&ring->execlist_retired_req_list, &retired_list);
- spin_unlock_irq(&ring->execlist_lock);
+ spin_lock_bh(&engine->execlist_lock);
+ list_replace_init(&engine->execlist_retired_req_list, &retired_list);
+ spin_unlock_bh(&engine->execlist_lock);
list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
struct intel_context *ctx = req->ctx;
struct drm_i915_gem_object *ctx_obj =
- ctx->engine[ring->id].state;
+ ctx->engine[engine->id].state;
if (ctx_obj && (ctx != req->i915->kernel_context))
- intel_lr_context_unpin(ctx, ring);
+ intel_lr_context_unpin(ctx, engine);
list_del(&req->execlist_link);
i915_gem_request_unreference(req);
}
}
-void intel_logical_ring_stop(struct intel_engine_cs *ring)
+void intel_logical_ring_stop(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
int ret;
- if (!intel_ring_initialized(ring))
+ if (!intel_engine_initialized(engine))
return;
- ret = intel_ring_idle(ring);
- if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
+ ret = intel_engine_idle(engine);
+ if (ret)
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
- ring->name, ret);
+ engine->name, ret);
/* TODO: Is this correct with Execlists enabled? */
- I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
- if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
- DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
+ I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
+ if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
+ DRM_ERROR("%s :timed out trying to stop ring\n", engine->name);
return;
}
- I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+ I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
}
int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
- if (!ring->gpu_caches_dirty)
+ if (!engine->gpu_caches_dirty)
return 0;
- ret = ring->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
+ ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
- ring->gpu_caches_dirty = false;
+ engine->gpu_caches_dirty = false;
return 0;
}
static int intel_lr_context_do_pin(struct intel_context *ctx,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
- struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
- struct page *lrc_state_page;
- uint32_t *lrc_reg_state;
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
+ struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
+ void *vaddr;
+ u32 *lrc_reg_state;
int ret;
- WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (ret)
return ret;
- lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
- if (WARN_ON(!lrc_state_page)) {
- ret = -ENODEV;
+ vaddr = i915_gem_object_pin_map(ctx_obj);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
goto unpin_ctx_obj;
}
- ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
+ lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+
+ ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
if (ret)
- goto unpin_ctx_obj;
+ goto unpin_map;
- ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
- intel_lr_context_descriptor_update(ctx, ring);
- lrc_reg_state = kmap(lrc_state_page);
+ ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
+ intel_lr_context_descriptor_update(ctx, engine);
lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
- ctx->engine[ring->id].lrc_reg_state = lrc_reg_state;
+ ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
ctx_obj->dirty = true;
/* Invalidate GuC TLB. */
@@ -1114,6 +988,8 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
return ret;
+unpin_map:
+ i915_gem_object_unpin_map(ctx_obj);
unpin_ctx_obj:
i915_gem_object_ggtt_unpin(ctx_obj);
@@ -1146,7 +1022,7 @@ void intel_lr_context_unpin(struct intel_context *ctx,
WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
if (--ctx->engine[engine->id].pin_count == 0) {
- kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state));
+ i915_gem_object_unpin_map(ctx_obj);
intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
ctx->engine[engine->id].lrc_vma = NULL;
@@ -1160,21 +1036,21 @@ void intel_lr_context_unpin(struct intel_context *ctx,
static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
int ret, i;
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
struct intel_ringbuffer *ringbuf = req->ringbuf;
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
if (w->count == 0)
return 0;
- ring->gpu_caches_dirty = true;
+ engine->gpu_caches_dirty = true;
ret = logical_ring_flush_all_caches(req);
if (ret)
return ret;
- ret = intel_logical_ring_begin(req, w->count * 2 + 2);
+ ret = intel_ring_begin(req, w->count * 2 + 2);
if (ret)
return ret;
@@ -1187,7 +1063,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
intel_logical_ring_advance(ringbuf);
- ring->gpu_caches_dirty = true;
+ engine->gpu_caches_dirty = true;
ret = logical_ring_flush_all_caches(req);
if (ret)
return ret;
@@ -1223,25 +1099,27 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
* This WA is also required for Gen9 so extracting as a function avoids
* code duplication.
*/
-static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
+static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
uint32_t *const batch,
uint32_t index)
{
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
/*
- * WaDisableLSQCROPERFforOCL:skl
+ * WaDisableLSQCROPERFforOCL:skl,kbl
* This WA is implemented in skl_init_clock_gating() but since
* this batch updates GEN8_L3SQCREG4 with default value we need to
* set this bit here to retain the WA during flush.
*/
- if (IS_SKL_REVID(ring->dev, 0, SKL_REVID_E0))
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0) ||
+ IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT));
wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
- wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
+ wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
wa_ctx_emit(batch, index, 0);
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
@@ -1259,7 +1137,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT));
wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
- wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
+ wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
wa_ctx_emit(batch, index, 0);
return index;
@@ -1312,7 +1190,7 @@ static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
* Return: non-zero if we exceed the PAGE_SIZE limit.
*/
-static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
+static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
struct i915_wa_ctx_bb *wa_ctx,
uint32_t *const batch,
uint32_t *offset)
@@ -1324,8 +1202,8 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
- if (IS_BROADWELL(ring->dev)) {
- int rc = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+ if (IS_BROADWELL(engine->dev)) {
+ int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
if (rc < 0)
return rc;
index = rc;
@@ -1333,7 +1211,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
/* Actual scratch location is at 128 bytes offset */
- scratch_addr = ring->scratch.gtt_offset + 2*CACHELINE_BYTES;
+ scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
@@ -1375,7 +1253,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
* This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
* to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
*/
-static int gen8_init_perctx_bb(struct intel_engine_cs *ring,
+static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
struct i915_wa_ctx_bb *wa_ctx,
uint32_t *const batch,
uint32_t *offset)
@@ -1390,13 +1268,14 @@ static int gen8_init_perctx_bb(struct intel_engine_cs *ring,
return wa_ctx_end(wa_ctx, *offset = index, 1);
}
-static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
+static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
struct i915_wa_ctx_bb *wa_ctx,
uint32_t *const batch,
uint32_t *offset)
{
int ret;
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
/* WaDisableCtxRestoreArbitration:skl,bxt */
@@ -1405,11 +1284,27 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
- ret = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+ ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
if (ret < 0)
return ret;
index = ret;
+ /* WaClearSlmSpaceAtContextSwitch:kbl */
+ /* Actual scratch location is at 128 bytes offset */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) {
+ uint32_t scratch_addr
+ = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
+
+ wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
+ wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE));
+ wa_ctx_emit(batch, index, scratch_addr);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+ }
/* Pad to end of cacheline */
while (index % CACHELINE_DWORDS)
wa_ctx_emit(batch, index, MI_NOOP);
@@ -1417,12 +1312,12 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
}
-static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
+static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
struct i915_wa_ctx_bb *wa_ctx,
uint32_t *const batch,
uint32_t *offset)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
@@ -1435,6 +1330,25 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
wa_ctx_emit(batch, index, MI_NOOP);
}
+ /* WaClearTdlStateAckDirtyBits:bxt */
+ if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
+ wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
+
+ wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
+ wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
+
+ wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE1);
+ wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
+
+ wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE2);
+ wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
+
+ wa_ctx_emit_reg(batch, index, GEN7_ROW_CHICKEN2);
+ /* dummy write to CS, mask bits are 0 to ensure the register is not modified */
+ wa_ctx_emit(batch, index, 0x0);
+ wa_ctx_emit(batch, index, MI_NOOP);
+ }
+
/* WaDisableCtxRestoreArbitration:skl,bxt */
if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1))
@@ -1445,60 +1359,61 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
return wa_ctx_end(wa_ctx, *offset = index, 1);
}
-static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *ring, u32 size)
+static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
{
int ret;
- ring->wa_ctx.obj = i915_gem_alloc_object(ring->dev, PAGE_ALIGN(size));
- if (!ring->wa_ctx.obj) {
+ engine->wa_ctx.obj = i915_gem_alloc_object(engine->dev,
+ PAGE_ALIGN(size));
+ if (!engine->wa_ctx.obj) {
DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
return -ENOMEM;
}
- ret = i915_gem_obj_ggtt_pin(ring->wa_ctx.obj, PAGE_SIZE, 0);
+ ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
if (ret) {
DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
ret);
- drm_gem_object_unreference(&ring->wa_ctx.obj->base);
+ drm_gem_object_unreference(&engine->wa_ctx.obj->base);
return ret;
}
return 0;
}
-static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *ring)
+static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
{
- if (ring->wa_ctx.obj) {
- i915_gem_object_ggtt_unpin(ring->wa_ctx.obj);
- drm_gem_object_unreference(&ring->wa_ctx.obj->base);
- ring->wa_ctx.obj = NULL;
+ if (engine->wa_ctx.obj) {
+ i915_gem_object_ggtt_unpin(engine->wa_ctx.obj);
+ drm_gem_object_unreference(&engine->wa_ctx.obj->base);
+ engine->wa_ctx.obj = NULL;
}
}
-static int intel_init_workaround_bb(struct intel_engine_cs *ring)
+static int intel_init_workaround_bb(struct intel_engine_cs *engine)
{
int ret;
uint32_t *batch;
uint32_t offset;
struct page *page;
- struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+ struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
- WARN_ON(ring->id != RCS);
+ WARN_ON(engine->id != RCS);
/* update this when WA for higher Gen are added */
- if (INTEL_INFO(ring->dev)->gen > 9) {
+ if (INTEL_INFO(engine->dev)->gen > 9) {
DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
- INTEL_INFO(ring->dev)->gen);
+ INTEL_INFO(engine->dev)->gen);
return 0;
}
/* some WA perform writes to scratch page, ensure it is valid */
- if (ring->scratch.obj == NULL) {
- DRM_ERROR("scratch page not allocated for %s\n", ring->name);
+ if (engine->scratch.obj == NULL) {
+ DRM_ERROR("scratch page not allocated for %s\n", engine->name);
return -EINVAL;
}
- ret = lrc_setup_wa_ctx_obj(ring, PAGE_SIZE);
+ ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
if (ret) {
DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
return ret;
@@ -1508,29 +1423,29 @@ static int intel_init_workaround_bb(struct intel_engine_cs *ring)
batch = kmap_atomic(page);
offset = 0;
- if (INTEL_INFO(ring->dev)->gen == 8) {
- ret = gen8_init_indirectctx_bb(ring,
+ if (INTEL_INFO(engine->dev)->gen == 8) {
+ ret = gen8_init_indirectctx_bb(engine,
&wa_ctx->indirect_ctx,
batch,
&offset);
if (ret)
goto out;
- ret = gen8_init_perctx_bb(ring,
+ ret = gen8_init_perctx_bb(engine,
&wa_ctx->per_ctx,
batch,
&offset);
if (ret)
goto out;
- } else if (INTEL_INFO(ring->dev)->gen == 9) {
- ret = gen9_init_indirectctx_bb(ring,
+ } else if (INTEL_INFO(engine->dev)->gen == 9) {
+ ret = gen9_init_indirectctx_bb(engine,
&wa_ctx->indirect_ctx,
batch,
&offset);
if (ret)
goto out;
- ret = gen9_init_perctx_bb(ring,
+ ret = gen9_init_perctx_bb(engine,
&wa_ctx->per_ctx,
batch,
&offset);
@@ -1541,27 +1456,36 @@ static int intel_init_workaround_bb(struct intel_engine_cs *ring)
out:
kunmap_atomic(batch);
if (ret)
- lrc_destroy_wa_ctx_obj(ring);
+ lrc_destroy_wa_ctx_obj(engine);
return ret;
}
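
The workaround batch builders above all emit dwords through a running index into a single page and bail out if the page would overflow. A minimal standalone sketch of that emission pattern (the constant and helper below are illustrative, not the driver's wa_ctx_emit macro itself):

#include <stdint.h>
#include <stdio.h>

#define WA_BATCH_DWORDS (4096 / sizeof(uint32_t))	/* one page of dwords */

/* Append one dword at the running index, refusing to overrun the page. */
static int wa_emit(uint32_t *batch, unsigned int *index, uint32_t dword)
{
	if (*index >= WA_BATCH_DWORDS)
		return -1;		/* would overflow the WA page */
	batch[(*index)++] = dword;
	return 0;
}

int main(void)
{
	uint32_t page[WA_BATCH_DWORDS];
	unsigned int index = 0;

	wa_emit(page, &index, 0x00000000);	/* e.g. an MI_NOOP */
	printf("emitted %u dwords\n", index);
	return 0;
}
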
-static int gen8_init_common_ring(struct intel_engine_cs *ring)
+static void lrc_init_hws(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
+
+ I915_WRITE(RING_HWS_PGA(engine->mmio_base),
+ (u32)engine->status_page.gfx_addr);
+ POSTING_READ(RING_HWS_PGA(engine->mmio_base));
+}
+
+static int gen8_init_common_ring(struct intel_engine_cs *engine)
+{
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u8 next_context_status_buffer_hw;
+ unsigned int next_context_status_buffer_hw;
- lrc_setup_hardware_status_page(ring,
- dev_priv->kernel_context->engine[ring->id].state);
+ lrc_init_hws(engine);
- I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
- I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
+ I915_WRITE_IMR(engine,
+ ~(engine->irq_enable_mask | engine->irq_keep_mask));
+ I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
- I915_WRITE(RING_MODE_GEN7(ring),
+ I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
- POSTING_READ(RING_MODE_GEN7(ring));
+ POSTING_READ(RING_MODE_GEN7(engine));
/*
* Instead of resetting the Context Status Buffer (CSB) read pointer to
@@ -1576,7 +1500,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
* BXT | ? | ? |
*/
next_context_status_buffer_hw =
- GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(ring)));
+ GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));
/*
* When the CSB registers are reset (also after power-up / gpu reset),
@@ -1586,21 +1510,21 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
- ring->next_context_status_buffer = next_context_status_buffer_hw;
- DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
+ engine->next_context_status_buffer = next_context_status_buffer_hw;
+ DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
- memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
+ intel_engine_init_hangcheck(engine);
- return 0;
+ return intel_mocs_init_engine(engine);
}
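
The fix-up above exists because, after power-up or reset, the hardware reports the CSB write pointer as the all-ones field value rather than a valid slot. A small sketch of that sanitisation, assuming a 3-bit pointer field (mask 0x07) and six entries:

#include <stdint.h>
#include <stdio.h>

#define CSB_ENTRIES	6
#define CSB_PTR_MASK	0x07	/* write pointer field assumed 3 bits wide */

/* Treat the invalid post-reset value as "last entry" so the first
 * status event read starts from slot 0. */
static unsigned int sanitize_csb_write_ptr(uint32_t hw_value)
{
	if (hw_value == CSB_PTR_MASK)
		return CSB_ENTRIES - 1;
	return hw_value;
}

int main(void)
{
	printf("after reset: %u\n", sanitize_csb_write_ptr(CSB_PTR_MASK));
	printf("normal:      %u\n", sanitize_csb_write_ptr(3));
	return 0;
}
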
-static int gen8_init_render_ring(struct intel_engine_cs *ring)
+static int gen8_init_render_ring(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ret = gen8_init_common_ring(ring);
+ ret = gen8_init_common_ring(engine);
if (ret)
return ret;
@@ -1614,29 +1538,29 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
- return init_workarounds_ring(ring);
+ return init_workarounds_ring(engine);
}
-static int gen9_init_render_ring(struct intel_engine_cs *ring)
+static int gen9_init_render_ring(struct intel_engine_cs *engine)
{
int ret;
- ret = gen8_init_common_ring(ring);
+ ret = gen8_init_common_ring(engine);
if (ret)
return ret;
- return init_workarounds_ring(ring);
+ return init_workarounds_ring(engine);
}
static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
{
struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
struct intel_ringbuffer *ringbuf = req->ringbuf;
const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
int i, ret;
- ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2);
+ ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
if (ret)
return ret;
@@ -1644,9 +1568,11 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
- intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_UDW(ring, i));
+ intel_logical_ring_emit_reg(ringbuf,
+ GEN8_RING_PDP_UDW(engine, i));
intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
- intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_LDW(ring, i));
+ intel_logical_ring_emit_reg(ringbuf,
+ GEN8_RING_PDP_LDW(engine, i));
intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
}
@@ -1670,7 +1596,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
* not idle). PML4 is allocated during ppgtt init so this is
* not needed in 48-bit.*/
if (req->ctx->ppgtt &&
- (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) {
+ (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
if (!USES_FULL_48BIT_PPGTT(req->i915) &&
!intel_vgpu_active(req->i915->dev)) {
ret = intel_logical_ring_emit_pdps(req);
@@ -1678,10 +1604,10 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
return ret;
}
- req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
+ req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
}
- ret = intel_logical_ring_begin(req, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
@@ -1698,9 +1624,9 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
return 0;
}
-static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
+static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
@@ -1708,25 +1634,26 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount++ == 0) {
- I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
- POSTING_READ(RING_IMR(ring->mmio_base));
+ if (engine->irq_refcount++ == 0) {
+ I915_WRITE_IMR(engine,
+ ~(engine->irq_enable_mask | engine->irq_keep_mask));
+ POSTING_READ(RING_IMR(engine->mmio_base));
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
}
-static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
+static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount == 0) {
- I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
- POSTING_READ(RING_IMR(ring->mmio_base));
+ if (--engine->irq_refcount == 0) {
+ I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
+ POSTING_READ(RING_IMR(engine->mmio_base));
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
@@ -1736,13 +1663,13 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
u32 unused)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
- struct intel_engine_cs *ring = ringbuf->ring;
- struct drm_device *dev = ring->dev;
+ struct intel_engine_cs *engine = ringbuf->engine;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t cmd;
int ret;
- ret = intel_logical_ring_begin(request, 4);
+ ret = intel_ring_begin(request, 4);
if (ret)
return ret;
@@ -1757,7 +1684,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
cmd |= MI_INVALIDATE_TLB;
- if (ring == &dev_priv->ring[VCS])
+ if (engine == &dev_priv->engine[VCS])
cmd |= MI_INVALIDATE_BSD;
}
@@ -1777,11 +1704,12 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
u32 flush_domains)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
- struct intel_engine_cs *ring = ringbuf->ring;
- u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
- bool vf_flush_wa = false;
+ struct intel_engine_cs *engine = ringbuf->engine;
+ u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ bool vf_flush_wa = false, dc_flush_wa = false;
u32 flags = 0;
int ret;
+ int len;
flags |= PIPE_CONTROL_CS_STALL;
@@ -1806,11 +1734,23 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
* pipe control.
*/
- if (IS_GEN9(ring->dev))
+ if (IS_GEN9(engine->dev))
vf_flush_wa = true;
+
+ /* WaForGAMHang:kbl */
+ if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
+ dc_flush_wa = true;
}
- ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
+ len = 6;
+
+ if (vf_flush_wa)
+ len += 6;
+
+ if (dc_flush_wa)
+ len += 12;
+
+ ret = intel_ring_begin(request, len);
if (ret)
return ret;
@@ -1823,30 +1763,48 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
intel_logical_ring_emit(ringbuf, 0);
}
+ if (dc_flush_wa) {
+ intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+ intel_logical_ring_emit(ringbuf, PIPE_CONTROL_DC_FLUSH_ENABLE);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ }
+
intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
intel_logical_ring_emit(ringbuf, flags);
intel_logical_ring_emit(ringbuf, scratch_addr);
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, 0);
+
+ if (dc_flush_wa) {
+ intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+ intel_logical_ring_emit(ringbuf, PIPE_CONTROL_CS_STALL);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ }
+
intel_logical_ring_advance(ringbuf);
return 0;
}
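
The new length computation simply budgets 6 dwords per PIPE_CONTROL: the main flush, an optional preceding null PIPE_CONTROL for the GEN9 VF-cache workaround, and two more around it for the KBL WaForGAMHang path. A standalone sketch of that budget:

#include <stdbool.h>
#include <stdio.h>

static int flush_render_dwords(bool vf_flush_wa, bool dc_flush_wa)
{
	int len = 6;		/* the main PIPE_CONTROL */

	if (vf_flush_wa)
		len += 6;	/* preceding null PIPE_CONTROL */
	if (dc_flush_wa)
		len += 12;	/* DC-flush PIPE_CONTROL before + CS-stall one after */
	return len;
}

int main(void)
{
	printf("plain: %d, gen9: %d, kbl<=B0: %d dwords\n",
	       flush_render_dwords(false, false),
	       flush_render_dwords(true, false),
	       flush_render_dwords(true, true));
	return 0;
}
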
-static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+static u32 gen8_get_seqno(struct intel_engine_cs *engine)
{
- return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+ return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}
-static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
{
- intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+ intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
}
-static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
{
-
/*
* On BXT A steppings there is a HW coherency issue whereby the
* MI_STORE_DATA_IMM storing the completed request's seqno
@@ -1857,19 +1815,15 @@ static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
* bxt_a_set_seqno(), where we also do a clflush after the write. So
* this clflush in practice becomes an invalidate operation.
*/
-
- if (!lazy_coherency)
- intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
-
- return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+ intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
}
-static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
{
- intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+ intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
/* See bxt_a_get_seqno() explaining the reason for the clflush. */
- intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
+ intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
}
/*
@@ -1889,7 +1843,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
struct intel_ringbuffer *ringbuf = request->ringbuf;
int ret;
- ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
+ ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
if (ret)
return ret;
@@ -1899,7 +1853,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
intel_logical_ring_emit(ringbuf,
(MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
intel_logical_ring_emit(ringbuf,
- hws_seqno_address(request->ring) |
+ hws_seqno_address(request->engine) |
MI_FLUSH_DW_USE_GTT);
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
@@ -1913,7 +1867,7 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
struct intel_ringbuffer *ringbuf = request->ringbuf;
int ret;
- ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
+ ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
if (ret)
return ret;
@@ -1929,7 +1883,7 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
(PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE));
- intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
+ intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine));
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
/* We're thrashing one dword of HWS. */
@@ -1944,19 +1898,19 @@ static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
struct render_state so;
int ret;
- ret = i915_gem_render_state_prepare(req->ring, &so);
+ ret = i915_gem_render_state_prepare(req->engine, &so);
if (ret)
return ret;
if (so.rodata == NULL)
return 0;
- ret = req->ring->emit_bb_start(req, so.ggtt_offset,
+ ret = req->engine->emit_bb_start(req, so.ggtt_offset,
I915_DISPATCH_SECURE);
if (ret)
goto out;
- ret = req->ring->emit_bb_start(req,
+ ret = req->engine->emit_bb_start(req,
(so.ggtt_offset + so.aux_batch_offset),
I915_DISPATCH_SECURE);
if (ret)
@@ -1994,146 +1948,197 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
* @ring: Engine Command Streamer.
*
*/
-void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
+void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv;
- if (!intel_ring_initialized(ring))
+ if (!intel_engine_initialized(engine))
return;
- dev_priv = ring->dev->dev_private;
+ /*
+ * Tasklet cannot be active at this point due to intel_mark_active/idle
+ * so this is just for documentation.
+ */
+ if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
+ tasklet_kill(&engine->irq_tasklet);
+
+ dev_priv = engine->dev->dev_private;
- if (ring->buffer) {
- intel_logical_ring_stop(ring);
- WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+ if (engine->buffer) {
+ intel_logical_ring_stop(engine);
+ WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
}
- if (ring->cleanup)
- ring->cleanup(ring);
+ if (engine->cleanup)
+ engine->cleanup(engine);
- i915_cmd_parser_fini_ring(ring);
- i915_gem_batch_pool_fini(&ring->batch_pool);
+ i915_cmd_parser_fini_ring(engine);
+ i915_gem_batch_pool_fini(&engine->batch_pool);
- if (ring->status_page.obj) {
- kunmap(sg_page(ring->status_page.obj->pages->sgl));
- ring->status_page.obj = NULL;
+ if (engine->status_page.obj) {
+ i915_gem_object_unpin_map(engine->status_page.obj);
+ engine->status_page.obj = NULL;
}
- ring->disable_lite_restore_wa = false;
- ring->ctx_desc_template = 0;
+ engine->idle_lite_restore_wa = 0;
+ engine->disable_lite_restore_wa = false;
+ engine->ctx_desc_template = 0;
- lrc_destroy_wa_ctx_obj(ring);
- ring->dev = NULL;
+ lrc_destroy_wa_ctx_obj(engine);
+ engine->dev = NULL;
}
static void
logical_ring_default_vfuncs(struct drm_device *dev,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *engine)
{
/* Default vfuncs which can be overriden by each engine. */
- ring->init_hw = gen8_init_common_ring;
- ring->emit_request = gen8_emit_request;
- ring->emit_flush = gen8_emit_flush;
- ring->irq_get = gen8_logical_ring_get_irq;
- ring->irq_put = gen8_logical_ring_put_irq;
- ring->emit_bb_start = gen8_emit_bb_start;
+ engine->init_hw = gen8_init_common_ring;
+ engine->emit_request = gen8_emit_request;
+ engine->emit_flush = gen8_emit_flush;
+ engine->irq_get = gen8_logical_ring_get_irq;
+ engine->irq_put = gen8_logical_ring_put_irq;
+ engine->emit_bb_start = gen8_emit_bb_start;
+ engine->get_seqno = gen8_get_seqno;
+ engine->set_seqno = gen8_set_seqno;
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
- ring->get_seqno = bxt_a_get_seqno;
- ring->set_seqno = bxt_a_set_seqno;
- } else {
- ring->get_seqno = gen8_get_seqno;
- ring->set_seqno = gen8_set_seqno;
+ engine->irq_seqno_barrier = bxt_a_seqno_barrier;
+ engine->set_seqno = bxt_a_set_seqno;
}
}
static inline void
-logical_ring_default_irqs(struct intel_engine_cs *ring, unsigned shift)
+logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
+{
+ engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
+ engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+}
+
+static int
+lrc_setup_hws(struct intel_engine_cs *engine,
+ struct drm_i915_gem_object *dctx_obj)
{
- ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
- ring->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+ void *hws;
+
+ /* The HWSP is part of the default context object in LRC mode. */
+ engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
+ LRC_PPHWSP_PN * PAGE_SIZE;
+ hws = i915_gem_object_pin_map(dctx_obj);
+ if (IS_ERR(hws))
+ return PTR_ERR(hws);
+ engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
+ engine->status_page.obj = dctx_obj;
+
+ return 0;
}
static int
-logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
+logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
{
- struct intel_context *dctx = to_i915(dev)->kernel_context;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_context *dctx = dev_priv->kernel_context;
+ enum forcewake_domains fw_domains;
int ret;
/* Intentionally left blank. */
- ring->buffer = NULL;
+ engine->buffer = NULL;
+
+ engine->dev = dev;
+ INIT_LIST_HEAD(&engine->active_list);
+ INIT_LIST_HEAD(&engine->request_list);
+ i915_gem_batch_pool_init(dev, &engine->batch_pool);
+ init_waitqueue_head(&engine->irq_queue);
+
+ INIT_LIST_HEAD(&engine->buffers);
+ INIT_LIST_HEAD(&engine->execlist_queue);
+ INIT_LIST_HEAD(&engine->execlist_retired_req_list);
+ spin_lock_init(&engine->execlist_lock);
- ring->dev = dev;
- INIT_LIST_HEAD(&ring->active_list);
- INIT_LIST_HEAD(&ring->request_list);
- i915_gem_batch_pool_init(dev, &ring->batch_pool);
- init_waitqueue_head(&ring->irq_queue);
+ tasklet_init(&engine->irq_tasklet,
+ intel_lrc_irq_handler, (unsigned long)engine);
- INIT_LIST_HEAD(&ring->buffers);
- INIT_LIST_HEAD(&ring->execlist_queue);
- INIT_LIST_HEAD(&ring->execlist_retired_req_list);
- spin_lock_init(&ring->execlist_lock);
+ logical_ring_init_platform_invariants(engine);
- logical_ring_init_platform_invariants(ring);
+ fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
+ RING_ELSP(engine),
+ FW_REG_WRITE);
- ret = i915_cmd_parser_init_ring(ring);
+ fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ RING_CONTEXT_STATUS_PTR(engine),
+ FW_REG_READ | FW_REG_WRITE);
+
+ fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ RING_CONTEXT_STATUS_BUF_BASE(engine),
+ FW_REG_READ);
+
+ engine->fw_domains = fw_domains;
+
+ ret = i915_cmd_parser_init_ring(engine);
if (ret)
goto error;
- ret = intel_lr_context_deferred_alloc(dctx, ring);
+ ret = intel_lr_context_deferred_alloc(dctx, engine);
if (ret)
goto error;
/* As this is the default context, always pin it */
- ret = intel_lr_context_do_pin(dctx, ring);
+ ret = intel_lr_context_do_pin(dctx, engine);
if (ret) {
DRM_ERROR(
"Failed to pin and map ringbuffer %s: %d\n",
- ring->name, ret);
+ engine->name, ret);
+ goto error;
+ }
+
+ /* And setup the hardware status page. */
+ ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
+ if (ret) {
+ DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
goto error;
}
return 0;
error:
- intel_logical_ring_cleanup(ring);
+ intel_logical_ring_cleanup(engine);
return ret;
}
static int logical_render_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *engine = &dev_priv->engine[RCS];
int ret;
- ring->name = "render ring";
- ring->id = RCS;
- ring->exec_id = I915_EXEC_RENDER;
- ring->guc_id = GUC_RENDER_ENGINE;
- ring->mmio_base = RENDER_RING_BASE;
+ engine->name = "render ring";
+ engine->id = RCS;
+ engine->exec_id = I915_EXEC_RENDER;
+ engine->guc_id = GUC_RENDER_ENGINE;
+ engine->mmio_base = RENDER_RING_BASE;
- logical_ring_default_irqs(ring, GEN8_RCS_IRQ_SHIFT);
+ logical_ring_default_irqs(engine, GEN8_RCS_IRQ_SHIFT);
if (HAS_L3_DPF(dev))
- ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+ engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- logical_ring_default_vfuncs(dev, ring);
+ logical_ring_default_vfuncs(dev, engine);
/* Override some for render ring. */
if (INTEL_INFO(dev)->gen >= 9)
- ring->init_hw = gen9_init_render_ring;
+ engine->init_hw = gen9_init_render_ring;
else
- ring->init_hw = gen8_init_render_ring;
- ring->init_context = gen8_init_rcs_context;
- ring->cleanup = intel_fini_pipe_control;
- ring->emit_flush = gen8_emit_flush_render;
- ring->emit_request = gen8_emit_request_render;
+ engine->init_hw = gen8_init_render_ring;
+ engine->init_context = gen8_init_rcs_context;
+ engine->cleanup = intel_fini_pipe_control;
+ engine->emit_flush = gen8_emit_flush_render;
+ engine->emit_request = gen8_emit_request_render;
- ring->dev = dev;
+ engine->dev = dev;
- ret = intel_init_pipe_control(ring);
+ ret = intel_init_pipe_control(engine);
if (ret)
return ret;
- ret = intel_init_workaround_bb(ring);
+ ret = intel_init_workaround_bb(engine);
if (ret) {
/*
* We continue even if we fail to initialize WA batch
@@ -2144,9 +2149,9 @@ static int logical_render_ring_init(struct drm_device *dev)
ret);
}
- ret = logical_ring_init(dev, ring);
+ ret = logical_ring_init(dev, engine);
if (ret) {
- lrc_destroy_wa_ctx_obj(ring);
+ lrc_destroy_wa_ctx_obj(engine);
}
return ret;
@@ -2155,69 +2160,69 @@ static int logical_render_ring_init(struct drm_device *dev)
static int logical_bsd_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[VCS];
+ struct intel_engine_cs *engine = &dev_priv->engine[VCS];
- ring->name = "bsd ring";
- ring->id = VCS;
- ring->exec_id = I915_EXEC_BSD;
- ring->guc_id = GUC_VIDEO_ENGINE;
- ring->mmio_base = GEN6_BSD_RING_BASE;
+ engine->name = "bsd ring";
+ engine->id = VCS;
+ engine->exec_id = I915_EXEC_BSD;
+ engine->guc_id = GUC_VIDEO_ENGINE;
+ engine->mmio_base = GEN6_BSD_RING_BASE;
- logical_ring_default_irqs(ring, GEN8_VCS1_IRQ_SHIFT);
- logical_ring_default_vfuncs(dev, ring);
+ logical_ring_default_irqs(engine, GEN8_VCS1_IRQ_SHIFT);
+ logical_ring_default_vfuncs(dev, engine);
- return logical_ring_init(dev, ring);
+ return logical_ring_init(dev, engine);
}
static int logical_bsd2_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
+ struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
- ring->name = "bsd2 ring";
- ring->id = VCS2;
- ring->exec_id = I915_EXEC_BSD;
- ring->guc_id = GUC_VIDEO_ENGINE2;
- ring->mmio_base = GEN8_BSD2_RING_BASE;
+ engine->name = "bsd2 ring";
+ engine->id = VCS2;
+ engine->exec_id = I915_EXEC_BSD;
+ engine->guc_id = GUC_VIDEO_ENGINE2;
+ engine->mmio_base = GEN8_BSD2_RING_BASE;
- logical_ring_default_irqs(ring, GEN8_VCS2_IRQ_SHIFT);
- logical_ring_default_vfuncs(dev, ring);
+ logical_ring_default_irqs(engine, GEN8_VCS2_IRQ_SHIFT);
+ logical_ring_default_vfuncs(dev, engine);
- return logical_ring_init(dev, ring);
+ return logical_ring_init(dev, engine);
}
static int logical_blt_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[BCS];
+ struct intel_engine_cs *engine = &dev_priv->engine[BCS];
- ring->name = "blitter ring";
- ring->id = BCS;
- ring->exec_id = I915_EXEC_BLT;
- ring->guc_id = GUC_BLITTER_ENGINE;
- ring->mmio_base = BLT_RING_BASE;
+ engine->name = "blitter ring";
+ engine->id = BCS;
+ engine->exec_id = I915_EXEC_BLT;
+ engine->guc_id = GUC_BLITTER_ENGINE;
+ engine->mmio_base = BLT_RING_BASE;
- logical_ring_default_irqs(ring, GEN8_BCS_IRQ_SHIFT);
- logical_ring_default_vfuncs(dev, ring);
+ logical_ring_default_irqs(engine, GEN8_BCS_IRQ_SHIFT);
+ logical_ring_default_vfuncs(dev, engine);
- return logical_ring_init(dev, ring);
+ return logical_ring_init(dev, engine);
}
static int logical_vebox_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[VECS];
+ struct intel_engine_cs *engine = &dev_priv->engine[VECS];
- ring->name = "video enhancement ring";
- ring->id = VECS;
- ring->exec_id = I915_EXEC_VEBOX;
- ring->guc_id = GUC_VIDEOENHANCE_ENGINE;
- ring->mmio_base = VEBOX_RING_BASE;
+ engine->name = "video enhancement ring";
+ engine->id = VECS;
+ engine->exec_id = I915_EXEC_VEBOX;
+ engine->guc_id = GUC_VIDEOENHANCE_ENGINE;
+ engine->mmio_base = VEBOX_RING_BASE;
- logical_ring_default_irqs(ring, GEN8_VECS_IRQ_SHIFT);
- logical_ring_default_vfuncs(dev, ring);
+ logical_ring_default_irqs(engine, GEN8_VECS_IRQ_SHIFT);
+ logical_ring_default_vfuncs(dev, engine);
- return logical_ring_init(dev, ring);
+ return logical_ring_init(dev, engine);
}
/**
@@ -2225,7 +2230,7 @@ static int logical_vebox_ring_init(struct drm_device *dev)
* @dev: DRM device.
*
* This function inits the engines for an Execlists submission style (the equivalent in the
- * legacy ringbuffer submission world would be i915_gem_init_rings). It does it only for
+ * legacy ringbuffer submission world would be i915_gem_init_engines). It does it only for
* those engines that are present in the hardware.
*
* Return: non-zero if the initialization failed.
@@ -2266,13 +2271,13 @@ int intel_logical_rings_init(struct drm_device *dev)
return 0;
cleanup_vebox_ring:
- intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
+ intel_logical_ring_cleanup(&dev_priv->engine[VECS]);
cleanup_blt_ring:
- intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
+ intel_logical_ring_cleanup(&dev_priv->engine[BCS]);
cleanup_bsd_ring:
- intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
+ intel_logical_ring_cleanup(&dev_priv->engine[VCS]);
cleanup_render_ring:
- intel_logical_ring_cleanup(&dev_priv->ring[RCS]);
+ intel_logical_ring_cleanup(&dev_priv->engine[RCS]);
return ret;
}
@@ -2320,13 +2325,13 @@ make_rpcs(struct drm_device *dev)
return rpcs;
}
-static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *ring)
+static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
{
u32 indirect_ctx_offset;
- switch (INTEL_INFO(ring->dev)->gen) {
+ switch (INTEL_INFO(engine->dev)->gen) {
default:
- MISSING_CASE(INTEL_INFO(ring->dev)->gen);
+ MISSING_CASE(INTEL_INFO(engine->dev)->gen);
/* fall through */
case 9:
indirect_ctx_offset =
@@ -2342,14 +2347,16 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *ring)
}
static int
-populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
- struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
+populate_lr_context(struct intel_context *ctx,
+ struct drm_i915_gem_object *ctx_obj,
+ struct intel_engine_cs *engine,
+ struct intel_ringbuffer *ringbuf)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
- struct page *page;
- uint32_t *reg_state;
+ void *vaddr;
+ u32 *reg_state;
int ret;
if (!ppgtt)
@@ -2361,18 +2368,17 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
return ret;
}
- ret = i915_gem_object_get_pages(ctx_obj);
- if (ret) {
- DRM_DEBUG_DRIVER("Could not get object pages\n");
+ vaddr = i915_gem_object_pin_map(ctx_obj);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
return ret;
}
-
- i915_gem_object_pin_pages(ctx_obj);
+ ctx_obj->dirty = true;
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
- page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
- reg_state = kmap_atomic(page);
+ reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
* commands followed by (reg, value) pairs. The values we are setting here are
@@ -2380,33 +2386,47 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
* recreate this batchbuffer with new values (including all the missing
* MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
reg_state[CTX_LRI_HEADER_0] =
- MI_LOAD_REGISTER_IMM(ring->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
- ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring),
+ MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
+ ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
+ RING_CONTEXT_CONTROL(engine),
_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
(HAS_RESOURCE_STREAMER(dev) ?
CTX_CTRL_RS_CTX_ENABLE : 0)));
- ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0);
- ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0);
+ ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
+ 0);
+ ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
+ 0);
/* Ring buffer start address is not known until the buffer is pinned.
* It is written to the context image in execlists_update_context()
*/
- ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START, RING_START(ring->mmio_base), 0);
- ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL, RING_CTL(ring->mmio_base),
+ ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
+ RING_START(engine->mmio_base), 0);
+ ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
+ RING_CTL(engine->mmio_base),
((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
- ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U, RING_BBADDR_UDW(ring->mmio_base), 0);
- ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L, RING_BBADDR(ring->mmio_base), 0);
- ASSIGN_CTX_REG(reg_state, CTX_BB_STATE, RING_BBSTATE(ring->mmio_base),
+ ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
+ RING_BBADDR_UDW(engine->mmio_base), 0);
+ ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
+ RING_BBADDR(engine->mmio_base), 0);
+ ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
+ RING_BBSTATE(engine->mmio_base),
RING_BB_PPGTT);
- ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(ring->mmio_base), 0);
- ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(ring->mmio_base), 0);
- ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE, RING_SBBSTATE(ring->mmio_base), 0);
- if (ring->id == RCS) {
- ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(ring->mmio_base), 0);
- ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(ring->mmio_base), 0);
- ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET, RING_INDIRECT_CTX_OFFSET(ring->mmio_base), 0);
- if (ring->wa_ctx.obj) {
- struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+ ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
+ RING_SBBADDR_UDW(engine->mmio_base), 0);
+ ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
+ RING_SBBADDR(engine->mmio_base), 0);
+ ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
+ RING_SBBSTATE(engine->mmio_base), 0);
+ if (engine->id == RCS) {
+ ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
+ RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
+ ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
+ RING_INDIRECT_CTX(engine->mmio_base), 0);
+ ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
+ RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
+ if (engine->wa_ctx.obj) {
+ struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
reg_state[CTX_RCS_INDIRECT_CTX+1] =
@@ -2414,7 +2434,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
(wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
- intel_lr_indirect_ctx_offset(ring) << 6;
+ intel_lr_indirect_ctx_offset(engine) << 6;
reg_state[CTX_BB_PER_CTX_PTR+1] =
(ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
@@ -2422,16 +2442,25 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
}
}
reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
- ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(ring->mmio_base), 0);
+ ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
+ RING_CTX_TIMESTAMP(engine->mmio_base), 0);
/* PDP values will be assigned later if needed */
- ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(ring, 3), 0);
- ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(ring, 3), 0);
- ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(ring, 2), 0);
- ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(ring, 2), 0);
- ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(ring, 1), 0);
- ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(ring, 1), 0);
- ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(ring, 0), 0);
- ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(ring, 0), 0);
+ ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
+ 0);
+ ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
+ 0);
+ ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
+ 0);
+ ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
+ 0);
+ ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
+ 0);
+ ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
+ 0);
+ ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
+ 0);
+ ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
+ 0);
if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
/* 64b PPGTT (48bit canonical)
@@ -2445,20 +2474,16 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
* With dynamic page allocation, PDPs may not be allocated at
* this point. Point the unallocated PDPs to the scratch page
*/
- ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
- ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
- ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
- ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
+ execlists_update_context_pdps(ppgtt, reg_state);
}
- if (ring->id == RCS) {
+ if (engine->id == RCS) {
reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
make_rpcs(dev));
}
- kunmap_atomic(reg_state);
- i915_gem_object_unpin_pages(ctx_obj);
+ i915_gem_object_unpin_map(ctx_obj);
return 0;
}
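
As a cross-check on the two MI_LOAD_REGISTER_IMM headers programmed above, the dword counts can be tallied directly from the ASSIGN_CTX_REG calls in this function (editorial reasoning only, not part of the patch):

/*
 * CTX_LRI_HEADER_0: 11 common (reg, value) pairs
 *   CONTEXT_CONTROL, RING_HEAD, RING_TAIL, RING_BUFFER_START,
 *   RING_BUFFER_CONTROL, BB_HEAD_U, BB_HEAD_L, BB_STATE,
 *   SECOND_BB_HEAD_U, SECOND_BB_HEAD_L, SECOND_BB_STATE
 * plus 3 render-only pairs when engine->id == RCS
 *   BB_PER_CTX_PTR, RCS_INDIRECT_CTX, RCS_INDIRECT_CTX_OFFSET
 * => MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11)
 *
 * CTX_LRI_HEADER_1: CTX_TIMESTAMP plus the four PDP UDW/LDW pairs
 *   = 1 + 8 = 9 => MI_LOAD_REGISTER_IMM(9)
 */
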
@@ -2475,7 +2500,7 @@ void intel_lr_context_free(struct intel_context *ctx)
{
int i;
- for (i = I915_NUM_RINGS; --i >= 0; ) {
+ for (i = I915_NUM_ENGINES; --i >= 0; ) {
struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
@@ -2485,6 +2510,7 @@ void intel_lr_context_free(struct intel_context *ctx)
if (ctx == ctx->i915->kernel_context) {
intel_unpin_ringbuffer_obj(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
+ i915_gem_object_unpin_map(ctx_obj);
}
WARN_ON(ctx->engine[i].pin_count);
@@ -2507,15 +2533,15 @@ void intel_lr_context_free(struct intel_context *ctx)
* in LRC mode, but does not include the "shared data page" used with
* GuC submission. The caller should account for this if using the GuC.
*/
-uint32_t intel_lr_context_size(struct intel_engine_cs *ring)
+uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
{
int ret = 0;
- WARN_ON(INTEL_INFO(ring->dev)->gen < 8);
+ WARN_ON(INTEL_INFO(engine->dev)->gen < 8);
- switch (ring->id) {
+ switch (engine->id) {
case RCS:
- if (INTEL_INFO(ring->dev)->gen >= 9)
+ if (INTEL_INFO(engine->dev)->gen >= 9)
ret = GEN9_LR_CONTEXT_RENDER_SIZE;
else
ret = GEN8_LR_CONTEXT_RENDER_SIZE;
@@ -2531,24 +2557,6 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *ring)
return ret;
}
-static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
- struct drm_i915_gem_object *default_ctx_obj)
-{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct page *page;
-
- /* The HWSP is part of the default context object in LRC mode. */
- ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
- + LRC_PPHWSP_PN * PAGE_SIZE;
- page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
- ring->status_page.page_addr = kmap(page);
- ring->status_page.obj = default_ctx_obj;
-
- I915_WRITE(RING_HWS_PGA(ring->mmio_base),
- (u32)ring->status_page.gfx_addr);
- POSTING_READ(RING_HWS_PGA(ring->mmio_base));
-}
-
/**
* intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
* @ctx: LR context to create.
@@ -2564,18 +2572,18 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
*/
int intel_lr_context_deferred_alloc(struct intel_context *ctx,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_gem_object *ctx_obj;
uint32_t context_size;
struct intel_ringbuffer *ringbuf;
int ret;
WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
- WARN_ON(ctx->engine[ring->id].state);
+ WARN_ON(ctx->engine[engine->id].state);
- context_size = round_up(intel_lr_context_size(ring), 4096);
+ context_size = round_up(intel_lr_context_size(engine), 4096);
/* One extra page as the sharing data between driver and GuC */
context_size += PAGE_SIZE * LRC_PPHWSP_PN;
@@ -2586,39 +2594,38 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
return -ENOMEM;
}
- ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE);
+ ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
if (IS_ERR(ringbuf)) {
ret = PTR_ERR(ringbuf);
goto error_deref_obj;
}
- ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
+ ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
if (ret) {
DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
goto error_ringbuf;
}
- ctx->engine[ring->id].ringbuf = ringbuf;
- ctx->engine[ring->id].state = ctx_obj;
+ ctx->engine[engine->id].ringbuf = ringbuf;
+ ctx->engine[engine->id].state = ctx_obj;
- if (ctx != ctx->i915->kernel_context && ring->init_context) {
+ if (ctx != ctx->i915->kernel_context && engine->init_context) {
struct drm_i915_gem_request *req;
- req = i915_gem_request_alloc(ring, ctx);
+ req = i915_gem_request_alloc(engine, ctx);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
DRM_ERROR("ring create req: %d\n", ret);
goto error_ringbuf;
}
- ret = ring->init_context(req);
+ ret = engine->init_context(req);
+ i915_add_request_no_flush(req);
if (ret) {
DRM_ERROR("ring init context: %d\n",
ret);
- i915_gem_request_cancel(req);
goto error_ringbuf;
}
- i915_add_request_no_flush(req);
}
return 0;
@@ -2626,40 +2633,38 @@ error_ringbuf:
intel_ringbuffer_free(ringbuf);
error_deref_obj:
drm_gem_object_unreference(&ctx_obj->base);
- ctx->engine[ring->id].ringbuf = NULL;
- ctx->engine[ring->id].state = NULL;
+ ctx->engine[engine->id].ringbuf = NULL;
+ ctx->engine[engine->id].state = NULL;
return ret;
}
-void intel_lr_context_reset(struct drm_device *dev,
- struct intel_context *ctx)
+void intel_lr_context_reset(struct drm_i915_private *dev_priv,
+ struct intel_context *ctx)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- int i;
+ struct intel_engine_cs *engine;
- for_each_ring(ring, dev_priv, i) {
+ for_each_engine(engine, dev_priv) {
struct drm_i915_gem_object *ctx_obj =
- ctx->engine[ring->id].state;
+ ctx->engine[engine->id].state;
struct intel_ringbuffer *ringbuf =
- ctx->engine[ring->id].ringbuf;
+ ctx->engine[engine->id].ringbuf;
+ void *vaddr;
uint32_t *reg_state;
- struct page *page;
if (!ctx_obj)
continue;
- if (i915_gem_object_get_pages(ctx_obj)) {
- WARN(1, "Failed get_pages for context obj\n");
+ vaddr = i915_gem_object_pin_map(ctx_obj);
+ if (WARN_ON(IS_ERR(vaddr)))
continue;
- }
- page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
- reg_state = kmap_atomic(page);
+
+ reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+ ctx_obj->dirty = true;
reg_state[CTX_RING_HEAD+1] = 0;
reg_state[CTX_RING_TAIL+1] = 0;
- kunmap_atomic(reg_state);
+ i915_gem_object_unpin_map(ctx_obj);
ringbuf->head = 0;
ringbuf->tail = 0;
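
The reset path above illustrates the mapping pattern this patch moves to: instead of kmap_atomic() on a single page, the whole context object is pinned and mapped once. A minimal sketch restating what intel_lr_context_reset() now does for one context object:

	void *vaddr = i915_gem_object_pin_map(ctx_obj);

	if (!IS_ERR(vaddr)) {
		uint32_t *reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;

		reg_state[CTX_RING_HEAD + 1] = 0;	/* write into the state page */
		reg_state[CTX_RING_TAIL + 1] = 0;
		ctx_obj->dirty = true;			/* pages were modified through the map */

		i915_gem_object_unpin_map(ctx_obj);
	}
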
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index e6cda3e22..60a7385bc 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -24,6 +24,8 @@
#ifndef _INTEL_LRC_H_
#define _INTEL_LRC_H_
+#include "intel_ringbuffer.h"
+
#define GEN8_LR_CONTEXT_ALIGN 4096
/* Execlists regs */
@@ -34,6 +36,7 @@
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
+#define RING_CONTEXT_STATUS_BUF_BASE(ring) _MMIO((ring)->mmio_base + 0x370)
#define RING_CONTEXT_STATUS_BUF_LO(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8)
#define RING_CONTEXT_STATUS_BUF_HI(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
#define RING_CONTEXT_STATUS_PTR(ring) _MMIO((ring)->mmio_base + 0x3a0)
@@ -57,10 +60,9 @@
/* Logical Rings */
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
-void intel_logical_ring_stop(struct intel_engine_cs *ring);
-void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
+void intel_logical_ring_stop(struct intel_engine_cs *engine);
+void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
int intel_logical_rings_init(struct drm_device *dev);
-int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
/**
@@ -98,18 +100,21 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
#define LRC_STATE_PN (LRC_PPHWSP_PN + 1)
void intel_lr_context_free(struct intel_context *ctx);
-uint32_t intel_lr_context_size(struct intel_engine_cs *ring);
+uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
int intel_lr_context_deferred_alloc(struct intel_context *ctx,
- struct intel_engine_cs *ring);
+ struct intel_engine_cs *engine);
void intel_lr_context_unpin(struct intel_context *ctx,
struct intel_engine_cs *engine);
-void intel_lr_context_reset(struct drm_device *dev,
- struct intel_context *ctx);
+
+struct drm_i915_private;
+
+void intel_lr_context_reset(struct drm_i915_private *dev_priv,
+ struct intel_context *ctx);
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
- struct intel_engine_cs *ring);
+ struct intel_engine_cs *engine);
u32 intel_execlists_ctx_id(struct intel_context *ctx,
- struct intel_engine_cs *ring);
+ struct intel_engine_cs *engine);
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
@@ -118,7 +123,6 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas);
-void intel_lrc_irq_handler(struct intel_engine_cs *ring);
-void intel_execlists_retire_requests(struct intel_engine_cs *ring);
+void intel_execlists_retire_requests(struct intel_engine_cs *engine);
#endif /* _INTEL_LRC_H_ */
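
The new RING_CONTEXT_STATUS_BUF_BASE macro sits alongside the per-entry LO/HI variants; each context-status-buffer entry occupies eight bytes from that base. A hedged sketch of reading one entry with these macros (read_csb_entry is a hypothetical helper for illustration, not part of this patch):

static u64 read_csb_entry(struct drm_i915_private *dev_priv,
			  struct intel_engine_cs *engine, int i)
{
	u64 lo = I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, i));
	u64 hi = I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, i));

	return lo | (hi << 32);
}
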
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 10dc3517b..96281e628 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -109,7 +109,6 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
u32 tmp, flags = 0;
- int dotclock;
tmp = I915_READ(lvds_encoder->reg);
if (tmp & LVDS_HSYNC_POLARITY)
@@ -134,12 +133,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
}
- dotclock = pipe_config->port_clock;
-
- if (HAS_PCH_SPLIT(dev_priv->dev))
- ironlake_check_encoder_dotclock(pipe_config, dotclock);
-
- pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+ pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static void intel_pre_enable_lvds(struct intel_encoder *encoder)
@@ -155,7 +149,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
if (HAS_PCH_SPLIT(dev)) {
assert_fdi_rx_pll_disabled(dev_priv, pipe);
assert_shared_dpll_disabled(dev_priv,
- intel_crtc_to_shared_dpll(crtc));
+ crtc->config->shared_dpll);
} else {
assert_pll_disabled(dev_priv, pipe);
}
@@ -782,57 +776,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
{ } /* terminating entry */
};
-/*
- * Enumerate the child dev array parsed from VBT to check whether
- * the LVDS is present.
- * If it is present, return 1.
- * If it is not present, return false.
- * If no child dev is parsed from VBT, it assumes that the LVDS is present.
- */
-static bool lvds_is_present_in_vbt(struct drm_device *dev,
- u8 *i2c_pin)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
-
- if (!dev_priv->vbt.child_dev_num)
- return true;
-
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- union child_device_config *uchild = dev_priv->vbt.child_dev + i;
- struct old_child_dev_config *child = &uchild->old;
-
- /* If the device type is not LFP, continue.
- * We have to check both the new identifiers as well as the
- * old for compatibility with some BIOSes.
- */
- if (child->device_type != DEVICE_TYPE_INT_LFP &&
- child->device_type != DEVICE_TYPE_LFP)
- continue;
-
- if (intel_gmbus_is_valid_pin(dev_priv, child->i2c_pin))
- *i2c_pin = child->i2c_pin;
-
- /* However, we cannot trust the BIOS writers to populate
- * the VBT correctly. Since LVDS requires additional
- * information from AIM blocks, a non-zero addin offset is
- * a good indicator that the LVDS is actually present.
- */
- if (child->addin_offset)
- return true;
-
- /* But even then some BIOS writers perform some black magic
- * and instantiate the device without reference to any
- * additional data. Trust that if the VBT was written into
- * the OpRegion then they have validated the LVDS's existence.
- */
- if (dev_priv->opregion.vbt)
- return true;
- }
-
- return false;
-}
-
static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
{
DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
@@ -982,14 +925,14 @@ void intel_lvds_init(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
if ((lvds & LVDS_DETECTED) == 0)
return;
- if (dev_priv->vbt.edp_support) {
+ if (dev_priv->vbt.edp.support) {
DRM_DEBUG_KMS("disable LVDS for eDP support\n");
return;
}
}
pin = GMBUS_PIN_PANEL;
- if (!lvds_is_present_in_vbt(dev, &pin)) {
+ if (!intel_bios_is_lvds_present(dev_priv, &pin)) {
if ((lvds & LVDS_PORT_EN) == 0) {
DRM_DEBUG_KMS("LVDS is not present in VBT\n");
return;
@@ -1139,6 +1082,8 @@ void intel_lvds_init(struct drm_device *dev)
fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
if (fixed_mode) {
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = fixed_mode->width_mm;
+ connector->display_info.height_mm = fixed_mode->height_mm;
goto out;
}
}
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index fed7bea19..6ba4bf7f2 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -128,9 +128,9 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
/**
* get_mocs_settings()
- * @dev: DRM device.
+ * @dev_priv: i915 device.
* @table: Output table that will be made to point at appropriate
- * MOCS values for the device.
+ * MOCS values for the device.
*
* This function will return the values of the MOCS table that needs to
* be programmed for the platform. It will return the values that need
@@ -138,28 +138,28 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
*
* Return: true if there are applicable MOCS settings for the device.
*/
-static bool get_mocs_settings(struct drm_device *dev,
+static bool get_mocs_settings(struct drm_i915_private *dev_priv,
struct drm_i915_mocs_table *table)
{
bool result = false;
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
table->size = ARRAY_SIZE(skylake_mocs_table);
table->table = skylake_mocs_table;
result = true;
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
table->size = ARRAY_SIZE(broxton_mocs_table);
table->table = broxton_mocs_table;
result = true;
} else {
- WARN_ONCE(INTEL_INFO(dev)->gen >= 9,
+ WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
"Platform that should have a MOCS table does not.\n");
}
return result;
}
-static i915_reg_t mocs_register(enum intel_ring_id ring, int index)
+static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
{
switch (ring) {
case RCS:
@@ -179,10 +179,49 @@ static i915_reg_t mocs_register(enum intel_ring_id ring, int index)
}
/**
+ * intel_mocs_init_engine() - program the mocs control table for an engine
+ * @engine: The engine for which to program the registers.
+ *
+ * This function writes the engine's MOCS control registers directly with
+ * MMIO, rather than emitting MI_LOAD_REGISTER_IMM commands through a request.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+int intel_mocs_init_engine(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_mocs_table table;
+ unsigned int index;
+
+ if (!get_mocs_settings(dev_priv, &table))
+ return 0;
+
+ if (WARN_ON(table.size > GEN9_NUM_MOCS_ENTRIES))
+ return -ENODEV;
+
+ for (index = 0; index < table.size; index++)
+ I915_WRITE(mocs_register(engine->id, index),
+ table.table[index].control_value);
+
+ /*
+ * Ok, now set the unused entries to uncached. These entries
+ * are officially undefined and no contract for the contents
+ * and settings is given for these entries.
+ *
+ * Entry 0 in the table is uncached - so we are just writing
+ * that value to all the remaining (unused) entries.
+ */
+ for (; index < GEN9_NUM_MOCS_ENTRIES; index++)
+ I915_WRITE(mocs_register(engine->id, index),
+ table.table[0].control_value);
+
+ return 0;
+}
+
+/**
* emit_mocs_control_table() - emit the mocs control table
* @req: Request to set up the MOCS table for.
* @table: The values to program into the control regs.
- * @ring: The engine for whom to emit the registers.
*
* This function simply emits a MI_LOAD_REGISTER_IMM command for the
* given table starting at the given address.
@@ -190,27 +229,26 @@ static i915_reg_t mocs_register(enum intel_ring_id ring, int index)
* Return: 0 on success, otherwise the error status.
*/
static int emit_mocs_control_table(struct drm_i915_gem_request *req,
- const struct drm_i915_mocs_table *table,
- enum intel_ring_id ring)
+ const struct drm_i915_mocs_table *table)
{
struct intel_ringbuffer *ringbuf = req->ringbuf;
+ enum intel_engine_id engine = req->engine->id;
unsigned int index;
int ret;
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
- ret = intel_logical_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
- if (ret) {
- DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+ ret = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+ if (ret)
return ret;
- }
intel_logical_ring_emit(ringbuf,
MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
for (index = 0; index < table->size; index++) {
- intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
+ intel_logical_ring_emit_reg(ringbuf,
+ mocs_register(engine, index));
intel_logical_ring_emit(ringbuf,
table->table[index].control_value);
}
@@ -224,8 +262,10 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
* that value to all the remaining (unused) entries.
*/
for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
- intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
- intel_logical_ring_emit(ringbuf, table->table[0].control_value);
+ intel_logical_ring_emit_reg(ringbuf,
+ mocs_register(engine, index));
+ intel_logical_ring_emit(ringbuf,
+ table->table[0].control_value);
}
intel_logical_ring_emit(ringbuf, MI_NOOP);
@@ -234,6 +274,14 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
return 0;
}
+static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
+ u16 low,
+ u16 high)
+{
+ return table->table[low].l3cc_value |
+ table->table[high].l3cc_value << 16;
+}
+
/**
* emit_mocs_l3cc_table() - emit the mocs control table
* @req: Request to set up the MOCS table for.
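
The l3cc_combine() helper introduced above packs the 16-bit l3cc fields of an even/odd entry pair into a single GEN9_LNCFCMOCS register value. A worked example with made-up table values:

/*
 * Illustrative values only: if table->table[2].l3cc_value == 0x0010 and
 * table->table[3].l3cc_value == 0x0030, then
 *
 *   l3cc_combine(table, 2, 3) == 0x0010 | (0x0030 << 16) == 0x00300010
 *
 * i.e. the even entry lands in the low 16 bits and the odd entry in the
 * high 16 bits of one GEN9_LNCFCMOCS write.
 */
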
@@ -249,39 +297,31 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
const struct drm_i915_mocs_table *table)
{
struct intel_ringbuffer *ringbuf = req->ringbuf;
- unsigned int count;
unsigned int i;
- u32 value;
- u32 filler = (table->table[0].l3cc_value & 0xffff) |
- ((table->table[0].l3cc_value & 0xffff) << 16);
int ret;
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
- ret = intel_logical_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
- if (ret) {
- DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+ ret = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+ if (ret)
return ret;
- }
intel_logical_ring_emit(ringbuf,
MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
- for (i = 0, count = 0; i < table->size / 2; i++, count += 2) {
- value = (table->table[count].l3cc_value & 0xffff) |
- ((table->table[count + 1].l3cc_value & 0xffff) << 16);
-
+ for (i = 0; i < table->size/2; i++) {
intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
- intel_logical_ring_emit(ringbuf, value);
+ intel_logical_ring_emit(ringbuf,
+ l3cc_combine(table, 2*i, 2*i+1));
}
if (table->size & 0x01) {
/* Odd table size - 1 left over */
- value = (table->table[count].l3cc_value & 0xffff) |
- ((table->table[0].l3cc_value & 0xffff) << 16);
- } else
- value = filler;
+ intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
+ intel_logical_ring_emit(ringbuf, l3cc_combine(table, 2*i, 0));
+ i++;
+ }
/*
* Now set the rest of the table to uncached - use entry 0 as
@@ -290,9 +330,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
*/
for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
- intel_logical_ring_emit(ringbuf, value);
-
- value = filler;
+ intel_logical_ring_emit(ringbuf, l3cc_combine(table, 0, 0));
}
intel_logical_ring_emit(ringbuf, MI_NOOP);
@@ -302,6 +340,47 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
}
/**
+ * intel_mocs_init_l3cc_table() - program the mocs l3cc table
+ * @dev: The device to be programmed.
+ *
+ * This function programs the mocs l3cc registers directly for the given
+ * table. This register set is programmed in pairs.
+ *
+ * These registers may get programmed more than once; it is simpler to
+ * re-program 32 registers than to track whether they have already been
+ * programmed. We always reprogram with the same values, and this happens
+ * only on context start.
+ *
+ * Return: Nothing.
+ */
+void intel_mocs_init_l3cc_table(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_mocs_table table;
+ unsigned int i;
+
+ if (!get_mocs_settings(dev_priv, &table))
+ return;
+
+ for (i = 0; i < table.size/2; i++)
+ I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 2*i, 2*i+1));
+
+ /* Odd table size - 1 left over */
+ if (table.size & 0x01) {
+ I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 2*i, 0));
+ i++;
+ }
+
+ /*
+ * Now set the rest of the table to uncached - use entry 0 as
+ * this will be uncached. Leave the last pair as initialised, as
+ * they are reserved by the hardware.
+ */
+ for (; i < (GEN9_NUM_MOCS_ENTRIES / 2); i++)
+ I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 0, 0));
+}
+
+/**
* intel_rcs_context_init_mocs() - program the MOCS register.
* @req: Request to set up the MOCS tables for.
*
@@ -322,17 +401,11 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
struct drm_i915_mocs_table t;
int ret;
- if (get_mocs_settings(req->ring->dev, &t)) {
- struct drm_i915_private *dev_priv = req->i915;
- struct intel_engine_cs *ring;
- enum intel_ring_id ring_id;
-
- /* Program the control registers */
- for_each_ring(ring, dev_priv, ring_id) {
- ret = emit_mocs_control_table(req, &t, ring_id);
- if (ret)
- return ret;
- }
+ if (get_mocs_settings(req->i915, &t)) {
+ /* Program the RCS control registers */
+ ret = emit_mocs_control_table(req, &t);
+ if (ret)
+ return ret;
/* Now program the l3cc registers */
ret = emit_mocs_l3cc_table(req, &t);
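
Taken together, the two new entry points split MOCS programming into a per-engine part and a global part. A rough sketch of how they might be wired into hardware init (the call sites shown here are assumptions for illustration, not taken from this diff):

	/* per-engine: program the engine's MOCS control registers via MMIO */
	ret = intel_mocs_init_engine(engine);
	if (ret)
		return ret;

	/* once per device: program the shared GEN9_LNCFCMOCS (l3cc) pairs */
	intel_mocs_init_l3cc_table(dev);
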
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h
index 76e45b174..4640299e0 100644
--- a/drivers/gpu/drm/i915/intel_mocs.h
+++ b/drivers/gpu/drm/i915/intel_mocs.h
@@ -53,5 +53,7 @@
#include "i915_drv.h"
int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
+void intel_mocs_init_l3cc_table(struct drm_device *dev);
+int intel_mocs_init_engine(struct intel_engine_cs *ring);
#endif
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index c15718b48..16e209d32 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -34,12 +34,6 @@
#include "i915_drv.h"
#include "intel_drv.h"
-#define PCI_ASLE 0xe4
-#define PCI_ASLS 0xfc
-#define PCI_SWSCI 0xe8
-#define PCI_SWSCI_SCISEL (1 << 15)
-#define PCI_SWSCI_GSSCIE (1 << 0)
-
#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET 0x100
#define ACPI_CLID 0x01ac /* current lid state indicator */
@@ -246,13 +240,12 @@ struct opregion_asle_ext {
#define MAX_DSLP 1500
-#ifdef CONFIG_ACPI
static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_swsci *swsci = dev_priv->opregion.swsci;
u32 main_function, sub_function, scic;
- u16 pci_swsci;
+ u16 swsci_val;
u32 dslp;
if (!swsci)
@@ -300,16 +293,16 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
swsci->scic = scic;
/* Ensure SCI event is selected and event trigger is cleared. */
- pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci);
- if (!(pci_swsci & PCI_SWSCI_SCISEL) || (pci_swsci & PCI_SWSCI_GSSCIE)) {
- pci_swsci |= PCI_SWSCI_SCISEL;
- pci_swsci &= ~PCI_SWSCI_GSSCIE;
- pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
+ pci_read_config_word(dev->pdev, SWSCI, &swsci_val);
+ if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) {
+ swsci_val |= SWSCI_SCISEL;
+ swsci_val &= ~SWSCI_GSSCIE;
+ pci_write_config_word(dev->pdev, SWSCI, swsci_val);
}
/* Use event trigger to tell bios to check the mail. */
- pci_swsci |= PCI_SWSCI_GSSCIE;
- pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
+ swsci_val |= SWSCI_GSSCIE;
+ pci_write_config_word(dev->pdev, SWSCI, swsci_val);
/* Poll for the result. */
#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
@@ -905,9 +898,6 @@ static void swsci_setup(struct drm_device *dev)
opregion->swsci_gbda_sub_functions,
opregion->swsci_sbcb_sub_functions);
}
-#else /* CONFIG_ACPI */
-static inline void swsci_setup(struct drm_device *dev) {}
-#endif /* CONFIG_ACPI */
static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
{
@@ -943,16 +933,14 @@ int intel_opregion_setup(struct drm_device *dev)
BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400);
- pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
+ pci_read_config_dword(dev->pdev, ASLS, &asls);
DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
if (asls == 0) {
DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
return -ENOTSUPP;
}
-#ifdef CONFIG_ACPI
INIT_WORK(&opregion->asle_work, asle_work);
-#endif
base = memremap(asls, OPREGION_SIZE, MEMREMAP_WB);
if (!base)
@@ -1024,3 +1012,42 @@ err_out:
memunmap(base);
return err;
}
+
+int
+intel_opregion_get_panel_type(struct drm_device *dev)
+{
+ u32 panel_details;
+ int ret;
+
+ ret = swsci(dev, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n",
+ ret);
+ return ret;
+ }
+
+ ret = (panel_details >> 8) & 0xff;
+ if (ret > 0x10) {
+ DRM_DEBUG_KMS("Invalid OpRegion panel type 0x%x\n", ret);
+ return -EINVAL;
+ }
+
+ /* fall back to VBT panel type? */
+ if (ret == 0x0) {
+ DRM_DEBUG_KMS("No panel type in OpRegion\n");
+ return -ENODEV;
+ }
+
+ /*
+ * FIXME On Dell XPS 13 9350 the OpRegion panel type (0) gives us
+ * low vswing for eDP, whereas the VBT panel type (2) gives us normal
+ * vswing instead. Low vswing results in some display flickers, so
+ * let's simply ignore the OpRegion panel type on SKL for now.
+ */
+ if (IS_SKYLAKE(dev)) {
+ DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
+ return -ENODEV;
+ }
+
+ return ret - 1;
+}
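
A worked example of the panel-type decode above (the value is illustrative):

/*
 * If the GBDA panel-details word comes back as 0x0200, then
 * (0x0200 >> 8) & 0xff == 0x02, which is non-zero and inside the valid
 * 0x1..0x10 range, so - absent the SKL quirk - the function returns
 * 0x02 - 1 == 1 as the zero-based panel type index.
 */
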
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 9168413fe..bd38e49f7 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -190,13 +190,14 @@ struct intel_overlay {
static struct overlay_registers __iomem *
intel_overlay_map_regs(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = overlay->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(overlay->dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct overlay_registers __iomem *regs;
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
else
- regs = io_mapping_map_wc(dev_priv->gtt.mappable,
+ regs = io_mapping_map_wc(ggtt->mappable,
i915_gem_obj_ggtt_offset(overlay->reg_bo));
return regs;
@@ -233,30 +234,30 @@ static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req;
int ret;
WARN_ON(overlay->active);
WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
- req = i915_gem_request_alloc(ring, NULL);
+ req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
ret = intel_ring_begin(req, 4);
if (ret) {
- i915_gem_request_cancel(req);
+ i915_add_request_no_flush(req);
return ret;
}
overlay->active = true;
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
- intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+ intel_ring_emit(engine, overlay->flip_addr | OFC_UPDATE);
+ intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
return intel_overlay_do_wait_request(overlay, req, NULL);
}
@@ -267,7 +268,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
@@ -283,19 +284,19 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- req = i915_gem_request_alloc(ring, NULL);
+ req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
ret = intel_ring_begin(req, 2);
if (ret) {
- i915_gem_request_cancel(req);
+ i915_add_request_no_flush(req);
return ret;
}
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
- intel_ring_emit(ring, flip_addr);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ intel_ring_emit(engine, flip_addr);
+ intel_ring_advance(engine);
WARN_ON(overlay->last_flip_req);
i915_gem_request_assign(&overlay->last_flip_req, req);
@@ -336,7 +337,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req;
u32 flip_addr = overlay->flip_addr;
int ret;
@@ -349,33 +350,34 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
- req = i915_gem_request_alloc(ring, NULL);
+ req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
ret = intel_ring_begin(req, 6);
if (ret) {
- i915_gem_request_cancel(req);
+ i915_add_request_no_flush(req);
return ret;
}
/* wait for overlay to go idle */
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
- intel_ring_emit(ring, flip_addr);
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ intel_ring_emit(engine, flip_addr);
+ intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
/* turn overlay off */
if (IS_I830(dev)) {
/* Workaround: Don't disable the overlay fully, since otherwise
* it dies on the next OVERLAY_ON cmd. */
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(engine, MI_NOOP);
} else {
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
- intel_ring_emit(ring, flip_addr);
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ intel_ring_emit(engine, flip_addr);
+ intel_ring_emit(engine,
+ MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
}
- intel_ring_advance(ring);
+ intel_ring_advance(engine);
return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
}
@@ -408,7 +410,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *engine = &dev_priv->engine[RCS];
int ret;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -423,19 +425,20 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
/* synchronous slowpath */
struct drm_i915_gem_request *req;
- req = i915_gem_request_alloc(ring, NULL);
+ req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
ret = intel_ring_begin(req, 2);
if (ret) {
- i915_gem_request_cancel(req);
+ i915_add_request_no_flush(req);
return ret;
}
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine,
+ MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
ret = intel_overlay_do_wait_request(overlay, req,
intel_overlay_release_old_vid_tail);
@@ -1124,7 +1127,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
}
crtc = to_intel_crtc(drmmode_crtc);
- new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
+ new_bo = to_intel_bo(drm_gem_object_lookup(file_priv,
put_image_rec->bo_handle));
if (&new_bo->base == NULL) {
ret = -ENOENT;
@@ -1479,7 +1482,8 @@ struct intel_overlay_error_state {
static struct overlay_registers __iomem *
intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = overlay->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(overlay->dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct overlay_registers __iomem *regs;
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
@@ -1488,7 +1492,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
regs = (struct overlay_registers __iomem *)
overlay->reg_bo->phys_handle->vaddr;
else
- regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ regs = io_mapping_map_atomic_wc(ggtt->mappable,
i915_gem_obj_ggtt_offset(overlay->reg_bo));
return regs;
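
One behavioural change threaded through the overlay paths above is the error handling after request allocation: a request that fails at intel_ring_begin() is now flushed out with i915_add_request_no_flush() instead of being cancelled. The resulting pattern, as a minimal sketch mirroring the hunks above:

	req = i915_gem_request_alloc(engine, NULL);
	if (IS_ERR(req))
		return PTR_ERR(req);

	ret = intel_ring_begin(req, 4);
	if (ret) {
		/* submit the (empty) request rather than cancelling it */
		i915_add_request_no_flush(req);
		return ret;
	}
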
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 21ee6477b..aba940998 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -504,7 +504,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
if (panel->backlight.combination_mode) {
u8 lbpc;
- pci_read_config_byte(dev_priv->dev->pdev, PCI_LBPC, &lbpc);
+ pci_read_config_byte(dev_priv->dev->pdev, LBPC, &lbpc);
val *= lbpc;
}
@@ -592,7 +592,7 @@ static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
lbpc = level * 0xfe / panel->backlight.max + 1;
level /= lbpc;
- pci_write_config_byte(dev_priv->dev->pdev, PCI_LBPC, lbpc);
+ pci_write_config_byte(dev_priv->dev->pdev, LBPC, lbpc);
}
if (IS_GEN4(dev_priv)) {
@@ -1240,7 +1240,7 @@ static void intel_backlight_device_unregister(struct intel_connector *connector)
*/
static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- return KHz(19200) / pwm_freq_hz;
+ return DIV_ROUND_CLOSEST(KHz(19200), pwm_freq_hz);
}
/*
@@ -1251,16 +1251,14 @@ static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- u32 mul, clock;
+ u32 mul;
if (I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY)
mul = 128;
else
mul = 16;
- clock = MHz(24);
-
- return clock / (pwm_freq_hz * mul);
+ return DIV_ROUND_CLOSEST(MHz(24), pwm_freq_hz * mul);
}
/*
@@ -1283,7 +1281,7 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
else
clock = MHz(24); /* LPT:LP */
- return clock / (pwm_freq_hz * mul);
+ return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
}
/*
@@ -1292,10 +1290,9 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_device *dev = connector->base.dev;
- int clock = MHz(intel_pch_rawclk(dev));
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- return clock / (pwm_freq_hz * 128);
+ return DIV_ROUND_CLOSEST(KHz(dev_priv->rawclk_freq), pwm_freq_hz * 128);
}
/*
@@ -1308,16 +1305,15 @@ static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_device *dev = connector->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
int clock;
- if (IS_PINEVIEW(dev))
- clock = MHz(intel_hrawclk(dev));
+ if (IS_PINEVIEW(dev_priv))
+ clock = KHz(dev_priv->rawclk_freq);
else
- clock = 1000 * dev_priv->cdclk_freq;
+ clock = KHz(dev_priv->cdclk_freq);
- return clock / (pwm_freq_hz * 32);
+ return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32);
}
/*
@@ -1332,11 +1328,11 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
int clock;
if (IS_G4X(dev_priv))
- clock = MHz(intel_hrawclk(dev));
+ clock = KHz(dev_priv->rawclk_freq);
else
- clock = 1000 * dev_priv->cdclk_freq;
+ clock = KHz(dev_priv->cdclk_freq);
- return clock / (pwm_freq_hz * 128);
+ return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128);
}
/*
@@ -1346,19 +1342,21 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_device *dev = connector->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int clock;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ int mul, clock;
if ((I915_READ(CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
- if (IS_CHERRYVIEW(dev))
- return KHz(19200) / (pwm_freq_hz * 16);
+ if (IS_CHERRYVIEW(dev_priv))
+ clock = KHz(19200);
else
- return MHz(25) / (pwm_freq_hz * 16);
+ clock = MHz(25);
+ mul = 16;
} else {
- clock = intel_hrawclk(dev);
- return MHz(clock) / (pwm_freq_hz * 128);
+ clock = KHz(dev_priv->rawclk_freq);
+ mul = 128;
}
+
+ return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
}
static u32 get_backlight_max_vbt(struct intel_connector *connector)
@@ -1640,6 +1638,12 @@ static int pwm_setup_backlight(struct intel_connector *connector,
return -ENODEV;
}
+ /*
+ * FIXME: pwm_apply_args() should be removed when switching to
+ * the atomic PWM API.
+ */
+ pwm_apply_args(panel->backlight.pwm);
+
retval = pwm_config(panel->backlight.pwm, CRC_PMIC_PWM_PERIOD_NS,
CRC_PMIC_PWM_PERIOD_NS);
if (retval < 0) {
@@ -1727,7 +1731,8 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
panel->backlight.set = bxt_set_backlight;
panel->backlight.get = bxt_get_backlight;
panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
- } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv)) {
+ } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) ||
+ HAS_PCH_KBP(dev_priv)) {
panel->backlight.setup = lpt_setup_backlight;
panel->backlight.enable = lpt_enable_backlight;
panel->backlight.disable = lpt_disable_backlight;
@@ -1745,7 +1750,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
panel->backlight.get = pch_get_backlight;
panel->backlight.hz_to_pwm = pch_hz_to_pwm;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- if (dev_priv->vbt.has_mipi) {
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
panel->backlight.setup = pwm_setup_backlight;
panel->backlight.enable = pwm_enable_backlight;
panel->backlight.disable = pwm_disable_backlight;
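
The switch to DIV_ROUND_CLOSEST() in the hz-to-PWM helpers above matters whenever the clock does not divide evenly. A worked example for spt_hz_to_pwm() (the 200 Hz request is illustrative):

/*
 * With the fixed 24 MHz clock and SPT_PWM_GRANULARITY set (mul == 128),
 * a 200 Hz backlight request gives
 *
 *   DIV_ROUND_CLOSEST(24000000, 200 * 128) = DIV_ROUND_CLOSEST(24000000, 25600)
 *                                          = 938
 *
 * whereas the old plain division truncated 937.5 down to 937.
 */
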
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 989b94df6..68a1f4cc2 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -54,10 +54,38 @@
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
+static void gen9_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
+ I915_WRITE(CHICKEN_PAR1_1,
+ I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
+
+ I915_WRITE(GEN8_CONFIG0,
+ I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);
+
+ /* WaEnableChickenDCPR:skl,bxt,kbl */
+ I915_WRITE(GEN8_CHICKEN_DCPR_1,
+ I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
+
+ /* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
+ /* WaFbcWakeMemOn:skl,bxt,kbl */
+ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS |
+ DISP_FBC_MEMORY_WAKE);
+
+ /* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
+ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ ILK_DPFC_DISABLE_DUMMY0);
+}
+
static void bxt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ gen9_init_clock_gating(dev);
+
/* WaDisableSDEUnitClockGating:bxt */
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
@@ -487,20 +515,6 @@ static const struct intel_watermark_params g4x_cursor_wm_info = {
.guard_size = 2,
.cacheline_size = G4X_FIFO_LINE_SIZE,
};
-static const struct intel_watermark_params valleyview_wm_info = {
- .fifo_size = VALLEYVIEW_FIFO_SIZE,
- .max_wm = VALLEYVIEW_MAX_WM,
- .default_wm = VALLEYVIEW_MAX_WM,
- .guard_size = 2,
- .cacheline_size = G4X_FIFO_LINE_SIZE,
-};
-static const struct intel_watermark_params valleyview_cursor_wm_info = {
- .fifo_size = I965_CURSOR_FIFO,
- .max_wm = VALLEYVIEW_CURSOR_MAX_WM,
- .default_wm = I965_CURSOR_DFT_WM,
- .guard_size = 2,
- .cacheline_size = G4X_FIFO_LINE_SIZE,
-};
static const struct intel_watermark_params i965_cursor_wm_info = {
.fifo_size = I965_CURSOR_FIFO,
.max_wm = I965_CURSOR_MAX_WM,
@@ -2010,11 +2024,18 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
cur_latency *= 5;
}
- result->pri_val = ilk_compute_pri_wm(cstate, pristate,
- pri_latency, level);
- result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
- result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
- result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
+ if (pristate) {
+ result->pri_val = ilk_compute_pri_wm(cstate, pristate,
+ pri_latency, level);
+ result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
+ }
+
+ if (sprstate)
+ result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
+
+ if (curstate)
+ result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
+
result->enable = true;
}
@@ -2278,96 +2299,167 @@ static void skl_setup_wm_latency(struct drm_device *dev)
intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
}
+static bool ilk_validate_pipe_wm(struct drm_device *dev,
+ struct intel_pipe_wm *pipe_wm)
+{
+ /* LP0 watermark maximums depend on this pipe alone */
+ const struct intel_wm_config config = {
+ .num_pipes_active = 1,
+ .sprites_enabled = pipe_wm->sprites_enabled,
+ .sprites_scaled = pipe_wm->sprites_scaled,
+ };
+ struct ilk_wm_maximums max;
+
+ /* LP0 watermarks always use 1/2 DDB partitioning */
+ ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
+
+ /* At least LP0 must be valid */
+ if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
+ DRM_DEBUG_KMS("LP0 watermark invalid\n");
+ return false;
+ }
+
+ return true;
+}
+
/* Compute new watermarks for the pipe */
-static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc,
- struct drm_atomic_state *state)
+static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
{
+ struct drm_atomic_state *state = cstate->base.state;
+ struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
struct intel_pipe_wm *pipe_wm;
- struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_device *dev = state->dev;
const struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc_state *cstate = NULL;
struct intel_plane *intel_plane;
- struct drm_plane_state *ps;
struct intel_plane_state *pristate = NULL;
struct intel_plane_state *sprstate = NULL;
struct intel_plane_state *curstate = NULL;
- int level, max_level = ilk_wm_max_level(dev);
- /* LP0 watermark maximums depend on this pipe alone */
- struct intel_wm_config config = {
- .num_pipes_active = 1,
- };
+ int level, max_level = ilk_wm_max_level(dev), usable_level;
struct ilk_wm_maximums max;
- cstate = intel_atomic_get_crtc_state(state, intel_crtc);
- if (IS_ERR(cstate))
- return PTR_ERR(cstate);
-
pipe_wm = &cstate->wm.optimal.ilk;
- memset(pipe_wm, 0, sizeof(*pipe_wm));
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- ps = drm_atomic_get_plane_state(state,
- &intel_plane->base);
- if (IS_ERR(ps))
- return PTR_ERR(ps);
+ struct intel_plane_state *ps;
+
+ ps = intel_atomic_get_existing_plane_state(state,
+ intel_plane);
+ if (!ps)
+ continue;
if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
- pristate = to_intel_plane_state(ps);
+ pristate = ps;
else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
- sprstate = to_intel_plane_state(ps);
+ sprstate = ps;
else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
- curstate = to_intel_plane_state(ps);
+ curstate = ps;
}
- config.sprites_enabled = sprstate->visible;
- config.sprites_scaled = sprstate->visible &&
- (drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 ||
- drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
-
pipe_wm->pipe_enabled = cstate->base.active;
- pipe_wm->sprites_enabled = config.sprites_enabled;
- pipe_wm->sprites_scaled = config.sprites_scaled;
+ if (sprstate) {
+ pipe_wm->sprites_enabled = sprstate->visible;
+ pipe_wm->sprites_scaled = sprstate->visible &&
+ (drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 ||
+ drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
+ }
+
+ usable_level = max_level;
/* ILK/SNB: LP2+ watermarks only w/o sprites */
- if (INTEL_INFO(dev)->gen <= 6 && sprstate->visible)
- max_level = 1;
+ if (INTEL_INFO(dev)->gen <= 6 && pipe_wm->sprites_enabled)
+ usable_level = 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
- if (config.sprites_scaled)
- max_level = 0;
+ if (pipe_wm->sprites_scaled)
+ usable_level = 0;
ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
- pristate, sprstate, curstate, &pipe_wm->wm[0]);
+ pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
+
+ memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
+ pipe_wm->wm[0] = pipe_wm->raw_wm[0];
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate);
- /* LP0 watermarks always use 1/2 DDB partitioning */
- ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
-
- /* At least LP0 must be valid */
- if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
+ if (!ilk_validate_pipe_wm(dev, pipe_wm))
return -EINVAL;
ilk_compute_wm_reg_maximums(dev, 1, &max);
for (level = 1; level <= max_level; level++) {
- struct intel_wm_level wm = {};
+ struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
- pristate, sprstate, curstate, &wm);
+ pristate, sprstate, curstate, wm);
/*
* Disable any watermark level that exceeds the
* register maximums since such watermarks are
* always invalid.
*/
- if (!ilk_validate_wm_level(level, &max, &wm))
- break;
+ if (level > usable_level)
+ continue;
+
+ if (ilk_validate_wm_level(level, &max, wm))
+ pipe_wm->wm[level] = *wm;
+ else
+ usable_level = level;
+ }
+
+ return 0;
+}
+
+/*
+ * Build a set of 'intermediate' watermark values that satisfy both the old
+ * state and the new state. These can be programmed to the hardware
+ * immediately.
+ */
+static int ilk_compute_intermediate_wm(struct drm_device *dev,
+ struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *newstate)
+{
+ struct intel_pipe_wm *a = &newstate->wm.intermediate;
+ struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
+ int level, max_level = ilk_wm_max_level(dev);
+
+ /*
+ * Start with the final, target watermarks, then combine with the
+ * currently active watermarks to get values that are safe both before
+ * and after the vblank.
+ */
+ *a = newstate->wm.optimal.ilk;
+ a->pipe_enabled |= b->pipe_enabled;
+ a->sprites_enabled |= b->sprites_enabled;
+ a->sprites_scaled |= b->sprites_scaled;
- pipe_wm->wm[level] = wm;
+ for (level = 0; level <= max_level; level++) {
+ struct intel_wm_level *a_wm = &a->wm[level];
+ const struct intel_wm_level *b_wm = &b->wm[level];
+
+ a_wm->enable &= b_wm->enable;
+ a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
+ a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
+ a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
+ a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
}
+ /*
+ * We need to make sure that these merged watermark values are
+ * actually a valid configuration themselves. If they're not,
+ * there's no safe way to transition from the old state to
+ * the new state, so we need to fail the atomic transaction.
+ */
+ if (!ilk_validate_pipe_wm(dev, a))
+ return -EINVAL;
+
+ /*
+ * If our intermediate WMs are identical to the final WMs, then we can
+ * omit the post-vblank programming; only update if it's different.
+ */
+ if (memcmp(a, &newstate->wm.optimal.ilk, sizeof(*a)) == 0)
+ newstate->wm.need_postvbl_update = false;
+
return 0;
}
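
The intermediate watermarks computed above are, per level, simply the most conservative combination of the old and new values. A small worked example with illustrative numbers:

/*
 * Old (active) LP1:  enable = true,  pri_val = 40, cur_val = 8
 * New (optimal) LP1: enable = true,  pri_val = 32, cur_val = 12
 *
 * Intermediate LP1:  enable = true && true = true,
 *                    pri_val = max(40, 32) = 40,
 *                    cur_val = max(8, 12)  = 12,
 *
 * so the value programmed before the vblank is safe whichever of the
 * two plane configurations is currently being scanned out.
 */
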
@@ -2383,9 +2475,7 @@ static void ilk_merge_wm_level(struct drm_device *dev,
ret_wm->enable = true;
for_each_intel_crtc(dev, intel_crtc) {
- const struct intel_crtc_state *cstate =
- to_intel_crtc_state(intel_crtc->base.state);
- const struct intel_pipe_wm *active = &cstate->wm.optimal.ilk;
+ const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
const struct intel_wm_level *wm = &active->wm[level];
if (!active->pipe_enabled)
@@ -2421,7 +2511,7 @@ static void ilk_wm_merge(struct drm_device *dev,
/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
config->num_pipes_active > 1)
- return;
+ last_enabled_level = 0;
/* ILK: FBC WM must be disabled always */
merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
@@ -2533,15 +2623,14 @@ static void ilk_compute_wm_results(struct drm_device *dev,
/* LP0 register values */
for_each_intel_crtc(dev, intel_crtc) {
- const struct intel_crtc_state *cstate =
- to_intel_crtc_state(intel_crtc->base.state);
enum pipe pipe = intel_crtc->pipe;
- const struct intel_wm_level *r = &cstate->wm.optimal.ilk.wm[0];
+ const struct intel_wm_level *r =
+ &intel_crtc->wm.active.ilk.wm[0];
if (WARN_ON(!r->enable))
continue;
- results->wm_linetime[pipe] = cstate->wm.optimal.ilk.linetime;
+ results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;
results->wm_pipe[pipe] =
(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
@@ -2748,7 +2837,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
dev_priv->wm.hw = *results;
}
-static bool ilk_disable_lp_wm(struct drm_device *dev)
+bool ilk_disable_lp_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3764,11 +3853,9 @@ static void ilk_compute_wm_config(struct drm_device *dev,
}
}
-static void ilk_program_watermarks(struct intel_crtc_state *cstate)
+static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
- struct drm_crtc *crtc = cstate->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_device *dev = dev_priv->dev;
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct ilk_wm_maximums max;
struct intel_wm_config config = {};
@@ -3799,28 +3886,28 @@ static void ilk_program_watermarks(struct intel_crtc_state *cstate)
ilk_write_wm_values(dev_priv, &results);
}
-static void ilk_update_wm(struct drm_crtc *crtc)
+static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
-
- WARN_ON(cstate->base.active != intel_crtc->active);
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
- /*
- * IVB workaround: must disable low power watermarks for at least
- * one frame before enabling scaling. LP watermarks can be re-enabled
- * when scaling is disabled.
- *
- * WaCxSRDisabledForSpriteScaling:ivb
- */
- if (cstate->disable_lp_wm) {
- ilk_disable_lp_wm(crtc->dev);
- intel_wait_for_vblank(crtc->dev, intel_crtc->pipe);
- }
+ mutex_lock(&dev_priv->wm.wm_mutex);
+ intel_crtc->wm.active.ilk = cstate->wm.intermediate;
+ ilk_program_watermarks(dev_priv);
+ mutex_unlock(&dev_priv->wm.wm_mutex);
+}
- intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
+static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
+{
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
- ilk_program_watermarks(cstate);
+ mutex_lock(&dev_priv->wm.wm_mutex);
+ if (cstate->wm.need_postvbl_update) {
+ intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
+ ilk_program_watermarks(dev_priv);
+ }
+ mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void skl_pipe_wm_active_state(uint32_t val,
@@ -4354,7 +4441,7 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
* the hw runs at the minimal clock before selecting the desired
* frequency, if the down threshold expires in that window we will not
* receive a down interrupt. */
- if (IS_GEN9(dev_priv->dev)) {
+ if (IS_GEN9(dev_priv)) {
limits = (dev_priv->rps.max_freq_softlimit) << 23;
if (val <= dev_priv->rps.min_freq_softlimit)
limits |= (dev_priv->rps.min_freq_softlimit) << 14;
@@ -4639,7 +4726,7 @@ void intel_set_rps(struct drm_device *dev, u8 val)
gen6_set_rps(dev, val);
}
-static void gen9_disable_rps(struct drm_device *dev)
+static void gen9_disable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4647,12 +4734,20 @@ static void gen9_disable_rps(struct drm_device *dev)
I915_WRITE(GEN9_PG_ENABLE, 0);
}
+static void gen9_disable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_RP_CONTROL, 0);
+}
+
static void gen6_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
+ I915_WRITE(GEN6_RP_CONTROL, 0);
}
static void cherryview_disable_rps(struct drm_device *dev)
@@ -4696,7 +4791,8 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool enable_rc6 = true;
unsigned long rc6_ctx_base;
@@ -4710,9 +4806,9 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
* for this check.
*/
rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
- if (!((rc6_ctx_base >= dev_priv->gtt.stolen_reserved_base) &&
- (rc6_ctx_base + PAGE_SIZE <= dev_priv->gtt.stolen_reserved_base +
- dev_priv->gtt.stolen_reserved_size))) {
+ if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
+ (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
+ ggtt->stolen_reserved_size))) {
DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
enable_rc6 = false;
}
@@ -4855,6 +4951,16 @@ static void gen9_enable_rps(struct drm_device *dev)
/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ /*
+ * The BIOS can leave HW Turbo enabled, so explicitly clear the
+ * Control register to avoid an inconsistency with the debugfs
+ * interface, which would otherwise report Turbo as enabled even
+ * though the user does not expect that once WaGsvDisableTurbo
+ * is applied. Beyond that, leaving Turbo enabled in the Control
+ * register is harmless, as the Up/Down interrupts remain masked.
+ */
+ gen9_disable_rps(dev);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return;
}
@@ -4873,7 +4979,7 @@ static void gen9_enable_rps(struct drm_device *dev)
* Up/Down EI & threshold registers, as well as the RP_CONTROL,
* RP_INTERRUPT_LIMITS & RPNSWREQ registers */
dev_priv->rps.power = HIGH_POWER; /* force a reset */
- gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -4881,9 +4987,8 @@ static void gen9_enable_rps(struct drm_device *dev)
static void gen9_enable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
uint32_t rc6_mask = 0;
- int unused;
/* 1a: Software RC state - RC0 */
I915_WRITE(GEN6_RC_STATE, 0);
@@ -4904,8 +5009,8 @@ static void gen9_enable_rc6(struct drm_device *dev)
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_ring(ring, dev_priv, unused)
- I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+ for_each_engine(engine, dev_priv)
+ I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
if (HAS_GUC_UCODE(dev))
I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
@@ -4951,9 +5056,8 @@ static void gen9_enable_rc6(struct drm_device *dev)
static void gen8_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
uint32_t rc6_mask = 0;
- int unused;
/* 1a: Software RC state - RC0 */
I915_WRITE(GEN6_RC_STATE, 0);
@@ -4972,8 +5076,8 @@ static void gen8_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_ring(ring, dev_priv, unused)
- I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+ for_each_engine(engine, dev_priv)
+ I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
if (IS_BROADWELL(dev))
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
@@ -5033,11 +5137,11 @@ static void gen8_enable_rps(struct drm_device *dev)
static void gen6_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
- int i, ret;
+ int ret;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -5050,7 +5154,8 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_STATE, 0);
/* Clear the DBG now so we don't confuse earlier errors */
- if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+ gtfifodbg = I915_READ(GTFIFODBG);
+ if (gtfifodbg) {
DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
I915_WRITE(GTFIFODBG, gtfifodbg);
}
@@ -5069,8 +5174,8 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
- for_each_ring(ring, dev_priv, i)
- I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+ for_each_engine(engine, dev_priv)
+ I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
@@ -5355,9 +5460,9 @@ static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
static void cherryview_setup_pctx(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long pctx_paddr, paddr;
- struct i915_gtt *gtt = &dev_priv->gtt;
u32 pcbr;
int pctx_size = 32*1024;
@@ -5365,7 +5470,7 @@ static void cherryview_setup_pctx(struct drm_device *dev)
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
paddr = (dev_priv->mm.stolen_base +
- (gtt->stolen_size - pctx_size));
+ (ggtt->stolen_size - pctx_size));
pctx_paddr = (paddr & (~4095));
I915_WRITE(VLV_PCBR, pctx_paddr);
@@ -5433,6 +5538,17 @@ static void valleyview_cleanup_pctx(struct drm_device *dev)
dev_priv->vlv_pctx = NULL;
}
+static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
+{
+ dev_priv->rps.gpll_ref_freq =
+ vlv_get_cck_clock(dev_priv, "GPLL ref",
+ CCK_GPLL_CLOCK_CONTROL,
+ dev_priv->czclk_freq);
+
+ DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
+ dev_priv->rps.gpll_ref_freq);
+}
+
static void valleyview_init_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5440,6 +5556,8 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
valleyview_setup_pctx(dev);
+ vlv_init_gpll_ref_freq(dev_priv);
+
mutex_lock(&dev_priv->rps.hw_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
@@ -5497,6 +5615,8 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
cherryview_setup_pctx(dev);
+ vlv_init_gpll_ref_freq(dev_priv);
+
mutex_lock(&dev_priv->rps.hw_lock);
mutex_lock(&dev_priv->sb_lock);
@@ -5561,13 +5681,13 @@ static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
static void cherryview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
u32 gtfifodbg, val, rc6_mode = 0, pcbr;
- int i;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- gtfifodbg = I915_READ(GTFIFODBG);
+ gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
+ GT_FIFO_FREE_ENTRIES_CHV);
if (gtfifodbg) {
DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
gtfifodbg);
@@ -5588,8 +5708,8 @@ static void cherryview_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_ring(ring, dev_priv, i)
- I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+ for_each_engine(engine, dev_priv)
+ I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
/* TO threshold set to 500 us ( 0x186 * 1.28 us) */
@@ -5648,10 +5768,10 @@ static void cherryview_enable_rps(struct drm_device *dev)
dev_priv->rps.cur_freq);
DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
- dev_priv->rps.efficient_freq);
+ intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
+ dev_priv->rps.idle_freq);
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -5659,15 +5779,15 @@ static void cherryview_enable_rps(struct drm_device *dev)
static void valleyview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
u32 gtfifodbg, val, rc6_mode = 0;
- int i;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
valleyview_check_pctx(dev_priv);
- if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+ gtfifodbg = I915_READ(GTFIFODBG);
+ if (gtfifodbg) {
DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
gtfifodbg);
I915_WRITE(GTFIFODBG, gtfifodbg);
@@ -5699,8 +5819,8 @@ static void valleyview_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
- for_each_ring(ring, dev_priv, i)
- I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+ for_each_engine(engine, dev_priv)
+ I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
@@ -5738,10 +5858,10 @@ static void valleyview_enable_rps(struct drm_device *dev)
dev_priv->rps.cur_freq);
DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
- dev_priv->rps.efficient_freq);
+ intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
+ dev_priv->rps.idle_freq);
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -6076,17 +6196,16 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
bool i915_gpu_busy(void)
{
struct drm_i915_private *dev_priv;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
bool ret = false;
- int i;
spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev)
goto out_unlock;
dev_priv = i915_mch_dev;
- for_each_ring(ring, dev_priv, i)
- ret |= !list_empty(&ring->request_list);
+ for_each_engine(engine, dev_priv)
+ ret |= !list_empty(&engine->request_list);
out_unlock:
spin_unlock_irq(&mchdev_lock);
@@ -6306,9 +6425,10 @@ void intel_disable_gt_powersave(struct drm_device *dev)
intel_suspend_gt_powersave(dev);
mutex_lock(&dev_priv->rps.hw_lock);
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_INFO(dev)->gen >= 9) {
+ gen9_disable_rc6(dev);
gen9_disable_rps(dev);
- else if (IS_CHERRYVIEW(dev))
+ } else if (IS_CHERRYVIEW(dev))
cherryview_disable_rps(dev);
else if (IS_VALLEYVIEW(dev))
valleyview_disable_rps(dev);
@@ -6715,6 +6835,38 @@ static void lpt_suspend_hw(struct drm_device *dev)
}
}
+static void kabylake_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ gen9_init_clock_gating(dev);
+
+ /* WaDisableSDEUnitClockGating:kbl */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableGamClockGating:kbl */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+ GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaFbcNukeOnHostModify:kbl */
+ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
+}
+
+static void skylake_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ gen9_init_clock_gating(dev);
+
+ /* WaFbcNukeOnHostModify:skl */
+ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
+}
+
static void broadwell_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6929,23 +7081,10 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
gen6_check_mch_setup(dev);
}
-static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
-
- /*
- * Disable trickle feed and enable pnd deadline calculation
- */
- I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
- I915_WRITE(CBR1_VLV, 0);
-}
-
static void valleyview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- vlv_init_display_clock_gating(dev_priv);
-
/* WaDisableEarlyCull:vlv */
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
@@ -7028,8 +7167,6 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- vlv_init_display_clock_gating(dev_priv);
-
/* WaVSRefCountFullforceMissDisable:chv */
/* WaDSRefCountFullforceMissDisable:chv */
I915_WRITE(GEN7_FF_THREAD_MODE,
@@ -7169,8 +7306,7 @@ void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->display.init_clock_gating)
- dev_priv->display.init_clock_gating(dev);
+ dev_priv->display.init_clock_gating(dev);
}
void intel_suspend_hw(struct drm_device *dev)
@@ -7179,6 +7315,60 @@ void intel_suspend_hw(struct drm_device *dev)
lpt_suspend_hw(dev);
}
+static void nop_init_clock_gating(struct drm_device *dev)
+{
+ DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
+}
+
+/**
+ * intel_init_clock_gating_hooks - set up the clock gating hooks
+ * @dev_priv: device private
+ *
+ * Set up the hooks that configure which clocks of a given platform can be
+ * gated and also apply various GT and display specific workarounds for these
+ * platforms. Note that some GT specific workarounds are applied separately
+ * when GPU contexts or batchbuffers start their execution.
+ */
+void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
+{
+ if (IS_SKYLAKE(dev_priv))
+ dev_priv->display.init_clock_gating = skylake_init_clock_gating;
+ else if (IS_KABYLAKE(dev_priv))
+ dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
+ else if (IS_BROXTON(dev_priv))
+ dev_priv->display.init_clock_gating = bxt_init_clock_gating;
+ else if (IS_BROADWELL(dev_priv))
+ dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
+ else if (IS_CHERRYVIEW(dev_priv))
+ dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
+ else if (IS_HASWELL(dev_priv))
+ dev_priv->display.init_clock_gating = haswell_init_clock_gating;
+ else if (IS_IVYBRIDGE(dev_priv))
+ dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+ else if (IS_VALLEYVIEW(dev_priv))
+ dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
+ else if (IS_GEN6(dev_priv))
+ dev_priv->display.init_clock_gating = gen6_init_clock_gating;
+ else if (IS_GEN5(dev_priv))
+ dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
+ else if (IS_G4X(dev_priv))
+ dev_priv->display.init_clock_gating = g4x_init_clock_gating;
+ else if (IS_CRESTLINE(dev_priv))
+ dev_priv->display.init_clock_gating = crestline_init_clock_gating;
+ else if (IS_BROADWATER(dev_priv))
+ dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
+ else if (IS_GEN3(dev_priv))
+ dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+ else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
+ dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+ else if (IS_GEN2(dev_priv))
+ dev_priv->display.init_clock_gating = i830_init_clock_gating;
+ else {
+ MISSING_CASE(INTEL_DEVID(dev_priv));
+ dev_priv->display.init_clock_gating = nop_init_clock_gating;
+ }
+}
+
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
@@ -7195,10 +7385,6 @@ void intel_init_pm(struct drm_device *dev)
/* For FIFO watermark updates */
if (INTEL_INFO(dev)->gen >= 9) {
skl_setup_wm_latency(dev);
-
- if (IS_BROXTON(dev))
- dev_priv->display.init_clock_gating =
- bxt_init_clock_gating;
dev_priv->display.update_wm = skl_update_wm;
} else if (HAS_PCH_SPLIT(dev)) {
ilk_setup_wm_latency(dev);
@@ -7207,36 +7393,23 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
(!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
- dev_priv->display.update_wm = ilk_update_wm;
dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
- dev_priv->display.program_watermarks = ilk_program_watermarks;
+ dev_priv->display.compute_intermediate_wm =
+ ilk_compute_intermediate_wm;
+ dev_priv->display.initial_watermarks =
+ ilk_initial_watermarks;
+ dev_priv->display.optimize_watermarks =
+ ilk_optimize_watermarks;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
}
-
- if (IS_GEN5(dev))
- dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
- else if (IS_GEN6(dev))
- dev_priv->display.init_clock_gating = gen6_init_clock_gating;
- else if (IS_IVYBRIDGE(dev))
- dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
- else if (IS_HASWELL(dev))
- dev_priv->display.init_clock_gating = haswell_init_clock_gating;
- else if (INTEL_INFO(dev)->gen == 8)
- dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
} else if (IS_CHERRYVIEW(dev)) {
vlv_setup_wm_latency(dev);
-
dev_priv->display.update_wm = vlv_update_wm;
- dev_priv->display.init_clock_gating =
- cherryview_init_clock_gating;
} else if (IS_VALLEYVIEW(dev)) {
vlv_setup_wm_latency(dev);
-
dev_priv->display.update_wm = vlv_update_wm;
- dev_priv->display.init_clock_gating =
- valleyview_init_clock_gating;
} else if (IS_PINEVIEW(dev)) {
if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
dev_priv->is_ddr3,
@@ -7252,20 +7425,13 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = NULL;
} else
dev_priv->display.update_wm = pineview_update_wm;
- dev_priv->display.init_clock_gating = gen3_init_clock_gating;
} else if (IS_G4X(dev)) {
dev_priv->display.update_wm = g4x_update_wm;
- dev_priv->display.init_clock_gating = g4x_init_clock_gating;
} else if (IS_GEN4(dev)) {
dev_priv->display.update_wm = i965_update_wm;
- if (IS_CRESTLINE(dev))
- dev_priv->display.init_clock_gating = crestline_init_clock_gating;
- else if (IS_BROADWATER(dev))
- dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
} else if (IS_GEN3(dev)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
- dev_priv->display.init_clock_gating = gen3_init_clock_gating;
} else if (IS_GEN2(dev)) {
if (INTEL_INFO(dev)->num_pipes == 1) {
dev_priv->display.update_wm = i845_update_wm;
@@ -7274,11 +7440,6 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i830_get_fifo_size;
}
-
- if (IS_I85X(dev) || IS_I865G(dev))
- dev_priv->display.init_clock_gating = i85x_init_clock_gating;
- else
- dev_priv->display.init_clock_gating = i830_init_clock_gating;
} else {
DRM_ERROR("unexpected fall-through in intel_init_pm\n");
}
@@ -7332,78 +7493,43 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val
return 0;
}
-static int vlv_gpu_freq_div(unsigned int czclk_freq)
-{
- switch (czclk_freq) {
- case 200:
- return 10;
- case 267:
- return 12;
- case 320:
- case 333:
- return 16;
- case 400:
- return 20;
- default:
- return -1;
- }
-}
-
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
- int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
-
- div = vlv_gpu_freq_div(czclk_freq);
- if (div < 0)
- return div;
-
- return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
+ /*
+ * N = val - 0xb7
+ * Slow = Fast = GPLL ref * N
+ */
+ return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
}
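As a hedged aside, a standalone sketch of the new VLV opcode/MHz arithmetic; DIV_ROUND_CLOSEST below is a simplified positive-only stand-in for the kernel macro, and the GPLL reference frequency is an invented value for illustration only:

#include <stdio.h>

/* positive-only stand-in for the kernel's DIV_ROUND_CLOSEST() */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static int byt_gpu_freq(int gpll_ref_freq, int val)
{
	/* N = val - 0xb7, Slow = Fast = GPLL ref * N; result in MHz */
	return DIV_ROUND_CLOSEST(gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(int gpll_ref_freq, int mhz)
{
	return DIV_ROUND_CLOSEST(1000 * mhz, gpll_ref_freq) + 0xb7;
}

int main(void)
{
	int gpll_ref_freq = 5400;	/* kHz, invented for illustration */
	int val = 0xc8;
	int mhz = byt_gpu_freq(gpll_ref_freq, val);

	/* the conversion round-trips back to the same opcode */
	printf("opcode 0x%x -> %d MHz -> opcode 0x%x\n",
	       val, mhz, byt_freq_opcode(gpll_ref_freq, mhz));
	return 0;
}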
static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
-
- mul = vlv_gpu_freq_div(czclk_freq);
- if (mul < 0)
- return mul;
-
- return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
+ return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
}
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
- int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
-
- div = vlv_gpu_freq_div(czclk_freq);
- if (div < 0)
- return div;
- div /= 2;
-
- return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
+ /*
+ * N = val / 2
+ * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
+ */
+ return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
}
static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
-
- mul = vlv_gpu_freq_div(czclk_freq);
- if (mul < 0)
- return mul;
- mul /= 2;
-
/* CHV needs even values */
- return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
+ return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
}
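The CHV pair differs only by the extra factor of two and by rounding the opcode to an even value; a worked example with an invented GPLL reference of 6000 kHz:
/*
 * chv_gpu_freq:    MHz = gpll_ref * val / (2 * 2 * 1000)
 *                  val = 280  ->  6000 * 280 / 4000 = 420 MHz
 * chv_freq_opcode: val = round(2 * 1000 * MHz / gpll_ref) * 2
 *                  MHz = 420  ->  2 * 1000 * 420 / 6000 = 140, * 2 = 280
 * The trailing "* 2" keeps the returned opcode even, as CHV requires.
 */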
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
- if (IS_GEN9(dev_priv->dev))
+ if (IS_GEN9(dev_priv))
return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
GEN9_FREQ_SCALER);
- else if (IS_CHERRYVIEW(dev_priv->dev))
+ else if (IS_CHERRYVIEW(dev_priv))
return chv_gpu_freq(dev_priv, val);
- else if (IS_VALLEYVIEW(dev_priv->dev))
+ else if (IS_VALLEYVIEW(dev_priv))
return byt_gpu_freq(dev_priv, val);
else
return val * GT_FREQUENCY_MULTIPLIER;
@@ -7411,12 +7537,12 @@ int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- if (IS_GEN9(dev_priv->dev))
+ if (IS_GEN9(dev_priv))
return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
GT_FREQUENCY_MULTIPLIER);
- else if (IS_CHERRYVIEW(dev_priv->dev))
+ else if (IS_CHERRYVIEW(dev_priv))
return chv_freq_opcode(dev_priv, val);
- else if (IS_VALLEYVIEW(dev_priv->dev))
+ else if (IS_VALLEYVIEW(dev_priv))
return byt_freq_opcode(dev_priv, val);
else
return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
@@ -7433,7 +7559,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
struct drm_i915_gem_request *req = boost->req;
if (!i915_gem_request_completed(req, true))
- gen6_rps_boost(to_i915(req->ring->dev), NULL,
+ gen6_rps_boost(to_i915(req->engine->dev), NULL,
req->emitted_jiffies);
i915_gem_request_unreference__unlocked(req);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index bd322d8fb..a788d1e95 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -546,7 +546,8 @@ static void hsw_psr_disable(struct intel_dp *intel_dp)
/* Wait till PSR is idle */
if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
- EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
+ EDP_PSR_STATUS_STATE_MASK) == 0,
+ 2 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
dev_priv->psr.active = false;
@@ -601,7 +602,7 @@ static void intel_psr_work(struct work_struct *work)
* PSR might take some time to get fully disabled
* and be ready for re-enable.
*/
- if (HAS_DDI(dev_priv->dev)) {
+ if (HAS_DDI(dev_priv)) {
if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
@@ -819,8 +820,7 @@ void intel_psr_init(struct drm_device *dev)
/* Per platform default */
if (i915.enable_psr == -1) {
- if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
- IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
i915.enable_psr = 1;
else
i915.enable_psr = 0;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9121646d7..68c5af079 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -53,25 +53,19 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
ringbuf->tail, ringbuf->size);
}
-int intel_ring_space(struct intel_ringbuffer *ringbuf)
+bool intel_engine_stopped(struct intel_engine_cs *engine)
{
- intel_ring_update_space(ringbuf);
- return ringbuf->space;
-}
-
-bool intel_ring_stopped(struct intel_engine_cs *ring)
-{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
}
-static void __intel_ring_advance(struct intel_engine_cs *ring)
+static void __intel_ring_advance(struct intel_engine_cs *engine)
{
- struct intel_ringbuffer *ringbuf = ring->buffer;
+ struct intel_ringbuffer *ringbuf = engine->buffer;
ringbuf->tail &= ringbuf->size - 1;
- if (intel_ring_stopped(ring))
+ if (intel_engine_stopped(engine))
return;
- ring->write_tail(ring, ringbuf->tail);
+ engine->write_tail(engine, ringbuf->tail);
}
static int
@@ -79,7 +73,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains,
u32 flush_domains)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
u32 cmd;
int ret;
@@ -94,9 +88,9 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
if (ret)
return ret;
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, cmd);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
return 0;
}
@@ -106,8 +100,8 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains,
u32 flush_domains)
{
- struct intel_engine_cs *ring = req->ring;
- struct drm_device *dev = ring->dev;
+ struct intel_engine_cs *engine = req->engine;
+ struct drm_device *dev = engine->dev;
u32 cmd;
int ret;
@@ -153,9 +147,9 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
if (ret)
return ret;
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, cmd);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
return 0;
}
@@ -200,34 +194,34 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
- u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ struct intel_engine_cs *engine = req->engine;
+ u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
- intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+ intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_STALL_AT_SCOREBOARD);
- intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
- intel_ring_emit(ring, 0); /* low dword */
- intel_ring_emit(ring, 0); /* high dword */
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+ intel_ring_emit(engine, 0); /* low dword */
+ intel_ring_emit(engine, 0); /* high dword */
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
- intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
- intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(engine, PIPE_CONTROL_QW_WRITE);
+ intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
return 0;
}
@@ -236,9 +230,9 @@ static int
gen6_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains, u32 flush_domains)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
u32 flags = 0;
- u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -276,11 +270,11 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
- intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(engine, flags);
+ intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(engine, 0);
+ intel_ring_advance(engine);
return 0;
}
@@ -288,19 +282,19 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
- intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+ intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_STALL_AT_SCOREBOARD);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, 0);
+ intel_ring_advance(engine);
return 0;
}
@@ -309,9 +303,9 @@ static int
gen7_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains, u32 flush_domains)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
u32 flags = 0;
- u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
/*
@@ -360,11 +354,11 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
- intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(engine, flags);
+ intel_ring_emit(engine, scratch_addr);
+ intel_ring_emit(engine, 0);
+ intel_ring_advance(engine);
return 0;
}
@@ -373,20 +367,20 @@ static int
gen8_emit_pipe_control(struct drm_i915_gem_request *req,
u32 flags, u32 scratch_addr)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
+ intel_ring_emit(engine, flags);
+ intel_ring_emit(engine, scratch_addr);
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, 0);
+ intel_ring_advance(engine);
return 0;
}
@@ -396,7 +390,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
- u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
flags |= PIPE_CONTROL_CS_STALL;
@@ -429,51 +423,51 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
return gen8_emit_pipe_control(req, flags, scratch_addr);
}
-static void ring_write_tail(struct intel_engine_cs *ring,
+static void ring_write_tail(struct intel_engine_cs *engine,
u32 value)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- I915_WRITE_TAIL(ring, value);
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ I915_WRITE_TAIL(engine, value);
}
-u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
+u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
u64 acthd;
- if (INTEL_INFO(ring->dev)->gen >= 8)
- acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
- RING_ACTHD_UDW(ring->mmio_base));
- else if (INTEL_INFO(ring->dev)->gen >= 4)
- acthd = I915_READ(RING_ACTHD(ring->mmio_base));
+ if (INTEL_INFO(engine->dev)->gen >= 8)
+ acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
+ RING_ACTHD_UDW(engine->mmio_base));
+ else if (INTEL_INFO(engine->dev)->gen >= 4)
+ acthd = I915_READ(RING_ACTHD(engine->mmio_base));
else
acthd = I915_READ(ACTHD);
return acthd;
}
-static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
+static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
u32 addr;
addr = dev_priv->status_page_dmah->busaddr;
- if (INTEL_INFO(ring->dev)->gen >= 4)
+ if (INTEL_INFO(engine->dev)->gen >= 4)
addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
I915_WRITE(HWS_PGA, addr);
}
-static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
+static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_device *dev = engine->dev;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
i915_reg_t mmio;
/* The ring status page addresses are no longer next to the rest of
* the ring registers as of gen7.
*/
if (IS_GEN7(dev)) {
- switch (ring->id) {
+ switch (engine->id) {
case RCS:
mmio = RENDER_HWS_PGA_GEN7;
break;
@@ -492,14 +486,14 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
- } else if (IS_GEN6(ring->dev)) {
- mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+ } else if (IS_GEN6(engine->dev)) {
+ mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
/* XXX: gen8 returns to sanity */
- mmio = RING_HWS_PGA(ring->mmio_base);
+ mmio = RING_HWS_PGA(engine->mmio_base);
}
- I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+ I915_WRITE(mmio, (u32)engine->status_page.gfx_addr);
POSTING_READ(mmio);
/*
@@ -510,10 +504,10 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
* invalidating the TLB?
*/
if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
- i915_reg_t reg = RING_INSTPM(ring->mmio_base);
+ i915_reg_t reg = RING_INSTPM(engine->mmio_base);
/* ring should be idle before issuing a sync flush*/
- WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+ WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
I915_WRITE(reg,
_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
@@ -521,117 +515,125 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
1000))
DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
- ring->name);
+ engine->name);
}
}
-static bool stop_ring(struct intel_engine_cs *ring)
+static bool stop_ring(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(ring->dev);
+ struct drm_i915_private *dev_priv = to_i915(engine->dev);
- if (!IS_GEN2(ring->dev)) {
- I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
- if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
- DRM_ERROR("%s : timed out trying to stop ring\n", ring->name);
+ if (!IS_GEN2(engine->dev)) {
+ I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
+ if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
+ DRM_ERROR("%s : timed out trying to stop ring\n",
+ engine->name);
/* Sometimes we observe that the idle flag is not
* set even though the ring is empty. So double
* check before giving up.
*/
- if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring))
+ if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
return false;
}
}
- I915_WRITE_CTL(ring, 0);
- I915_WRITE_HEAD(ring, 0);
- ring->write_tail(ring, 0);
+ I915_WRITE_CTL(engine, 0);
+ I915_WRITE_HEAD(engine, 0);
+ engine->write_tail(engine, 0);
- if (!IS_GEN2(ring->dev)) {
- (void)I915_READ_CTL(ring);
- I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+ if (!IS_GEN2(engine->dev)) {
+ (void)I915_READ_CTL(engine);
+ I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
}
- return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
+ return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}
-static int init_ring_common(struct intel_engine_cs *ring)
+void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
+}
+
+static int init_ring_common(struct intel_engine_cs *engine)
+{
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ringbuffer *ringbuf = ring->buffer;
+ struct intel_ringbuffer *ringbuf = engine->buffer;
struct drm_i915_gem_object *obj = ringbuf->obj;
int ret = 0;
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- if (!stop_ring(ring)) {
+ if (!stop_ring(engine)) {
/* G45 ring initialization often fails to reset head to zero */
DRM_DEBUG_KMS("%s head not reset to zero "
"ctl %08x head %08x tail %08x start %08x\n",
- ring->name,
- I915_READ_CTL(ring),
- I915_READ_HEAD(ring),
- I915_READ_TAIL(ring),
- I915_READ_START(ring));
+ engine->name,
+ I915_READ_CTL(engine),
+ I915_READ_HEAD(engine),
+ I915_READ_TAIL(engine),
+ I915_READ_START(engine));
- if (!stop_ring(ring)) {
+ if (!stop_ring(engine)) {
DRM_ERROR("failed to set %s head to zero "
"ctl %08x head %08x tail %08x start %08x\n",
- ring->name,
- I915_READ_CTL(ring),
- I915_READ_HEAD(ring),
- I915_READ_TAIL(ring),
- I915_READ_START(ring));
+ engine->name,
+ I915_READ_CTL(engine),
+ I915_READ_HEAD(engine),
+ I915_READ_TAIL(engine),
+ I915_READ_START(engine));
ret = -EIO;
goto out;
}
}
if (I915_NEED_GFX_HWS(dev))
- intel_ring_setup_status_page(ring);
+ intel_ring_setup_status_page(engine);
else
- ring_setup_phys_status_page(ring);
+ ring_setup_phys_status_page(engine);
/* Enforce ordering by reading HEAD register back */
- I915_READ_HEAD(ring);
+ I915_READ_HEAD(engine);
/* Initialize the ring. This must happen _after_ we've cleared the ring
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
* register values. */
- I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
+ I915_WRITE_START(engine, i915_gem_obj_ggtt_offset(obj));
/* WaClearRingBufHeadRegAtInit:ctg,elk */
- if (I915_READ_HEAD(ring))
+ if (I915_READ_HEAD(engine))
DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
- ring->name, I915_READ_HEAD(ring));
- I915_WRITE_HEAD(ring, 0);
- (void)I915_READ_HEAD(ring);
+ engine->name, I915_READ_HEAD(engine));
+ I915_WRITE_HEAD(engine, 0);
+ (void)I915_READ_HEAD(engine);
- I915_WRITE_CTL(ring,
+ I915_WRITE_CTL(engine,
((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID);
/* If the head is still not zero, the ring is dead */
- if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
- I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
- (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
+ if (wait_for((I915_READ_CTL(engine) & RING_VALID) != 0 &&
+ I915_READ_START(engine) == i915_gem_obj_ggtt_offset(obj) &&
+ (I915_READ_HEAD(engine) & HEAD_ADDR) == 0, 50)) {
DRM_ERROR("%s initialization failed "
"ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
- ring->name,
- I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
- I915_READ_HEAD(ring), I915_READ_TAIL(ring),
- I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
+ engine->name,
+ I915_READ_CTL(engine),
+ I915_READ_CTL(engine) & RING_VALID,
+ I915_READ_HEAD(engine), I915_READ_TAIL(engine),
+ I915_READ_START(engine),
+ (unsigned long)i915_gem_obj_ggtt_offset(obj));
ret = -EIO;
goto out;
}
ringbuf->last_retired_head = -1;
- ringbuf->head = I915_READ_HEAD(ring);
- ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+ ringbuf->head = I915_READ_HEAD(engine);
+ ringbuf->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
intel_ring_update_space(ringbuf);
- memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
+ intel_engine_init_hangcheck(engine);
out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -640,59 +642,60 @@ out:
}
void
-intel_fini_pipe_control(struct intel_engine_cs *ring)
+intel_fini_pipe_control(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
- if (ring->scratch.obj == NULL)
+ if (engine->scratch.obj == NULL)
return;
if (INTEL_INFO(dev)->gen >= 5) {
- kunmap(sg_page(ring->scratch.obj->pages->sgl));
- i915_gem_object_ggtt_unpin(ring->scratch.obj);
+ kunmap(sg_page(engine->scratch.obj->pages->sgl));
+ i915_gem_object_ggtt_unpin(engine->scratch.obj);
}
- drm_gem_object_unreference(&ring->scratch.obj->base);
- ring->scratch.obj = NULL;
+ drm_gem_object_unreference(&engine->scratch.obj->base);
+ engine->scratch.obj = NULL;
}
int
-intel_init_pipe_control(struct intel_engine_cs *ring)
+intel_init_pipe_control(struct intel_engine_cs *engine)
{
int ret;
- WARN_ON(ring->scratch.obj);
+ WARN_ON(engine->scratch.obj);
- ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
- if (ring->scratch.obj == NULL) {
+ engine->scratch.obj = i915_gem_alloc_object(engine->dev, 4096);
+ if (engine->scratch.obj == NULL) {
DRM_ERROR("Failed to allocate seqno page\n");
ret = -ENOMEM;
goto err;
}
- ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
+ ret = i915_gem_object_set_cache_level(engine->scratch.obj,
+ I915_CACHE_LLC);
if (ret)
goto err_unref;
- ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
+ ret = i915_gem_obj_ggtt_pin(engine->scratch.obj, 4096, 0);
if (ret)
goto err_unref;
- ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
- ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
- if (ring->scratch.cpu_page == NULL) {
+ engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(engine->scratch.obj);
+ engine->scratch.cpu_page = kmap(sg_page(engine->scratch.obj->pages->sgl));
+ if (engine->scratch.cpu_page == NULL) {
ret = -ENOMEM;
goto err_unpin;
}
DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
- ring->name, ring->scratch.gtt_offset);
+ engine->name, engine->scratch.gtt_offset);
return 0;
err_unpin:
- i915_gem_object_ggtt_unpin(ring->scratch.obj);
+ i915_gem_object_ggtt_unpin(engine->scratch.obj);
err_unref:
- drm_gem_object_unreference(&ring->scratch.obj->base);
+ drm_gem_object_unreference(&engine->scratch.obj->base);
err:
return ret;
}
@@ -700,15 +703,15 @@ err:
static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
int ret, i;
- struct intel_engine_cs *ring = req->ring;
- struct drm_device *dev = ring->dev;
+ struct intel_engine_cs *engine = req->engine;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
if (w->count == 0)
return 0;
- ring->gpu_caches_dirty = true;
+ engine->gpu_caches_dirty = true;
ret = intel_ring_flush_all_caches(req);
if (ret)
return ret;
@@ -717,16 +720,16 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
if (ret)
return ret;
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(w->count));
for (i = 0; i < w->count; i++) {
- intel_ring_emit_reg(ring, w->reg[i].addr);
- intel_ring_emit(ring, w->reg[i].value);
+ intel_ring_emit_reg(engine, w->reg[i].addr);
+ intel_ring_emit(engine, w->reg[i].value);
}
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(engine, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_advance(engine);
- ring->gpu_caches_dirty = true;
+ engine->gpu_caches_dirty = true;
ret = intel_ring_flush_all_caches(req);
if (ret)
return ret;
@@ -789,25 +792,26 @@ static int wa_add(struct drm_i915_private *dev_priv,
#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
-static int wa_ring_whitelist_reg(struct intel_engine_cs *ring, i915_reg_t reg)
+static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
+ i915_reg_t reg)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
struct i915_workarounds *wa = &dev_priv->workarounds;
- const uint32_t index = wa->hw_whitelist_count[ring->id];
+ const uint32_t index = wa->hw_whitelist_count[engine->id];
if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
return -EINVAL;
- WA_WRITE(RING_FORCE_TO_NONPRIV(ring->mmio_base, index),
+ WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
i915_mmio_reg_offset(reg));
- wa->hw_whitelist_count[ring->id]++;
+ wa->hw_whitelist_count[engine->id]++;
return 0;
}
-static int gen8_init_workarounds(struct intel_engine_cs *ring)
+static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
@@ -857,13 +861,13 @@ static int gen8_init_workarounds(struct intel_engine_cs *ring)
return 0;
}
-static int bdw_init_workarounds(struct intel_engine_cs *ring)
+static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
int ret;
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- ret = gen8_init_workarounds(ring);
+ ret = gen8_init_workarounds(engine);
if (ret)
return ret;
@@ -886,13 +890,13 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
return 0;
}
-static int chv_init_workarounds(struct intel_engine_cs *ring)
+static int chv_init_workarounds(struct intel_engine_cs *engine)
{
int ret;
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- ret = gen8_init_workarounds(ring);
+ ret = gen8_init_workarounds(engine);
if (ret)
return ret;
@@ -905,26 +909,30 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
return 0;
}
-static int gen9_init_workarounds(struct intel_engine_cs *ring)
+static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t tmp;
int ret;
- /* WaEnableLbsSlaRetryTimerDecrement:skl */
+ /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
+ I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+
+ /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
- /* WaDisableKillLogic:bxt,skl */
+ /* WaDisableKillLogic:bxt,skl,kbl */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
ECOCHK_DIS_TLB);
- /* WaDisablePartialInstShootdown:skl,bxt */
+ /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
+ /* WaDisablePartialInstShootdown:skl,bxt,kbl */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+ FLOW_CONTROL_ENABLE |
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
- /* Syncing dependencies between camera and graphics:skl,bxt */
+ /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
@@ -946,17 +954,18 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
*/
}
- /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
- if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER) || IS_BROXTON(dev))
- WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
- GEN9_ENABLE_YV12_BUGFIX);
+ /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
+ /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
+ GEN9_ENABLE_YV12_BUGFIX |
+ GEN9_ENABLE_GPGPU_PREEMPTION);
- /* Wa4x4STCOptimizationDisable:skl,bxt */
- /* WaDisablePartialResolveInVc:skl,bxt */
+ /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
+ /* WaDisablePartialResolveInVc:skl,bxt,kbl */
WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
- /* WaCcsTlbPrefetchDisable:skl,bxt */
+ /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_CCS_TLB_PREFETCH_ENABLE);
@@ -966,41 +975,67 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
PIXEL_MASK_CAMMING_DISABLE);
- /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
- tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
- if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
- IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
- tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
- WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
+ /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
+
+ /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
+ * both tied to WaForceContextSaveRestoreNonCoherent in some
+ * hsds for skl. We keep the tie for all gen9: the documentation
+ * is a bit hazy, so we want common behaviour even though there
+ * is no clear evidence that both are needed on kbl/bxt. This
+ * area has been a source of system hangs, so we play it safe
+ * and mimic skl regardless of what the bspec says.
+ *
+ * Use Force Non-Coherent whenever executing a 3D context. This
+ * is a workaround for a possible hang in the unlikely event
+ * a TLB invalidation occurs during a PSD flush.
+ */
+
+ /* WaForceEnableNonCoherent:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT);
+
+ /* WaDisableHDCInvalidation:skl,bxt,kbl */
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+ BDW_DISABLE_HDC_INVALIDATION);
- /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
- if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+ /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
+ if (IS_SKYLAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
- /* WaDisableSTUnitPowerOptimization:skl,bxt */
+ /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
- /* WaOCLCoherentLineFlush:skl,bxt */
+ /* WaOCLCoherentLineFlush:skl,bxt,kbl */
I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_FLUSH_COHERENT_LINES));
- /* WaEnablePreemptionGranularityControlByUMD:skl,bxt */
- ret= wa_ring_whitelist_reg(ring, GEN8_CS_CHICKEN1);
+ /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
+ ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
if (ret)
return ret;
- /* WaAllowUMDToModifyHDCChicken1:skl,bxt */
- ret = wa_ring_whitelist_reg(ring, GEN8_HDC_CHICKEN1);
+ /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
+ ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
+ if (ret)
+ return ret;
+
+ /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
+ ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
if (ret)
return ret;
return 0;
}
-static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
+static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u8 vals[3] = { 0, 0, 0 };
unsigned int i;
@@ -1040,13 +1075,13 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
return 0;
}
-static int skl_init_workarounds(struct intel_engine_cs *ring)
+static int skl_init_workarounds(struct intel_engine_cs *engine)
{
int ret;
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- ret = gen9_init_workarounds(ring);
+ ret = gen9_init_workarounds(engine);
if (ret)
return ret;
@@ -1085,22 +1120,6 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
- /* This is tied to WaForceContextSaveRestoreNonCoherent */
- if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
- /*
- *Use Force Non-Coherent whenever executing a 3D context. This
- * is a workaround for a possible hang in the unlikely event
- * a TLB invalidation occurs during a PSD flush.
- */
- /* WaForceEnableNonCoherent:skl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FORCE_NON_COHERENT);
-
- /* WaDisableHDCInvalidation:skl */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- BDW_DISABLE_HDC_INVALIDATION);
- }
-
/* WaBarrierPerformanceFixDisable:skl */
if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
WA_SET_BIT_MASKED(HDC_CHICKEN0,
@@ -1113,21 +1132,24 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ /* WaDisableGafsUnitClkGating:skl */
+ WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
/* WaDisableLSQCROPERFforOCL:skl */
- ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4);
+ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
if (ret)
return ret;
- return skl_tune_iz_hashing(ring);
+ return skl_tune_iz_hashing(engine);
}
-static int bxt_init_workarounds(struct intel_engine_cs *ring)
+static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
int ret;
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- ret = gen9_init_workarounds(ring);
+ ret = gen9_init_workarounds(engine);
if (ret)
return ret;
@@ -1158,48 +1180,108 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
/* WaDisableLSQCROPERFforOCL:bxt */
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
- ret = wa_ring_whitelist_reg(ring, GEN9_CS_DEBUG_MODE1);
+ ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
if (ret)
return ret;
- ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4);
+ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
if (ret)
return ret;
}
+ /* WaInsertDummyPushConstPs:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
return 0;
}
-int init_workarounds_ring(struct intel_engine_cs *ring)
+static int kbl_init_workarounds(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ int ret;
+
+ ret = gen9_init_workarounds(engine);
+ if (ret)
+ return ret;
+
+ /* WaEnableGapsTsvCreditFix:kbl */
+ I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+ GEN9_GAPS_TSV_CREDIT_DISABLE));
+
+ /* WaDisableDynamicCreditSharing:kbl */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ WA_SET_BIT(GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+
+ /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
+ if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FENCE_DEST_SLM_DISABLE);
+
+ /* GEN8_L3SQCREG4 has a dependency on the WA batch, so any new changes
+ * involving this register should also be added to the WA batch as required.
+ */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
+ /* WaDisableLSQCROPERFforOCL:kbl */
+ I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
+ GEN8_LQSC_RO_PERF_DIS);
+
+ /* WaInsertDummyPushConstPs:kbl */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ /* WaDisableGafsUnitClkGating:kbl */
+ WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableSbeCacheDispatchPortSharing:kbl */
+ WA_SET_BIT_MASKED(
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+
+ /* WaDisableLSQCROPERFforOCL:kbl */
+ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int init_workarounds_ring(struct intel_engine_cs *engine)
+{
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- WARN_ON(ring->id != RCS);
+ WARN_ON(engine->id != RCS);
dev_priv->workarounds.count = 0;
dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
if (IS_BROADWELL(dev))
- return bdw_init_workarounds(ring);
+ return bdw_init_workarounds(engine);
if (IS_CHERRYVIEW(dev))
- return chv_init_workarounds(ring);
+ return chv_init_workarounds(engine);
if (IS_SKYLAKE(dev))
- return skl_init_workarounds(ring);
+ return skl_init_workarounds(engine);
if (IS_BROXTON(dev))
- return bxt_init_workarounds(ring);
+ return bxt_init_workarounds(engine);
+
+ if (IS_KABYLAKE(dev_priv))
+ return kbl_init_workarounds(engine);
return 0;
}
-static int init_render_ring(struct intel_engine_cs *ring)
+static int init_render_ring(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret = init_ring_common(ring);
+ int ret = init_ring_common(engine);
if (ret)
return ret;
@@ -1242,14 +1324,14 @@ static int init_render_ring(struct intel_engine_cs *ring)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
if (HAS_L3_DPF(dev))
- I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
+ I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
- return init_workarounds_ring(ring);
+ return init_workarounds_ring(engine);
}
-static void render_ring_cleanup(struct intel_engine_cs *ring)
+static void render_ring_cleanup(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->semaphore_obj) {
@@ -1258,18 +1340,19 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
dev_priv->semaphore_obj = NULL;
}
- intel_fini_pipe_control(ring);
+ intel_fini_pipe_control(engine);
}
static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 8
- struct intel_engine_cs *signaller = signaller_req->ring;
+ struct intel_engine_cs *signaller = signaller_req->engine;
struct drm_device *dev = signaller->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *waiter;
- int i, ret, num_rings;
+ enum intel_engine_id id;
+ int ret, num_rings;
num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
@@ -1279,9 +1362,9 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
if (ret)
return ret;
- for_each_ring(waiter, dev_priv, i) {
+ for_each_engine_id(waiter, dev_priv, id) {
u32 seqno;
- u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+ u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
@@ -1295,7 +1378,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
intel_ring_emit(signaller, seqno);
intel_ring_emit(signaller, 0);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->id));
+ MI_SEMAPHORE_TARGET(waiter->hw_id));
intel_ring_emit(signaller, 0);
}
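
Each signalling request reserves one mailbox-update block per waiter on top of its own payload, which is what the num_dwords += (num_rings - 1) * MBOX_UPDATE_DWORDS line at the top of this hunk computes. A small worked sketch of that arithmetic, illustrative only, using the 8-dword gen8 RCS block defined above and the 4-dword payload that gen6_add_request passes in (the helper name is hypothetical):

/* Illustrative arithmetic only, not part of the patch. */
static unsigned int gen8_rcs_signal_budget(unsigned int payload_dwords,
                                           unsigned int num_rings)
{
        const unsigned int mbox_update_dwords = 8;      /* gen8 RCS block above */

        /* one block per waiter, i.e. every engine except the signaller */
        return payload_dwords + (num_rings - 1) * mbox_update_dwords;
}

/* e.g. with four enabled engines: gen8_rcs_signal_budget(4, 4) == 4 + 3 * 8 == 28 dwords */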
@@ -1306,11 +1389,12 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 6
- struct intel_engine_cs *signaller = signaller_req->ring;
+ struct intel_engine_cs *signaller = signaller_req->engine;
struct drm_device *dev = signaller->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *waiter;
- int i, ret, num_rings;
+ enum intel_engine_id id;
+ int ret, num_rings;
num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
@@ -1320,9 +1404,9 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
if (ret)
return ret;
- for_each_ring(waiter, dev_priv, i) {
+ for_each_engine_id(waiter, dev_priv, id) {
u32 seqno;
- u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+ u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
@@ -1334,7 +1418,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
intel_ring_emit(signaller, upper_32_bits(gtt_offset));
intel_ring_emit(signaller, seqno);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->id));
+ MI_SEMAPHORE_TARGET(waiter->hw_id));
intel_ring_emit(signaller, 0);
}
@@ -1344,11 +1428,12 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
static int gen6_signal(struct drm_i915_gem_request *signaller_req,
unsigned int num_dwords)
{
- struct intel_engine_cs *signaller = signaller_req->ring;
+ struct intel_engine_cs *signaller = signaller_req->engine;
struct drm_device *dev = signaller->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *useless;
- int i, ret, num_rings;
+ enum intel_engine_id id;
+ int ret, num_rings;
#define MBOX_UPDATE_DWORDS 3
num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
@@ -1359,8 +1444,8 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
if (ret)
return ret;
- for_each_ring(useless, dev_priv, i) {
- i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[i];
+ for_each_engine_id(useless, dev_priv, id) {
+ i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id];
if (i915_mmio_reg_valid(mbox_reg)) {
u32 seqno = i915_gem_request_get_seqno(signaller_req);
@@ -1389,22 +1474,23 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
static int
gen6_add_request(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
- if (ring->semaphore.signal)
- ret = ring->semaphore.signal(req, 4);
+ if (engine->semaphore.signal)
+ ret = engine->semaphore.signal(req, 4);
else
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
- intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
- intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, i915_gem_request_get_seqno(req));
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- __intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(engine,
+ I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+ intel_ring_emit(engine, MI_USER_INTERRUPT);
+ __intel_ring_advance(engine);
return 0;
}
@@ -1429,7 +1515,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
struct intel_engine_cs *signaller,
u32 seqno)
{
- struct intel_engine_cs *waiter = waiter_req->ring;
+ struct intel_engine_cs *waiter = waiter_req->engine;
struct drm_i915_private *dev_priv = waiter->dev->dev_private;
int ret;
@@ -1455,7 +1541,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
struct intel_engine_cs *signaller,
u32 seqno)
{
- struct intel_engine_cs *waiter = waiter_req->ring;
+ struct intel_engine_cs *waiter = waiter_req->engine;
u32 dw1 = MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER;
@@ -1503,8 +1589,8 @@ do { \
static int
pc_render_add_request(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
- u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ struct intel_engine_cs *engine = req->engine;
+ u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
@@ -1519,78 +1605,93 @@ pc_render_add_request(struct drm_i915_gem_request *req)
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+ intel_ring_emit(engine,
+ GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
- intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, i915_gem_request_get_seqno(req));
- intel_ring_emit(ring, 0);
- PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ intel_ring_emit(engine,
+ engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+ intel_ring_emit(engine, 0);
+ PIPE_CONTROL_FLUSH(engine, scratch_addr);
scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
- PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ PIPE_CONTROL_FLUSH(engine, scratch_addr);
scratch_addr += 2 * CACHELINE_BYTES;
- PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ PIPE_CONTROL_FLUSH(engine, scratch_addr);
scratch_addr += 2 * CACHELINE_BYTES;
- PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ PIPE_CONTROL_FLUSH(engine, scratch_addr);
scratch_addr += 2 * CACHELINE_BYTES;
- PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ PIPE_CONTROL_FLUSH(engine, scratch_addr);
scratch_addr += 2 * CACHELINE_BYTES;
- PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ PIPE_CONTROL_FLUSH(engine, scratch_addr);
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+ intel_ring_emit(engine,
+ GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
- intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, i915_gem_request_get_seqno(req));
- intel_ring_emit(ring, 0);
- __intel_ring_advance(ring);
+ intel_ring_emit(engine,
+ engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+ intel_ring_emit(engine, 0);
+ __intel_ring_advance(engine);
return 0;
}
-static u32
-gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+static void
+gen6_seqno_barrier(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
+
/* Workaround to force correct ordering between irq and seqno writes on
* ivb (and maybe also on snb) by reading from a CS register (like
- * ACTHD) before reading the status page. */
- if (!lazy_coherency) {
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- POSTING_READ(RING_ACTHD(ring->mmio_base));
- }
-
- return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+ * ACTHD) before reading the status page.
+ *
+ * Note that this effectively stalls the read by the time it takes to
+ * do a memory transaction, which more or less ensures that the write
+ * from the GPU has sufficient time to invalidate the CPU cacheline.
+ * Alternatively we could delay the interrupt from the CS ring to give
+ * the write time to land, but that would incur a delay after every
+ * batch i.e. much more frequent than a delay when waiting for the
+ * interrupt (with the same net latency).
+ *
+ * Also note that to prevent whole machine hangs on gen7, we have to
+ * take the spinlock to guard against concurrent cacheline access.
+ */
+ spin_lock_irq(&dev_priv->uncore.lock);
+ POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
+ spin_unlock_irq(&dev_priv->uncore.lock);
}
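
The old get_seqno(engine, lazy_coherency) is split in two here: an optional irq_seqno_barrier() that flushes the seqno write, and a plain get_seqno() that only reads the status page. A minimal sketch of how a waiter is expected to combine the two hooks, assuming only the struct members assigned later in this patch; the helper name is hypothetical and the real wait path lives elsewhere in the driver:

/* Illustrative pairing only, not part of the patch. */
static u32 sample_seqno(struct intel_engine_cs *engine)
{
        /* make the GPU's seqno write visible before sampling it */
        if (engine->irq_seqno_barrier)
                engine->irq_seqno_barrier(engine);

        return engine->get_seqno(engine);
}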
static u32
-ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+ring_get_seqno(struct intel_engine_cs *engine)
{
- return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+ return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}
static void
-ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+ring_set_seqno(struct intel_engine_cs *engine, u32 seqno)
{
- intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+ intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
}
static u32
-pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+pc_render_get_seqno(struct intel_engine_cs *engine)
{
- return ring->scratch.cpu_page[0];
+ return engine->scratch.cpu_page[0];
}
static void
-pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno)
{
- ring->scratch.cpu_page[0] = seqno;
+ engine->scratch.cpu_page[0] = seqno;
}
static bool
-gen5_ring_get_irq(struct intel_engine_cs *ring)
+gen5_ring_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
@@ -1598,30 +1699,30 @@ gen5_ring_get_irq(struct intel_engine_cs *ring)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount++ == 0)
- gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+ if (engine->irq_refcount++ == 0)
+ gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
}
static void
-gen5_ring_put_irq(struct intel_engine_cs *ring)
+gen5_ring_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount == 0)
- gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+ if (--engine->irq_refcount == 0)
+ gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
static bool
-i9xx_ring_get_irq(struct intel_engine_cs *ring)
+i9xx_ring_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
@@ -1629,8 +1730,8 @@ i9xx_ring_get_irq(struct intel_engine_cs *ring)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount++ == 0) {
- dev_priv->irq_mask &= ~ring->irq_enable_mask;
+ if (engine->irq_refcount++ == 0) {
+ dev_priv->irq_mask &= ~engine->irq_enable_mask;
I915_WRITE(IMR, dev_priv->irq_mask);
POSTING_READ(IMR);
}
@@ -1640,15 +1741,15 @@ i9xx_ring_get_irq(struct intel_engine_cs *ring)
}
static void
-i9xx_ring_put_irq(struct intel_engine_cs *ring)
+i9xx_ring_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount == 0) {
- dev_priv->irq_mask |= ring->irq_enable_mask;
+ if (--engine->irq_refcount == 0) {
+ dev_priv->irq_mask |= engine->irq_enable_mask;
I915_WRITE(IMR, dev_priv->irq_mask);
POSTING_READ(IMR);
}
@@ -1656,9 +1757,9 @@ i9xx_ring_put_irq(struct intel_engine_cs *ring)
}
static bool
-i8xx_ring_get_irq(struct intel_engine_cs *ring)
+i8xx_ring_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
@@ -1666,8 +1767,8 @@ i8xx_ring_get_irq(struct intel_engine_cs *ring)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount++ == 0) {
- dev_priv->irq_mask &= ~ring->irq_enable_mask;
+ if (engine->irq_refcount++ == 0) {
+ dev_priv->irq_mask &= ~engine->irq_enable_mask;
I915_WRITE16(IMR, dev_priv->irq_mask);
POSTING_READ16(IMR);
}
@@ -1677,15 +1778,15 @@ i8xx_ring_get_irq(struct intel_engine_cs *ring)
}
static void
-i8xx_ring_put_irq(struct intel_engine_cs *ring)
+i8xx_ring_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount == 0) {
- dev_priv->irq_mask |= ring->irq_enable_mask;
+ if (--engine->irq_refcount == 0) {
+ dev_priv->irq_mask |= engine->irq_enable_mask;
I915_WRITE16(IMR, dev_priv->irq_mask);
POSTING_READ16(IMR);
}
@@ -1697,42 +1798,43 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains,
u32 flush_domains)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
- intel_ring_emit(ring, MI_FLUSH);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_FLUSH);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
return 0;
}
static int
i9xx_add_request(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
- intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
- intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, i915_gem_request_get_seqno(req));
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- __intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(engine,
+ I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+ intel_ring_emit(engine, MI_USER_INTERRUPT);
+ __intel_ring_advance(engine);
return 0;
}
static bool
-gen6_ring_get_irq(struct intel_engine_cs *ring)
+gen6_ring_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
@@ -1740,14 +1842,14 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount++ == 0) {
- if (HAS_L3_DPF(dev) && ring->id == RCS)
- I915_WRITE_IMR(ring,
- ~(ring->irq_enable_mask |
+ if (engine->irq_refcount++ == 0) {
+ if (HAS_L3_DPF(dev) && engine->id == RCS)
+ I915_WRITE_IMR(engine,
+ ~(engine->irq_enable_mask |
GT_PARITY_ERROR(dev)));
else
- I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
- gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+ I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+ gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -1755,27 +1857,27 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
}
static void
-gen6_ring_put_irq(struct intel_engine_cs *ring)
+gen6_ring_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount == 0) {
- if (HAS_L3_DPF(dev) && ring->id == RCS)
- I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
+ if (--engine->irq_refcount == 0) {
+ if (HAS_L3_DPF(dev) && engine->id == RCS)
+ I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
else
- I915_WRITE_IMR(ring, ~0);
- gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+ I915_WRITE_IMR(engine, ~0);
+ gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
static bool
-hsw_vebox_get_irq(struct intel_engine_cs *ring)
+hsw_vebox_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
@@ -1783,9 +1885,9 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount++ == 0) {
- I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
- gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
+ if (engine->irq_refcount++ == 0) {
+ I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+ gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -1793,24 +1895,24 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
}
static void
-hsw_vebox_put_irq(struct intel_engine_cs *ring)
+hsw_vebox_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount == 0) {
- I915_WRITE_IMR(ring, ~0);
- gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
+ if (--engine->irq_refcount == 0) {
+ I915_WRITE_IMR(engine, ~0);
+ gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
static bool
-gen8_ring_get_irq(struct intel_engine_cs *ring)
+gen8_ring_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
@@ -1818,15 +1920,15 @@ gen8_ring_get_irq(struct intel_engine_cs *ring)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount++ == 0) {
- if (HAS_L3_DPF(dev) && ring->id == RCS) {
- I915_WRITE_IMR(ring,
- ~(ring->irq_enable_mask |
+ if (engine->irq_refcount++ == 0) {
+ if (HAS_L3_DPF(dev) && engine->id == RCS) {
+ I915_WRITE_IMR(engine,
+ ~(engine->irq_enable_mask |
GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
} else {
- I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+ I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
}
- POSTING_READ(RING_IMR(ring->mmio_base));
+ POSTING_READ(RING_IMR(engine->mmio_base));
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -1834,21 +1936,21 @@ gen8_ring_get_irq(struct intel_engine_cs *ring)
}
static void
-gen8_ring_put_irq(struct intel_engine_cs *ring)
+gen8_ring_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount == 0) {
- if (HAS_L3_DPF(dev) && ring->id == RCS) {
- I915_WRITE_IMR(ring,
+ if (--engine->irq_refcount == 0) {
+ if (HAS_L3_DPF(dev) && engine->id == RCS) {
+ I915_WRITE_IMR(engine,
~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
} else {
- I915_WRITE_IMR(ring, ~0);
+ I915_WRITE_IMR(engine, ~0);
}
- POSTING_READ(RING_IMR(ring->mmio_base));
+ POSTING_READ(RING_IMR(engine->mmio_base));
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
@@ -1858,20 +1960,20 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 length,
unsigned dispatch_flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
- intel_ring_emit(ring,
+ intel_ring_emit(engine,
MI_BATCH_BUFFER_START |
MI_BATCH_GTT |
(dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE_I965));
- intel_ring_emit(ring, offset);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, offset);
+ intel_ring_advance(engine);
return 0;
}
@@ -1885,8 +1987,8 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_engine_cs *ring = req->ring;
- u32 cs_offset = ring->scratch.gtt_offset;
+ struct intel_engine_cs *engine = req->engine;
+ u32 cs_offset = engine->scratch.gtt_offset;
int ret;
ret = intel_ring_begin(req, 6);
@@ -1894,13 +1996,13 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
return ret;
/* Evict the invalid PTE TLBs */
- intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
- intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
- intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
- intel_ring_emit(ring, cs_offset);
- intel_ring_emit(ring, 0xdeadbeef);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, COLOR_BLT_CMD | BLT_WRITE_RGBA);
+ intel_ring_emit(engine, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
+ intel_ring_emit(engine, I830_TLB_ENTRIES << 16 | 4); /* load each page */
+ intel_ring_emit(engine, cs_offset);
+ intel_ring_emit(engine, 0xdeadbeef);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
if (len > I830_BATCH_LIMIT)
@@ -1914,16 +2016,17 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
* stable batch scratch bo area (so that the CS never
* stumbles over its tlb invalidation bug) ...
*/
- intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
- intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
- intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
- intel_ring_emit(ring, cs_offset);
- intel_ring_emit(ring, 4096);
- intel_ring_emit(ring, offset);
-
- intel_ring_emit(ring, MI_FLUSH);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
+ intel_ring_emit(engine,
+ BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
+ intel_ring_emit(engine, DIV_ROUND_UP(len, 4096) << 16 | 4096);
+ intel_ring_emit(engine, cs_offset);
+ intel_ring_emit(engine, 4096);
+ intel_ring_emit(engine, offset);
+
+ intel_ring_emit(engine, MI_FLUSH);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
/* ... and execute it. */
offset = cs_offset;
@@ -1933,10 +2036,10 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
if (ret)
return ret;
- intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
- intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_NON_SECURE));
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+ intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_NON_SECURE));
+ intel_ring_advance(engine);
return 0;
}
@@ -1946,55 +2049,55 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
- intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
- intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_NON_SECURE));
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+ intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_NON_SECURE));
+ intel_ring_advance(engine);
return 0;
}
-static void cleanup_phys_status_page(struct intel_engine_cs *ring)
+static void cleanup_phys_status_page(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(ring->dev);
+ struct drm_i915_private *dev_priv = to_i915(engine->dev);
if (!dev_priv->status_page_dmah)
return;
- drm_pci_free(ring->dev, dev_priv->status_page_dmah);
- ring->status_page.page_addr = NULL;
+ drm_pci_free(engine->dev, dev_priv->status_page_dmah);
+ engine->status_page.page_addr = NULL;
}
-static void cleanup_status_page(struct intel_engine_cs *ring)
+static void cleanup_status_page(struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *obj;
- obj = ring->status_page.obj;
+ obj = engine->status_page.obj;
if (obj == NULL)
return;
kunmap(sg_page(obj->pages->sgl));
i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
- ring->status_page.obj = NULL;
+ engine->status_page.obj = NULL;
}
-static int init_status_page(struct intel_engine_cs *ring)
+static int init_status_page(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_object *obj = ring->status_page.obj;
+ struct drm_i915_gem_object *obj = engine->status_page.obj;
if (obj == NULL) {
unsigned flags;
int ret;
- obj = i915_gem_alloc_object(ring->dev, 4096);
+ obj = i915_gem_alloc_object(engine->dev, 4096);
if (obj == NULL) {
DRM_ERROR("Failed to allocate status page\n");
return -ENOMEM;
@@ -2005,7 +2108,7 @@ static int init_status_page(struct intel_engine_cs *ring)
goto err_unref;
flags = 0;
- if (!HAS_LLC(ring->dev))
+ if (!HAS_LLC(engine->dev))
/* On g33, we cannot place HWS above 256MiB, so
* restrict its pinning to the low mappable arena.
* Though this restriction is not documented for
@@ -2024,32 +2127,32 @@ err_unref:
return ret;
}
- ring->status_page.obj = obj;
+ engine->status_page.obj = obj;
}
- ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
- ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
- memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+ engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
+ engine->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
+ memset(engine->status_page.page_addr, 0, PAGE_SIZE);
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
- ring->name, ring->status_page.gfx_addr);
+ engine->name, engine->status_page.gfx_addr);
return 0;
}
-static int init_phys_status_page(struct intel_engine_cs *ring)
+static int init_phys_status_page(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
if (!dev_priv->status_page_dmah) {
dev_priv->status_page_dmah =
- drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
+ drm_pci_alloc(engine->dev, PAGE_SIZE, PAGE_SIZE);
if (!dev_priv->status_page_dmah)
return -ENOMEM;
}
- ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
- memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+ engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+ memset(engine->status_page.page_addr, 0, PAGE_SIZE);
return 0;
}
@@ -2057,7 +2160,7 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
- vunmap(ringbuf->virtual_start);
+ i915_gem_object_unpin_map(ringbuf->obj);
else
iounmap(ringbuf->virtual_start);
ringbuf->virtual_start = NULL;
@@ -2065,34 +2168,15 @@ void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
i915_gem_object_ggtt_unpin(ringbuf->obj);
}
-static u32 *vmap_obj(struct drm_i915_gem_object *obj)
-{
- struct sg_page_iter sg_iter;
- struct page **pages;
- void *addr;
- int i;
-
- pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
- if (pages == NULL)
- return NULL;
-
- i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
- pages[i++] = sg_page_iter_page(&sg_iter);
-
- addr = vmap(pages, i, 0, PAGE_KERNEL);
- drm_free_large(pages);
-
- return addr;
-}
-
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj = ringbuf->obj;
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
unsigned flags = PIN_OFFSET_BIAS | 4096;
+ void *addr;
int ret;
if (HAS_LLC(dev_priv) && !obj->stolen) {
@@ -2101,15 +2185,13 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
return ret;
ret = i915_gem_object_set_to_cpu_domain(obj, true);
- if (ret) {
- i915_gem_object_ggtt_unpin(obj);
- return ret;
- }
+ if (ret)
+ goto err_unpin;
- ringbuf->virtual_start = vmap_obj(obj);
- if (ringbuf->virtual_start == NULL) {
- i915_gem_object_ggtt_unpin(obj);
- return -ENOMEM;
+ addr = i915_gem_object_pin_map(obj);
+ if (IS_ERR(addr)) {
+ ret = PTR_ERR(addr);
+ goto err_unpin;
}
} else {
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
@@ -2118,25 +2200,27 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
return ret;
ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret) {
- i915_gem_object_ggtt_unpin(obj);
- return ret;
- }
+ if (ret)
+ goto err_unpin;
/* Access through the GTT requires the device to be awake. */
assert_rpm_wakelock_held(dev_priv);
- ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
- i915_gem_obj_ggtt_offset(obj), ringbuf->size);
- if (ringbuf->virtual_start == NULL) {
- i915_gem_object_ggtt_unpin(obj);
- return -EINVAL;
+ addr = ioremap_wc(ggtt->mappable_base +
+ i915_gem_obj_ggtt_offset(obj), ringbuf->size);
+ if (addr == NULL) {
+ ret = -ENOMEM;
+ goto err_unpin;
}
}
+ ringbuf->virtual_start = addr;
ringbuf->vma = i915_gem_obj_to_ggtt(obj);
-
return 0;
+
+err_unpin:
+ i915_gem_object_ggtt_unpin(obj);
+ return ret;
}
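
With vmap_obj() gone, the mapping strategy reduces to one decision: LLC-coherent objects get a CPU mapping of their backing pages via i915_gem_object_pin_map(), everything else is mapped write-combined through the GGTT aperture. A compact sketch of just that choice, illustrative only; map_ring() is a hypothetical name, __iomem annotations are omitted, and pinning/error handling stays as in the function above:

/* Hypothetical condensation of the mapping choice made above. */
static void *map_ring(struct drm_i915_private *dev_priv,
                      struct intel_ringbuffer *ringbuf)
{
        struct drm_i915_gem_object *obj = ringbuf->obj;

        if (HAS_LLC(dev_priv) && !obj->stolen)
                /* coherent CPU mapping of the object's own pages */
                return i915_gem_object_pin_map(obj);

        /* otherwise a write-combined view through the mappable aperture */
        return ioremap_wc(dev_priv->ggtt.mappable_base +
                          i915_gem_obj_ggtt_offset(obj), ringbuf->size);
}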
static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
@@ -2179,7 +2263,7 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
return ERR_PTR(-ENOMEM);
}
- ring->ring = engine;
+ ring->engine = engine;
list_add(&ring->link, &engine->buffers);
ring->size = size;
@@ -2215,37 +2299,38 @@ intel_ringbuffer_free(struct intel_ringbuffer *ring)
}
static int intel_init_ring_buffer(struct drm_device *dev,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *engine)
{
struct intel_ringbuffer *ringbuf;
int ret;
- WARN_ON(ring->buffer);
+ WARN_ON(engine->buffer);
- ring->dev = dev;
- INIT_LIST_HEAD(&ring->active_list);
- INIT_LIST_HEAD(&ring->request_list);
- INIT_LIST_HEAD(&ring->execlist_queue);
- INIT_LIST_HEAD(&ring->buffers);
- i915_gem_batch_pool_init(dev, &ring->batch_pool);
- memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
+ engine->dev = dev;
+ INIT_LIST_HEAD(&engine->active_list);
+ INIT_LIST_HEAD(&engine->request_list);
+ INIT_LIST_HEAD(&engine->execlist_queue);
+ INIT_LIST_HEAD(&engine->buffers);
+ i915_gem_batch_pool_init(dev, &engine->batch_pool);
+ memset(engine->semaphore.sync_seqno, 0,
+ sizeof(engine->semaphore.sync_seqno));
- init_waitqueue_head(&ring->irq_queue);
+ init_waitqueue_head(&engine->irq_queue);
- ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE);
+ ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
if (IS_ERR(ringbuf)) {
ret = PTR_ERR(ringbuf);
goto error;
}
- ring->buffer = ringbuf;
+ engine->buffer = ringbuf;
if (I915_NEED_GFX_HWS(dev)) {
- ret = init_status_page(ring);
+ ret = init_status_page(engine);
if (ret)
goto error;
} else {
- WARN_ON(ring->id != RCS);
- ret = init_phys_status_page(ring);
+ WARN_ON(engine->id != RCS);
+ ret = init_phys_status_page(engine);
if (ret)
goto error;
}
@@ -2253,122 +2338,76 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
if (ret) {
DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
- ring->name, ret);
+ engine->name, ret);
intel_destroy_ringbuffer_obj(ringbuf);
goto error;
}
- ret = i915_cmd_parser_init_ring(ring);
+ ret = i915_cmd_parser_init_ring(engine);
if (ret)
goto error;
return 0;
error:
- intel_cleanup_ring_buffer(ring);
+ intel_cleanup_engine(engine);
return ret;
}
-void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
+void intel_cleanup_engine(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv;
- if (!intel_ring_initialized(ring))
+ if (!intel_engine_initialized(engine))
return;
- dev_priv = to_i915(ring->dev);
+ dev_priv = to_i915(engine->dev);
- if (ring->buffer) {
- intel_stop_ring_buffer(ring);
- WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
+ if (engine->buffer) {
+ intel_stop_engine(engine);
+ WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
- intel_unpin_ringbuffer_obj(ring->buffer);
- intel_ringbuffer_free(ring->buffer);
- ring->buffer = NULL;
+ intel_unpin_ringbuffer_obj(engine->buffer);
+ intel_ringbuffer_free(engine->buffer);
+ engine->buffer = NULL;
}
- if (ring->cleanup)
- ring->cleanup(ring);
+ if (engine->cleanup)
+ engine->cleanup(engine);
- if (I915_NEED_GFX_HWS(ring->dev)) {
- cleanup_status_page(ring);
+ if (I915_NEED_GFX_HWS(engine->dev)) {
+ cleanup_status_page(engine);
} else {
- WARN_ON(ring->id != RCS);
- cleanup_phys_status_page(ring);
- }
-
- i915_cmd_parser_fini_ring(ring);
- i915_gem_batch_pool_fini(&ring->batch_pool);
- ring->dev = NULL;
-}
-
-static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
-{
- struct intel_ringbuffer *ringbuf = ring->buffer;
- struct drm_i915_gem_request *request;
- unsigned space;
- int ret;
-
- if (intel_ring_space(ringbuf) >= n)
- return 0;
-
- /* The whole point of reserving space is to not wait! */
- WARN_ON(ringbuf->reserved_in_use);
-
- list_for_each_entry(request, &ring->request_list, list) {
- space = __intel_ring_space(request->postfix, ringbuf->tail,
- ringbuf->size);
- if (space >= n)
- break;
+ WARN_ON(engine->id != RCS);
+ cleanup_phys_status_page(engine);
}
- if (WARN_ON(&request->list == &ring->request_list))
- return -ENOSPC;
-
- ret = i915_wait_request(request);
- if (ret)
- return ret;
-
- ringbuf->space = space;
- return 0;
-}
-
-static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
-{
- uint32_t __iomem *virt;
- int rem = ringbuf->size - ringbuf->tail;
-
- virt = ringbuf->virtual_start + ringbuf->tail;
- rem /= 4;
- while (rem--)
- iowrite32(MI_NOOP, virt++);
-
- ringbuf->tail = 0;
- intel_ring_update_space(ringbuf);
+ i915_cmd_parser_fini_ring(engine);
+ i915_gem_batch_pool_fini(&engine->batch_pool);
+ engine->dev = NULL;
}
-int intel_ring_idle(struct intel_engine_cs *ring)
+int intel_engine_idle(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *req;
/* Wait upon the last request to be completed */
- if (list_empty(&ring->request_list))
+ if (list_empty(&engine->request_list))
return 0;
- req = list_entry(ring->request_list.prev,
- struct drm_i915_gem_request,
- list);
+ req = list_entry(engine->request_list.prev,
+ struct drm_i915_gem_request,
+ list);
/* Make sure we do not trigger any retires */
return __i915_wait_request(req,
- atomic_read(&to_i915(ring->dev)->gpu_error.reset_counter),
- to_i915(ring->dev)->mm.interruptible,
+ req->i915->mm.interruptible,
NULL, NULL);
}
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
- request->ringbuf = request->ring->buffer;
+ request->ringbuf = request->engine->buffer;
return 0;
}
@@ -2389,63 +2428,82 @@ int intel_ring_reserve_space(struct drm_i915_gem_request *request)
void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
{
- WARN_ON(ringbuf->reserved_size);
- WARN_ON(ringbuf->reserved_in_use);
-
+ GEM_BUG_ON(ringbuf->reserved_size);
ringbuf->reserved_size = size;
}
void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
{
- WARN_ON(ringbuf->reserved_in_use);
-
+ GEM_BUG_ON(!ringbuf->reserved_size);
ringbuf->reserved_size = 0;
- ringbuf->reserved_in_use = false;
}
void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
{
- WARN_ON(ringbuf->reserved_in_use);
-
- ringbuf->reserved_in_use = true;
- ringbuf->reserved_tail = ringbuf->tail;
+ GEM_BUG_ON(!ringbuf->reserved_size);
+ ringbuf->reserved_size = 0;
}
void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
{
- WARN_ON(!ringbuf->reserved_in_use);
- if (ringbuf->tail > ringbuf->reserved_tail) {
- WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
- "request reserved size too small: %d vs %d!\n",
- ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
- } else {
+ GEM_BUG_ON(ringbuf->reserved_size);
+}
+
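With reserved_in_use gone, the reservation becomes a plain byte count guarded by GEM_BUG_ON() at each stage. As the asserts (and the i915_gem_request_alloc()/i915_add_request() cross-reference below) read, the intended call sequence is roughly the following sketch; build_request() is a hypothetical caller, not part of the patch:

/* Illustrative lifecycle only. */
static void build_request(struct intel_ringbuffer *ringbuf, int size)
{
        intel_ring_reserved_space_reserve(ringbuf, size);  /* at request allocation */
        /* ... emit the request body; intel_ring_begin() may wait, keeping the reserve ... */
        intel_ring_reserved_space_use(ringbuf);            /* start of breadcrumb emission */
        /* ... breadcrumb emission must not wait, its space was pre-reserved ... */
        intel_ring_reserved_space_end(ringbuf);            /* reserve must be back to 0 */
        /* on failure before _use(), intel_ring_reserved_space_cancel() drops the reserve */
}
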
+static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
+{
+ struct intel_ringbuffer *ringbuf = req->ringbuf;
+ struct intel_engine_cs *engine = req->engine;
+ struct drm_i915_gem_request *target;
+
+ intel_ring_update_space(ringbuf);
+ if (ringbuf->space >= bytes)
+ return 0;
+
+ /*
+ * Space is reserved in the ringbuffer for finalising the request,
+ * as that cannot be allowed to fail. During request finalisation,
+ * reserved_space is set to 0 to stop the overallocation and the
+ * assumption is that then we never need to wait (which has the
+ * risk of failing with EINTR).
+ *
+ * See also i915_gem_request_alloc() and i915_add_request().
+ */
+ GEM_BUG_ON(!ringbuf->reserved_size);
+
+ list_for_each_entry(target, &engine->request_list, list) {
+ unsigned space;
+
/*
- * The ring was wrapped while the reserved space was in use.
- * That means that some unknown amount of the ring tail was
- * no-op filled and skipped. Thus simply adding the ring size
- * to the tail and doing the above space check will not work.
- * Rather than attempt to track how much tail was skipped,
- * it is much simpler to say that also skipping the sanity
- * check every once in a while is not a big issue.
+ * The request queue is per-engine, so can contain requests
+ * from multiple ringbuffers. Here, we must ignore any that
+ * aren't from the ringbuffer we're considering.
*/
+ if (target->ringbuf != ringbuf)
+ continue;
+
+ /* Would completion of this request free enough space? */
+ space = __intel_ring_space(target->postfix, ringbuf->tail,
+ ringbuf->size);
+ if (space >= bytes)
+ break;
}
- ringbuf->reserved_size = 0;
- ringbuf->reserved_in_use = false;
+ if (WARN_ON(&target->list == &engine->request_list))
+ return -ENOSPC;
+
+ return i915_wait_request(target);
}
-static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
+int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
- struct intel_ringbuffer *ringbuf = ring->buffer;
- int remain_usable = ringbuf->effective_size - ringbuf->tail;
+ struct intel_ringbuffer *ringbuf = req->ringbuf;
int remain_actual = ringbuf->size - ringbuf->tail;
- int ret, total_bytes, wait_bytes = 0;
+ int remain_usable = ringbuf->effective_size - ringbuf->tail;
+ int bytes = num_dwords * sizeof(u32);
+ int total_bytes, wait_bytes;
bool need_wrap = false;
- if (ringbuf->reserved_in_use)
- total_bytes = bytes;
- else
- total_bytes = bytes + ringbuf->reserved_size;
+ total_bytes = bytes + ringbuf->reserved_size;
if (unlikely(bytes > remain_usable)) {
/*
@@ -2454,62 +2512,50 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
*/
wait_bytes = remain_actual + total_bytes;
need_wrap = true;
+ } else if (unlikely(total_bytes > remain_usable)) {
+ /*
+ * The base request will fit but the reserved space
+ * falls off the end. So we don't need an immediate wrap
+ * and only need to effectively wait for the reserved
+ * size space from the start of ringbuffer.
+ */
+ wait_bytes = remain_actual + ringbuf->reserved_size;
} else {
- if (unlikely(total_bytes > remain_usable)) {
- /*
- * The base request will fit but the reserved space
- * falls off the end. So don't need an immediate wrap
- * and only need to effectively wait for the reserved
- * size space from the start of ringbuffer.
- */
- wait_bytes = remain_actual + ringbuf->reserved_size;
- } else if (total_bytes > ringbuf->space) {
- /* No wrapping required, just waiting. */
- wait_bytes = total_bytes;
- }
+ /* No wrapping required, just waiting. */
+ wait_bytes = total_bytes;
}
- if (wait_bytes) {
- ret = ring_wait_for_space(ring, wait_bytes);
+ if (wait_bytes > ringbuf->space) {
+ int ret = wait_for_space(req, wait_bytes);
if (unlikely(ret))
return ret;
- if (need_wrap)
- __wrap_ring_buffer(ringbuf);
+ intel_ring_update_space(ringbuf);
+ if (unlikely(ringbuf->space < wait_bytes))
+ return -EAGAIN;
}
- return 0;
-}
-
-int intel_ring_begin(struct drm_i915_gem_request *req,
- int num_dwords)
-{
- struct intel_engine_cs *ring;
- struct drm_i915_private *dev_priv;
- int ret;
-
- WARN_ON(req == NULL);
- ring = req->ring;
- dev_priv = ring->dev->dev_private;
-
- ret = i915_gem_check_wedge(&dev_priv->gpu_error,
- dev_priv->mm.interruptible);
- if (ret)
- return ret;
+ if (unlikely(need_wrap)) {
+ GEM_BUG_ON(remain_actual > ringbuf->space);
+ GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size);
- ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
- if (ret)
- return ret;
+ /* Fill the tail with MI_NOOP */
+ memset(ringbuf->virtual_start + ringbuf->tail,
+ 0, remain_actual);
+ ringbuf->tail = 0;
+ ringbuf->space -= remain_actual;
+ }
- ring->buffer->space -= num_dwords * sizeof(uint32_t);
+ ringbuf->space -= bytes;
+ GEM_BUG_ON(ringbuf->space < 0);
return 0;
}
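
The three-way split above decides how many bytes must be free before the write may proceed: wrap immediately if the body itself does not fit in the usable area, wait for the reserve measured from the start of the ring if only the reserve overflows, otherwise just wait for the total. A compact restatement, illustrative only; required_space() is a hypothetical name and the fields are those used above:

/* Hypothetical restatement of the wait_bytes computation in intel_ring_begin(). */
static int required_space(struct intel_ringbuffer *rb, int bytes)
{
        int remain_actual = rb->size - rb->tail;
        int remain_usable = rb->effective_size - rb->tail;
        int total = bytes + rb->reserved_size;

        if (bytes > remain_usable)              /* must wrap now: pay for the tail fill too */
                return remain_actual + total;
        if (total > remain_usable)              /* body fits, reserve wraps later */
                return remain_actual + rb->reserved_size;
        return total;                           /* no wrap, just wait for the total */
}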
/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
- int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+ struct intel_engine_cs *engine = req->engine;
+ int num_dwords = (engine->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
int ret;
if (num_dwords == 0)
@@ -2521,33 +2567,52 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
return ret;
while (num_dwords--)
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(engine, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_advance(engine);
return 0;
}
-void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
+void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
{
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(engine->dev);
- if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
- I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
- I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
- if (HAS_VEBOX(dev))
- I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
+ /* Our semaphore implementation is strictly monotonic (i.e. we proceed
+ * so long as the semaphore value in the register/page is greater
+ * than the sync value), so whenever we reset the seqno,
+ * so long as we reset the tracking semaphore value to 0, it will
+ * always be before the next request's seqno. If we don't reset
+ * the semaphore value, then when the seqno moves backwards all
+ * future waits will complete instantly (causing rendering corruption).
+ */
+ if (INTEL_INFO(dev_priv)->gen == 6 || INTEL_INFO(dev_priv)->gen == 7) {
+ I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
+ I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
+ if (HAS_VEBOX(dev_priv))
+ I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
+ }
+ if (dev_priv->semaphore_obj) {
+ struct drm_i915_gem_object *obj = dev_priv->semaphore_obj;
+ struct page *page = i915_gem_object_get_dirty_page(obj, 0);
+ void *semaphores = kmap(page);
+ memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
+ 0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
+ kunmap(page);
}
+ memset(engine->semaphore.sync_seqno, 0,
+ sizeof(engine->semaphore.sync_seqno));
- ring->set_seqno(ring, seqno);
- ring->hangcheck.seqno = seqno;
+ engine->set_seqno(engine, seqno);
+ engine->last_submitted_seqno = seqno;
+
+ engine->hangcheck.seqno = seqno;
}
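
The memset of the gen8 semaphore page guards against exactly the stale-value case the new comment describes. A small worked example of the failure mode it prevents, based on that comment; the values are made up for illustration:

/*
 * Illustrative only: after a seqno rewind without the memset above,
 *   value left in the semaphore page by the old signaller:  0x00001000
 *   seqno of the first request after the rewind:            0x00000010
 * A waiter that proceeds "so long as the signalled value is greater than
 * its sync value" sees 0x1000 > 0x10 and completes immediately, never
 * actually waiting on the rewound signaller - the rendering corruption
 * the comment warns about.
 */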
-static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
+static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
u32 value)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
/* Every tail move must follow the sequence below */
@@ -2567,8 +2632,8 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
/* Now that the ring is fully powered up, update the tail */
- I915_WRITE_TAIL(ring, value);
- POSTING_READ(RING_TAIL(ring->mmio_base));
+ I915_WRITE_TAIL(engine, value);
+ POSTING_READ(RING_TAIL(engine->mmio_base));
/* Let the ring send IDLE messages to the GT again,
* and so let it sleep to conserve power when idle.
@@ -2580,7 +2645,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate, u32 flush)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
uint32_t cmd;
int ret;
@@ -2589,7 +2654,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
return ret;
cmd = MI_FLUSH_DW;
- if (INTEL_INFO(ring->dev)->gen >= 8)
+ if (INTEL_INFO(engine->dev)->gen >= 8)
cmd += 1;
/* We always require a command barrier so that subsequent
@@ -2608,16 +2673,17 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
if (invalidate & I915_GEM_GPU_DOMAINS)
cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
- if (INTEL_INFO(ring->dev)->gen >= 8) {
- intel_ring_emit(ring, 0); /* upper addr */
- intel_ring_emit(ring, 0); /* value */
+ intel_ring_emit(engine, cmd);
+ intel_ring_emit(engine,
+ I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ if (INTEL_INFO(engine->dev)->gen >= 8) {
+ intel_ring_emit(engine, 0); /* upper addr */
+ intel_ring_emit(engine, 0); /* value */
} else {
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, MI_NOOP);
}
- intel_ring_advance(ring);
+ intel_ring_advance(engine);
return 0;
}
@@ -2626,8 +2692,8 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_engine_cs *ring = req->ring;
- bool ppgtt = USES_PPGTT(ring->dev) &&
+ struct intel_engine_cs *engine = req->engine;
+ bool ppgtt = USES_PPGTT(engine->dev) &&
!(dispatch_flags & I915_DISPATCH_SECURE);
int ret;
@@ -2636,13 +2702,13 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
return ret;
/* FIXME(BDW): Address space and security selectors. */
- intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+ intel_ring_emit(engine, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
(dispatch_flags & I915_DISPATCH_RS ?
MI_BATCH_RESOURCE_STREAMER : 0));
- intel_ring_emit(ring, lower_32_bits(offset));
- intel_ring_emit(ring, upper_32_bits(offset));
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, lower_32_bits(offset));
+ intel_ring_emit(engine, upper_32_bits(offset));
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
return 0;
}
@@ -2652,22 +2718,22 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
- intel_ring_emit(ring,
+ intel_ring_emit(engine,
MI_BATCH_BUFFER_START |
(dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
(dispatch_flags & I915_DISPATCH_RS ?
MI_BATCH_RESOURCE_STREAMER : 0));
/* bit0-7 is the length on GEN6+ */
- intel_ring_emit(ring, offset);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, offset);
+ intel_ring_advance(engine);
return 0;
}
@@ -2677,20 +2743,20 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
- intel_ring_emit(ring,
+ intel_ring_emit(engine,
MI_BATCH_BUFFER_START |
(dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE_I965));
/* bit0-7 is the length on GEN6+ */
- intel_ring_emit(ring, offset);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, offset);
+ intel_ring_advance(engine);
return 0;
}
@@ -2700,8 +2766,8 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
static int gen6_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate, u32 flush)
{
- struct intel_engine_cs *ring = req->ring;
- struct drm_device *dev = ring->dev;
+ struct intel_engine_cs *engine = req->engine;
+ struct drm_device *dev = engine->dev;
uint32_t cmd;
int ret;
@@ -2728,16 +2794,17 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
*/
if (invalidate & I915_GEM_DOMAIN_RENDER)
cmd |= MI_INVALIDATE_TLB;
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ intel_ring_emit(engine, cmd);
+ intel_ring_emit(engine,
+ I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
if (INTEL_INFO(dev)->gen >= 8) {
- intel_ring_emit(ring, 0); /* upper addr */
- intel_ring_emit(ring, 0); /* value */
+ intel_ring_emit(engine, 0); /* upper addr */
+ intel_ring_emit(engine, 0); /* value */
} else {
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, MI_NOOP);
}
- intel_ring_advance(ring);
+ intel_ring_advance(engine);
return 0;
}
@@ -2745,14 +2812,15 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
int intel_init_render_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_object *obj;
int ret;
- ring->name = "render ring";
- ring->id = RCS;
- ring->exec_id = I915_EXEC_RENDER;
- ring->mmio_base = RENDER_RING_BASE;
+ engine->name = "render ring";
+ engine->id = RCS;
+ engine->exec_id = I915_EXEC_RENDER;
+ engine->hw_id = 0;
+ engine->mmio_base = RENDER_RING_BASE;
if (INTEL_INFO(dev)->gen >= 8) {
if (i915_semaphore_is_enabled(dev)) {
@@ -2772,34 +2840,36 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
}
}
- ring->init_context = intel_rcs_ctx_init;
- ring->add_request = gen6_add_request;
- ring->flush = gen8_render_ring_flush;
- ring->irq_get = gen8_ring_get_irq;
- ring->irq_put = gen8_ring_put_irq;
- ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
- ring->get_seqno = gen6_ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
+ engine->init_context = intel_rcs_ctx_init;
+ engine->add_request = gen6_add_request;
+ engine->flush = gen8_render_ring_flush;
+ engine->irq_get = gen8_ring_get_irq;
+ engine->irq_put = gen8_ring_put_irq;
+ engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+ engine->irq_seqno_barrier = gen6_seqno_barrier;
+ engine->get_seqno = ring_get_seqno;
+ engine->set_seqno = ring_set_seqno;
if (i915_semaphore_is_enabled(dev)) {
WARN_ON(!dev_priv->semaphore_obj);
- ring->semaphore.sync_to = gen8_ring_sync;
- ring->semaphore.signal = gen8_rcs_signal;
- GEN8_RING_SEMAPHORE_INIT;
+ engine->semaphore.sync_to = gen8_ring_sync;
+ engine->semaphore.signal = gen8_rcs_signal;
+ GEN8_RING_SEMAPHORE_INIT(engine);
}
} else if (INTEL_INFO(dev)->gen >= 6) {
- ring->init_context = intel_rcs_ctx_init;
- ring->add_request = gen6_add_request;
- ring->flush = gen7_render_ring_flush;
+ engine->init_context = intel_rcs_ctx_init;
+ engine->add_request = gen6_add_request;
+ engine->flush = gen7_render_ring_flush;
if (INTEL_INFO(dev)->gen == 6)
- ring->flush = gen6_render_ring_flush;
- ring->irq_get = gen6_ring_get_irq;
- ring->irq_put = gen6_ring_put_irq;
- ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
- ring->get_seqno = gen6_ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
+ engine->flush = gen6_render_ring_flush;
+ engine->irq_get = gen6_ring_get_irq;
+ engine->irq_put = gen6_ring_put_irq;
+ engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+ engine->irq_seqno_barrier = gen6_seqno_barrier;
+ engine->get_seqno = ring_get_seqno;
+ engine->set_seqno = ring_set_seqno;
if (i915_semaphore_is_enabled(dev)) {
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
+ engine->semaphore.sync_to = gen6_ring_sync;
+ engine->semaphore.signal = gen6_signal;
/*
* The current semaphore is only applied on pre-gen8
* platform. And there is no VCS2 ring on the pre-gen8
@@ -2807,59 +2877,59 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
* initialized as INVALID. Gen8 will initialize the
* sema between VCS2 and RCS later.
*/
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+ engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
+ engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
+ engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
+ engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ engine->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
+ engine->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
+ engine->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
+ engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
+ engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
} else if (IS_GEN5(dev)) {
- ring->add_request = pc_render_add_request;
- ring->flush = gen4_render_ring_flush;
- ring->get_seqno = pc_render_get_seqno;
- ring->set_seqno = pc_render_set_seqno;
- ring->irq_get = gen5_ring_get_irq;
- ring->irq_put = gen5_ring_put_irq;
- ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
+ engine->add_request = pc_render_add_request;
+ engine->flush = gen4_render_ring_flush;
+ engine->get_seqno = pc_render_get_seqno;
+ engine->set_seqno = pc_render_set_seqno;
+ engine->irq_get = gen5_ring_get_irq;
+ engine->irq_put = gen5_ring_put_irq;
+ engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
} else {
- ring->add_request = i9xx_add_request;
+ engine->add_request = i9xx_add_request;
if (INTEL_INFO(dev)->gen < 4)
- ring->flush = gen2_render_ring_flush;
+ engine->flush = gen2_render_ring_flush;
else
- ring->flush = gen4_render_ring_flush;
- ring->get_seqno = ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
+ engine->flush = gen4_render_ring_flush;
+ engine->get_seqno = ring_get_seqno;
+ engine->set_seqno = ring_set_seqno;
if (IS_GEN2(dev)) {
- ring->irq_get = i8xx_ring_get_irq;
- ring->irq_put = i8xx_ring_put_irq;
+ engine->irq_get = i8xx_ring_get_irq;
+ engine->irq_put = i8xx_ring_put_irq;
} else {
- ring->irq_get = i9xx_ring_get_irq;
- ring->irq_put = i9xx_ring_put_irq;
+ engine->irq_get = i9xx_ring_get_irq;
+ engine->irq_put = i9xx_ring_put_irq;
}
- ring->irq_enable_mask = I915_USER_INTERRUPT;
+ engine->irq_enable_mask = I915_USER_INTERRUPT;
}
- ring->write_tail = ring_write_tail;
+ engine->write_tail = ring_write_tail;
if (IS_HASWELL(dev))
- ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+ engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
else if (IS_GEN8(dev))
- ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
else if (INTEL_INFO(dev)->gen >= 6)
- ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
else if (INTEL_INFO(dev)->gen >= 4)
- ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ engine->dispatch_execbuffer = i965_dispatch_execbuffer;
else if (IS_I830(dev) || IS_845G(dev))
- ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+ engine->dispatch_execbuffer = i830_dispatch_execbuffer;
else
- ring->dispatch_execbuffer = i915_dispatch_execbuffer;
- ring->init_hw = init_render_ring;
- ring->cleanup = render_ring_cleanup;
+ engine->dispatch_execbuffer = i915_dispatch_execbuffer;
+ engine->init_hw = init_render_ring;
+ engine->cleanup = render_ring_cleanup;
/* Workaround batchbuffer to combat CS tlb bug. */
if (HAS_BROKEN_CS_TLB(dev)) {
@@ -2876,16 +2946,16 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
return ret;
}
- ring->scratch.obj = obj;
- ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
+ engine->scratch.obj = obj;
+ engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
}
- ret = intel_init_ring_buffer(dev, ring);
+ ret = intel_init_ring_buffer(dev, engine);
if (ret)
return ret;
if (INTEL_INFO(dev)->gen >= 5) {
- ret = intel_init_pipe_control(ring);
+ ret = intel_init_pipe_control(engine);
if (ret)
return ret;
}
@@ -2896,75 +2966,77 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[VCS];
+ struct intel_engine_cs *engine = &dev_priv->engine[VCS];
- ring->name = "bsd ring";
- ring->id = VCS;
- ring->exec_id = I915_EXEC_BSD;
+ engine->name = "bsd ring";
+ engine->id = VCS;
+ engine->exec_id = I915_EXEC_BSD;
+ engine->hw_id = 1;
- ring->write_tail = ring_write_tail;
+ engine->write_tail = ring_write_tail;
if (INTEL_INFO(dev)->gen >= 6) {
- ring->mmio_base = GEN6_BSD_RING_BASE;
+ engine->mmio_base = GEN6_BSD_RING_BASE;
/* gen6 bsd needs a special wa for tail updates */
if (IS_GEN6(dev))
- ring->write_tail = gen6_bsd_ring_write_tail;
- ring->flush = gen6_bsd_ring_flush;
- ring->add_request = gen6_add_request;
- ring->get_seqno = gen6_ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
+ engine->write_tail = gen6_bsd_ring_write_tail;
+ engine->flush = gen6_bsd_ring_flush;
+ engine->add_request = gen6_add_request;
+ engine->irq_seqno_barrier = gen6_seqno_barrier;
+ engine->get_seqno = ring_get_seqno;
+ engine->set_seqno = ring_set_seqno;
if (INTEL_INFO(dev)->gen >= 8) {
- ring->irq_enable_mask =
+ engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
- ring->irq_get = gen8_ring_get_irq;
- ring->irq_put = gen8_ring_put_irq;
- ring->dispatch_execbuffer =
+ engine->irq_get = gen8_ring_get_irq;
+ engine->irq_put = gen8_ring_put_irq;
+ engine->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
- ring->semaphore.sync_to = gen8_ring_sync;
- ring->semaphore.signal = gen8_xcs_signal;
- GEN8_RING_SEMAPHORE_INIT;
+ engine->semaphore.sync_to = gen8_ring_sync;
+ engine->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT(engine);
}
} else {
- ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
- ring->irq_get = gen6_ring_get_irq;
- ring->irq_put = gen6_ring_put_irq;
- ring->dispatch_execbuffer =
+ engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+ engine->irq_get = gen6_ring_get_irq;
+ engine->irq_put = gen6_ring_put_irq;
+ engine->dispatch_execbuffer =
gen6_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ engine->semaphore.sync_to = gen6_ring_sync;
+ engine->semaphore.signal = gen6_signal;
+ engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
+ engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+ engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
+ engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
+ engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ engine->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
+ engine->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
+ engine->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
+ engine->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
+ engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
}
} else {
- ring->mmio_base = BSD_RING_BASE;
- ring->flush = bsd_ring_flush;
- ring->add_request = i9xx_add_request;
- ring->get_seqno = ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
+ engine->mmio_base = BSD_RING_BASE;
+ engine->flush = bsd_ring_flush;
+ engine->add_request = i9xx_add_request;
+ engine->get_seqno = ring_get_seqno;
+ engine->set_seqno = ring_set_seqno;
if (IS_GEN5(dev)) {
- ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
- ring->irq_get = gen5_ring_get_irq;
- ring->irq_put = gen5_ring_put_irq;
+ engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
+ engine->irq_get = gen5_ring_get_irq;
+ engine->irq_put = gen5_ring_put_irq;
} else {
- ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
- ring->irq_get = i9xx_ring_get_irq;
- ring->irq_put = i9xx_ring_put_irq;
+ engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
+ engine->irq_get = i9xx_ring_get_irq;
+ engine->irq_put = i9xx_ring_put_irq;
}
- ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ engine->dispatch_execbuffer = i965_dispatch_execbuffer;
}
- ring->init_hw = init_ring_common;
+ engine->init_hw = init_ring_common;
- return intel_init_ring_buffer(dev, ring);
+ return intel_init_ring_buffer(dev, engine);
}
/**
@@ -2973,68 +3045,72 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
int intel_init_bsd2_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
-
- ring->name = "bsd2 ring";
- ring->id = VCS2;
- ring->exec_id = I915_EXEC_BSD;
-
- ring->write_tail = ring_write_tail;
- ring->mmio_base = GEN8_BSD2_RING_BASE;
- ring->flush = gen6_bsd_ring_flush;
- ring->add_request = gen6_add_request;
- ring->get_seqno = gen6_ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
- ring->irq_enable_mask =
+ struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
+
+ engine->name = "bsd2 ring";
+ engine->id = VCS2;
+ engine->exec_id = I915_EXEC_BSD;
+ engine->hw_id = 4;
+
+ engine->write_tail = ring_write_tail;
+ engine->mmio_base = GEN8_BSD2_RING_BASE;
+ engine->flush = gen6_bsd_ring_flush;
+ engine->add_request = gen6_add_request;
+ engine->irq_seqno_barrier = gen6_seqno_barrier;
+ engine->get_seqno = ring_get_seqno;
+ engine->set_seqno = ring_set_seqno;
+ engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
- ring->irq_get = gen8_ring_get_irq;
- ring->irq_put = gen8_ring_put_irq;
- ring->dispatch_execbuffer =
+ engine->irq_get = gen8_ring_get_irq;
+ engine->irq_put = gen8_ring_put_irq;
+ engine->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
- ring->semaphore.sync_to = gen8_ring_sync;
- ring->semaphore.signal = gen8_xcs_signal;
- GEN8_RING_SEMAPHORE_INIT;
+ engine->semaphore.sync_to = gen8_ring_sync;
+ engine->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT(engine);
}
- ring->init_hw = init_ring_common;
+ engine->init_hw = init_ring_common;
- return intel_init_ring_buffer(dev, ring);
+ return intel_init_ring_buffer(dev, engine);
}
int intel_init_blt_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[BCS];
-
- ring->name = "blitter ring";
- ring->id = BCS;
- ring->exec_id = I915_EXEC_BLT;
-
- ring->mmio_base = BLT_RING_BASE;
- ring->write_tail = ring_write_tail;
- ring->flush = gen6_ring_flush;
- ring->add_request = gen6_add_request;
- ring->get_seqno = gen6_ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
+ struct intel_engine_cs *engine = &dev_priv->engine[BCS];
+
+ engine->name = "blitter ring";
+ engine->id = BCS;
+ engine->exec_id = I915_EXEC_BLT;
+ engine->hw_id = 2;
+
+ engine->mmio_base = BLT_RING_BASE;
+ engine->write_tail = ring_write_tail;
+ engine->flush = gen6_ring_flush;
+ engine->add_request = gen6_add_request;
+ engine->irq_seqno_barrier = gen6_seqno_barrier;
+ engine->get_seqno = ring_get_seqno;
+ engine->set_seqno = ring_set_seqno;
if (INTEL_INFO(dev)->gen >= 8) {
- ring->irq_enable_mask =
+ engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
- ring->irq_get = gen8_ring_get_irq;
- ring->irq_put = gen8_ring_put_irq;
- ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ engine->irq_get = gen8_ring_get_irq;
+ engine->irq_put = gen8_ring_put_irq;
+ engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
- ring->semaphore.sync_to = gen8_ring_sync;
- ring->semaphore.signal = gen8_xcs_signal;
- GEN8_RING_SEMAPHORE_INIT;
+ engine->semaphore.sync_to = gen8_ring_sync;
+ engine->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT(engine);
}
} else {
- ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
- ring->irq_get = gen6_ring_get_irq;
- ring->irq_put = gen6_ring_put_irq;
- ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
+ engine->irq_get = gen6_ring_get_irq;
+ engine->irq_put = gen6_ring_put_irq;
+ engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
- ring->semaphore.signal = gen6_signal;
- ring->semaphore.sync_to = gen6_ring_sync;
+ engine->semaphore.signal = gen6_signal;
+ engine->semaphore.sync_to = gen6_ring_sync;
/*
* The current semaphore is only applied on pre-gen8
* platform. And there is no VCS2 ring on the pre-gen8
@@ -3042,127 +3118,129 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
* initialized as INVALID. Gen8 will initialize the
* sema between BCS and VCS2 later.
*/
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
+ engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
+ engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+ engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
+ engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ engine->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
+ engine->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
+ engine->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
+ engine->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
+ engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
}
- ring->init_hw = init_ring_common;
+ engine->init_hw = init_ring_common;
- return intel_init_ring_buffer(dev, ring);
+ return intel_init_ring_buffer(dev, engine);
}
int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[VECS];
+ struct intel_engine_cs *engine = &dev_priv->engine[VECS];
- ring->name = "video enhancement ring";
- ring->id = VECS;
- ring->exec_id = I915_EXEC_VEBOX;
+ engine->name = "video enhancement ring";
+ engine->id = VECS;
+ engine->exec_id = I915_EXEC_VEBOX;
+ engine->hw_id = 3;
- ring->mmio_base = VEBOX_RING_BASE;
- ring->write_tail = ring_write_tail;
- ring->flush = gen6_ring_flush;
- ring->add_request = gen6_add_request;
- ring->get_seqno = gen6_ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
+ engine->mmio_base = VEBOX_RING_BASE;
+ engine->write_tail = ring_write_tail;
+ engine->flush = gen6_ring_flush;
+ engine->add_request = gen6_add_request;
+ engine->irq_seqno_barrier = gen6_seqno_barrier;
+ engine->get_seqno = ring_get_seqno;
+ engine->set_seqno = ring_set_seqno;
if (INTEL_INFO(dev)->gen >= 8) {
- ring->irq_enable_mask =
+ engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
- ring->irq_get = gen8_ring_get_irq;
- ring->irq_put = gen8_ring_put_irq;
- ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ engine->irq_get = gen8_ring_get_irq;
+ engine->irq_put = gen8_ring_put_irq;
+ engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
- ring->semaphore.sync_to = gen8_ring_sync;
- ring->semaphore.signal = gen8_xcs_signal;
- GEN8_RING_SEMAPHORE_INIT;
+ engine->semaphore.sync_to = gen8_ring_sync;
+ engine->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT(engine);
}
} else {
- ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
- ring->irq_get = hsw_vebox_get_irq;
- ring->irq_put = hsw_vebox_put_irq;
- ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
+ engine->irq_get = hsw_vebox_get_irq;
+ engine->irq_put = hsw_vebox_put_irq;
+ engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ engine->semaphore.sync_to = gen6_ring_sync;
+ engine->semaphore.signal = gen6_signal;
+ engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
+ engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
+ engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
+ engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+ engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ engine->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
+ engine->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
+ engine->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
+ engine->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
+ engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
}
- ring->init_hw = init_ring_common;
+ engine->init_hw = init_ring_common;
- return intel_init_ring_buffer(dev, ring);
+ return intel_init_ring_buffer(dev, engine);
}
int
intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
- if (!ring->gpu_caches_dirty)
+ if (!engine->gpu_caches_dirty)
return 0;
- ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS);
+ ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
- ring->gpu_caches_dirty = false;
+ engine->gpu_caches_dirty = false;
return 0;
}
int
intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
uint32_t flush_domains;
int ret;
flush_domains = 0;
- if (ring->gpu_caches_dirty)
+ if (engine->gpu_caches_dirty)
flush_domains = I915_GEM_GPU_DOMAINS;
- ret = ring->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
+ ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
if (ret)
return ret;
trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
- ring->gpu_caches_dirty = false;
+ engine->gpu_caches_dirty = false;
return 0;
}
void
-intel_stop_ring_buffer(struct intel_engine_cs *ring)
+intel_stop_engine(struct intel_engine_cs *engine)
{
int ret;
- if (!intel_ring_initialized(ring))
+ if (!intel_engine_initialized(engine))
return;
- ret = intel_ring_idle(ring);
- if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
+ ret = intel_engine_idle(engine);
+ if (ret)
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
- ring->name, ret);
+ engine->name, ret);
- stop_ring(ring);
+ stop_ring(engine);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 566b0ae10..ff126485d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -52,34 +52,32 @@ struct intel_hw_status_page {
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
* do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
*/
-#define i915_semaphore_seqno_size sizeof(uint64_t)
+#define gen8_semaphore_seqno_size sizeof(uint64_t)
+#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
+ (((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
- ((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
- (i915_semaphore_seqno_size * (to)))
-
+ GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
- ((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
- (i915_semaphore_seqno_size * (__ring)->id))
+ GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
-#define GEN8_RING_SEMAPHORE_INIT do { \
+#define GEN8_RING_SEMAPHORE_INIT(e) do { \
if (!dev_priv->semaphore_obj) { \
break; \
} \
- ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
- ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
- ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
- ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
- ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
- ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
+ (e)->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET((e), RCS); \
+ (e)->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET((e), VCS); \
+ (e)->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET((e), BCS); \
+ (e)->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET((e), VECS); \
+ (e)->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET((e), VCS2); \
+ (e)->semaphore.signal_ggtt[(e)->id] = MI_SEMAPHORE_SYNC_INVALID; \
} while(0)
enum intel_ring_hangcheck_action {
HANGCHECK_IDLE = 0,
HANGCHECK_WAIT,
HANGCHECK_ACTIVE,
- HANGCHECK_ACTIVE_LOOP,
HANGCHECK_KICK,
HANGCHECK_HUNG,
};
@@ -88,8 +86,8 @@ enum intel_ring_hangcheck_action {
struct intel_ring_hangcheck {
u64 acthd;
- u64 max_acthd;
u32 seqno;
+ unsigned user_interrupts;
int score;
enum intel_ring_hangcheck_action action;
int deadlock;
@@ -101,7 +99,7 @@ struct intel_ringbuffer {
void __iomem *virtual_start;
struct i915_vma *vma;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct list_head link;
u32 head;
@@ -110,8 +108,6 @@ struct intel_ringbuffer {
int size;
int effective_size;
int reserved_size;
- int reserved_tail;
- bool reserved_in_use;
/** We track the position of the requests in the ring buffer, and
* when each is retired we increment last_retired_head as the GPU
@@ -125,7 +121,7 @@ struct intel_ringbuffer {
};
struct intel_context;
-struct drm_i915_reg_descriptor;
+struct drm_i915_reg_table;
/*
* we use a single page to load ctx workarounds so all of these
@@ -148,17 +144,18 @@ struct i915_ctx_workarounds {
struct intel_engine_cs {
const char *name;
- enum intel_ring_id {
+ enum intel_engine_id {
RCS = 0,
BCS,
VCS,
VCS2, /* Keep instances of the same type engine together. */
VECS
} id;
-#define I915_NUM_RINGS 5
+#define I915_NUM_ENGINES 5
#define _VCS(n) (VCS + (n))
unsigned int exec_id;
- unsigned int guc_id;
+ unsigned int hw_id;
+ unsigned int guc_id; /* XXX same as hw_id? */
u32 mmio_base;
struct drm_device *dev;
struct intel_ringbuffer *buffer;
@@ -196,8 +193,8 @@ struct intel_engine_cs {
* seen value is good enough. Note that the seqno will always be
* monotonic, even if not coherent.
*/
- u32 (*get_seqno)(struct intel_engine_cs *ring,
- bool lazy_coherency);
+ void (*irq_seqno_barrier)(struct intel_engine_cs *ring);
+ u32 (*get_seqno)(struct intel_engine_cs *ring);
void (*set_seqno)(struct intel_engine_cs *ring,
u32 seqno);
int (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
@@ -246,16 +243,16 @@ struct intel_engine_cs {
* ie. transpose of f(x, y)
*/
struct {
- u32 sync_seqno[I915_NUM_RINGS-1];
+ u32 sync_seqno[I915_NUM_ENGINES-1];
union {
struct {
/* our mbox written by others */
- u32 wait[I915_NUM_RINGS];
+ u32 wait[I915_NUM_ENGINES];
/* mboxes this ring signals to */
- i915_reg_t signal[I915_NUM_RINGS];
+ i915_reg_t signal[I915_NUM_ENGINES];
} mbox;
- u64 signal_ggtt[I915_NUM_RINGS];
+ u64 signal_ggtt[I915_NUM_ENGINES];
};
/* AKA wait() */
@@ -268,10 +265,13 @@ struct intel_engine_cs {
} semaphore;
/* Execlists */
- spinlock_t execlist_lock;
+ struct tasklet_struct irq_tasklet;
+ spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
struct list_head execlist_queue;
struct list_head execlist_retired_req_list;
- u8 next_context_status_buffer;
+ unsigned int fw_domains;
+ unsigned int next_context_status_buffer;
+ unsigned int idle_lite_restore_wa;
bool disable_lite_restore_wa;
u32 ctx_desc_template;
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
@@ -306,6 +306,7 @@ struct intel_engine_cs {
* inspecting request list.
*/
u32 last_submitted_seqno;
+ unsigned user_interrupts;
bool gpu_caches_dirty;
@@ -332,15 +333,8 @@ struct intel_engine_cs {
/*
* Table of registers allowed in commands that read/write registers.
*/
- const struct drm_i915_reg_descriptor *reg_table;
- int reg_count;
-
- /*
- * Table of registers allowed in commands that read/write registers, but
- * only from the DRM master.
- */
- const struct drm_i915_reg_descriptor *master_reg_table;
- int master_reg_count;
+ const struct drm_i915_reg_table *reg_tables;
+ int reg_table_count;
/*
* Returns the bitmask for the length field of the specified command.
@@ -356,19 +350,19 @@ struct intel_engine_cs {
};
static inline bool
-intel_ring_initialized(struct intel_engine_cs *ring)
+intel_engine_initialized(struct intel_engine_cs *engine)
{
- return ring->dev != NULL;
+ return engine->dev != NULL;
}
static inline unsigned
-intel_ring_flag(struct intel_engine_cs *ring)
+intel_engine_flag(struct intel_engine_cs *engine)
{
- return 1 << ring->id;
+ return 1 << engine->id;
}
static inline u32
-intel_ring_sync_index(struct intel_engine_cs *ring,
+intel_ring_sync_index(struct intel_engine_cs *engine,
struct intel_engine_cs *other)
{
int idx;
@@ -381,34 +375,33 @@ intel_ring_sync_index(struct intel_engine_cs *ring,
* vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
*/
- idx = (other - ring) - 1;
+ idx = (other - engine) - 1;
if (idx < 0)
- idx += I915_NUM_RINGS;
+ idx += I915_NUM_ENGINES;
return idx;
}
static inline void
-intel_flush_status_page(struct intel_engine_cs *ring, int reg)
+intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
- drm_clflush_virt_range(&ring->status_page.page_addr[reg],
- sizeof(uint32_t));
+ mb();
+ clflush(&engine->status_page.page_addr[reg]);
+ mb();
}
static inline u32
-intel_read_status_page(struct intel_engine_cs *ring,
- int reg)
+intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
/* Ensure that the compiler doesn't optimize away the load. */
- barrier();
- return ring->status_page.page_addr[reg];
+ return READ_ONCE(engine->status_page.page_addr[reg]);
}
static inline void
-intel_write_status_page(struct intel_engine_cs *ring,
+intel_write_status_page(struct intel_engine_cs *engine,
int reg, u32 value)
{
- ring->status_page.page_addr[reg] = value;
+ engine->status_page.page_addr[reg] = value;
}
/*
@@ -439,42 +432,41 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
void intel_ringbuffer_free(struct intel_ringbuffer *ring);
-void intel_stop_ring_buffer(struct intel_engine_cs *ring);
-void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
+void intel_stop_engine(struct intel_engine_cs *engine);
+void intel_cleanup_engine(struct intel_engine_cs *engine);
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
-static inline void intel_ring_emit(struct intel_engine_cs *ring,
+static inline void intel_ring_emit(struct intel_engine_cs *engine,
u32 data)
{
- struct intel_ringbuffer *ringbuf = ring->buffer;
+ struct intel_ringbuffer *ringbuf = engine->buffer;
iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
ringbuf->tail += 4;
}
-static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
+static inline void intel_ring_emit_reg(struct intel_engine_cs *engine,
i915_reg_t reg)
{
- intel_ring_emit(ring, i915_mmio_reg_offset(reg));
+ intel_ring_emit(engine, i915_mmio_reg_offset(reg));
}
-static inline void intel_ring_advance(struct intel_engine_cs *ring)
+static inline void intel_ring_advance(struct intel_engine_cs *engine)
{
- struct intel_ringbuffer *ringbuf = ring->buffer;
+ struct intel_ringbuffer *ringbuf = engine->buffer;
ringbuf->tail &= ringbuf->size - 1;
}
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
-int intel_ring_space(struct intel_ringbuffer *ringbuf);
-bool intel_ring_stopped(struct intel_engine_cs *ring);
+bool intel_engine_stopped(struct intel_engine_cs *engine);
-int __must_check intel_ring_idle(struct intel_engine_cs *ring);
-void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
+int __must_check intel_engine_idle(struct intel_engine_cs *engine);
+void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
-void intel_fini_pipe_control(struct intel_engine_cs *ring);
-int intel_init_pipe_control(struct intel_engine_cs *ring);
+void intel_fini_pipe_control(struct intel_engine_cs *engine);
+int intel_init_pipe_control(struct intel_engine_cs *engine);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
@@ -482,9 +474,9 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);
-u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
+u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
-int init_workarounds_ring(struct intel_engine_cs *ring);
+int init_workarounds_ring(struct intel_engine_cs *engine);
static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 6e54d978d..7fb1da4e7 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -89,6 +89,10 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "TRANSCODER_C";
case POWER_DOMAIN_TRANSCODER_EDP:
return "TRANSCODER_EDP";
+ case POWER_DOMAIN_TRANSCODER_DSI_A:
+ return "TRANSCODER_DSI_A";
+ case POWER_DOMAIN_TRANSCODER_DSI_C:
+ return "TRANSCODER_DSI_C";
case POWER_DOMAIN_PORT_DDI_A_LANES:
return "PORT_DDI_A_LANES";
case POWER_DOMAIN_PORT_DDI_B_LANES:
@@ -393,11 +397,6 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_MODESET) | \
BIT(POWER_DOMAIN_AUX_A) | \
BIT(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
- (POWER_DOMAIN_MASK & ~( \
- SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- SKL_DISPLAY_DC_OFF_POWER_DOMAINS)) | \
- BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_TRANSCODER_A) | \
@@ -415,36 +414,21 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_VGA) | \
BIT(POWER_DOMAIN_GMBUS) | \
BIT(POWER_DOMAIN_INIT))
-#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \
- BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- BIT(POWER_DOMAIN_PIPE_A) | \
- BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
- BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
- BIT(POWER_DOMAIN_AUX_A) | \
- BIT(POWER_DOMAIN_PLLS) | \
- BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
BIT(POWER_DOMAIN_MODESET) | \
BIT(POWER_DOMAIN_AUX_A) | \
BIT(POWER_DOMAIN_INIT))
-#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
- (POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
- BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) | \
- BIT(POWER_DOMAIN_INIT))
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
-
- WARN(!IS_BROXTON(dev), "Platform doesn't support DC9.\n");
- WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
- "DC9 already programmed to be enabled.\n");
- WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
- "DC5 still not disabled to enable DC9.\n");
- WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
- WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
+ WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
+ "DC9 already programmed to be enabled.\n");
+ WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
+ "DC5 still not disabled to enable DC9.\n");
+ WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
+ WARN_ONCE(intel_irqs_enabled(dev_priv),
+ "Interrupts not disabled yet.\n");
/*
* TODO: check for the following to verify the conditions to enter DC9
@@ -457,11 +441,10 @@ static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
- WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
- WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
- "DC9 already programmed to be disabled.\n");
- WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
- "DC5 still not disabled.\n");
+ WARN_ONCE(intel_irqs_enabled(dev_priv),
+ "Interrupts not disabled yet.\n");
+ WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
+ "DC5 still not disabled.\n");
/*
* TODO: check for the following to verify DC9 state was indeed
@@ -472,24 +455,6 @@ static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
*/
}
-static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
-{
- uint32_t val, mask;
-
- mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
-
- if (IS_BROXTON(dev_priv))
- mask |= DC_STATE_DEBUG_MASK_CORES;
-
- /* The below bit doesn't need to be cleared ever afterwards */
- val = I915_READ(DC_STATE_DEBUG);
- if ((val & mask) != mask) {
- val |= mask;
- I915_WRITE(DC_STATE_DEBUG, val);
- POSTING_READ(DC_STATE_DEBUG);
- }
-}
-
static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
u32 state)
{
@@ -527,10 +492,9 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
state, rewrites);
}
-static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
+static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
- uint32_t val;
- uint32_t mask;
+ u32 mask;
mask = DC_STATE_EN_UPTO_DC5;
if (IS_BROXTON(dev_priv))
@@ -538,14 +502,30 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
else
mask |= DC_STATE_EN_UPTO_DC6;
- WARN_ON_ONCE(state & ~mask);
+ return mask;
+}
+
+void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
+{
+ u32 val;
- if (i915.enable_dc == 0)
- state = DC_STATE_DISABLE;
- else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5)
- state = DC_STATE_EN_UPTO_DC5;
+ val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
+
+ DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
+ dev_priv->csr.dc_state, val);
+ dev_priv->csr.dc_state = val;
+}
+
+static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
+{
+ uint32_t val;
+ uint32_t mask;
+
+ if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
+ state &= dev_priv->csr.allowed_dc_mask;
val = I915_READ(DC_STATE_EN);
+ mask = gen9_dc_mask(dev_priv);
DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
val & mask, state);
@@ -590,13 +570,9 @@ static void assert_csr_loaded(struct drm_i915_private *dev_priv)
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
SKL_DISP_PW_2);
- WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
- "Platform doesn't support DC5.\n");
- WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
@@ -606,19 +582,7 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
assert_csr_loaded(dev_priv);
}
-static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
-{
- /*
- * During initialization, the firmware may not be loaded yet.
- * We still want to make sure that the DC enabling flag is cleared.
- */
- if (dev_priv->power_domains.initializing)
- return;
-
- assert_rpm_wakelock_held(dev_priv);
-}
-
-static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
+void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc5(dev_priv);
@@ -629,11 +593,6 @@ static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
-
- WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
- "Platform doesn't support DC6.\n");
- WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
"Backlight is not disabled.\n");
WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
@@ -642,47 +601,60 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
assert_csr_loaded(dev_priv);
}
-static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
+void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
- /*
- * During initialization, the firmware may not be loaded yet.
- * We still want to make sure that the DC enabling flag is cleared.
- */
- if (dev_priv->power_domains.initializing)
- return;
+ assert_can_enable_dc6(dev_priv);
+
+ DRM_DEBUG_KMS("Enabling DC6\n");
+
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
- WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
- "DC6 already programmed to be disabled.\n");
}
-static void gen9_disable_dc5_dc6(struct drm_i915_private *dev_priv)
+void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
- assert_can_disable_dc5(dev_priv);
-
- if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
- i915.enable_dc != 0 && i915.enable_dc != 1)
- assert_can_disable_dc6(dev_priv);
+ DRM_DEBUG_KMS("Disabling DC6\n");
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
-void skl_enable_dc6(struct drm_i915_private *dev_priv)
+static void
+gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
{
- assert_can_enable_dc6(dev_priv);
+ enum skl_disp_power_wells power_well_id = power_well->data;
+ u32 val;
+ u32 mask;
- DRM_DEBUG_KMS("Enabling DC6\n");
+ mask = SKL_POWER_WELL_REQ(power_well_id);
- gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+ val = I915_READ(HSW_PWR_WELL_KVMR);
+ if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
+ power_well->name))
+ I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);
-}
+ val = I915_READ(HSW_PWR_WELL_BIOS);
+ val |= I915_READ(HSW_PWR_WELL_DEBUG);
-void skl_disable_dc6(struct drm_i915_private *dev_priv)
-{
- assert_can_disable_dc6(dev_priv);
+ if (!(val & mask))
+ return;
- DRM_DEBUG_KMS("Disabling DC6\n");
+ /*
+ * DMC is known to force on the request bits for power well 1 on SKL
+ * and BXT and the misc IO power well on SKL but we don't expect any
+ * other request bits to be set, so WARN for those.
+ */
+ if (power_well_id == SKL_DISP_PW_1 ||
+ ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+ power_well_id == SKL_DISP_PW_MISC_IO))
+ DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
+ "by DMC\n", power_well->name);
+ else
+ WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
+ power_well->name);
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
+ I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
}
static void skl_set_power_well(struct drm_i915_private *dev_priv,
@@ -739,10 +711,6 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
if (!is_enabled) {
DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
- if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
- state_mask), 1))
- DRM_ERROR("%s enable timeout\n",
- power_well->name);
check_fuse_status = true;
}
} else {
@@ -751,8 +719,16 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
}
+
+ if (IS_GEN9(dev_priv))
+ gen9_sanitize_power_well_requests(dev_priv, power_well);
}
+ if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
+ 1))
+ DRM_ERROR("%s %s timeout\n",
+ power_well->name, enable ? "enable" : "disable");
+
if (check_fuse_status) {
if (power_well->data == SKL_DISP_PW_1) {
if (wait_for((I915_READ(SKL_FUSE_STATUS) &
@@ -833,32 +809,33 @@ static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- gen9_disable_dc5_dc6(dev_priv);
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ if (IS_BROXTON(dev_priv)) {
+ broxton_cdclk_verify_state(dev_priv);
+ broxton_ddi_phy_verify_state(dev_priv);
+ }
}
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
- i915.enable_dc != 0 && i915.enable_dc != 1)
+ if (!dev_priv->csr.dmc_payload)
+ return;
+
+ if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
skl_enable_dc6(dev_priv);
- else
+ else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
gen9_enable_dc5(dev_priv);
}
static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- if (power_well->count > 0) {
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
- } else {
- if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
- i915.enable_dc != 0 &&
- i915.enable_dc != 1)
- gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
- else
- gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
- }
+ if (power_well->count > 0)
+ gen9_dc_off_power_well_enable(dev_priv, power_well);
+ else
+ gen9_dc_off_power_well_disable(dev_priv, power_well);
}
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
@@ -962,6 +939,17 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
return enabled;
}
+static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+
+ /*
+ * Disable trickle feed and enable pnd deadline calculation
+ */
+ I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+ I915_WRITE(CBR1_VLV, 0);
+}
+
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
@@ -984,6 +972,8 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
I915_WRITE(DPLL(pipe), val);
}
+ vlv_init_display_clock_gating(dev_priv);
+
spin_lock_irq(&dev_priv->irq_lock);
valleyview_enable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -1622,34 +1612,56 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
intel_runtime_pm_put(dev_priv);
}
-#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_A) | \
- BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
- BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
+#define HSW_DISPLAY_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_B) | \
+ BIT(POWER_DOMAIN_PIPE_C) | \
+ BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT(POWER_DOMAIN_TRANSCODER_C) | \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT(POWER_DOMAIN_PORT_CRT) | \
- BIT(POWER_DOMAIN_PLLS) | \
- BIT(POWER_DOMAIN_AUX_A) | \
- BIT(POWER_DOMAIN_AUX_B) | \
- BIT(POWER_DOMAIN_AUX_C) | \
- BIT(POWER_DOMAIN_AUX_D) | \
- BIT(POWER_DOMAIN_GMBUS) | \
- BIT(POWER_DOMAIN_INIT))
-#define HSW_DISPLAY_POWER_DOMAINS ( \
- (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
+ BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
+ BIT(POWER_DOMAIN_VGA) | \
+ BIT(POWER_DOMAIN_AUDIO) | \
BIT(POWER_DOMAIN_INIT))
-#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
- HSW_ALWAYS_ON_POWER_DOMAINS | \
- BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
-#define BDW_DISPLAY_POWER_DOMAINS ( \
- (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
+#define BDW_DISPLAY_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_B) | \
+ BIT(POWER_DOMAIN_PIPE_C) | \
+ BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
+ BIT(POWER_DOMAIN_VGA) | \
+ BIT(POWER_DOMAIN_AUDIO) | \
BIT(POWER_DOMAIN_INIT))
-#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
-#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
+#define VLV_DISPLAY_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_A) | \
+ BIT(POWER_DOMAIN_PIPE_B) | \
+ BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DSI) | \
+ BIT(POWER_DOMAIN_PORT_CRT) | \
+ BIT(POWER_DOMAIN_VGA) | \
+ BIT(POWER_DOMAIN_AUDIO) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
+ BIT(POWER_DOMAIN_GMBUS) | \
+ BIT(POWER_DOMAIN_INIT))
#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
@@ -1679,6 +1691,28 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
+#define CHV_DISPLAY_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_A) | \
+ BIT(POWER_DOMAIN_PIPE_B) | \
+ BIT(POWER_DOMAIN_PIPE_C) | \
+ BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DSI) | \
+ BIT(POWER_DOMAIN_VGA) | \
+ BIT(POWER_DOMAIN_AUDIO) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
+ BIT(POWER_DOMAIN_AUX_D) | \
+ BIT(POWER_DOMAIN_GMBUS) | \
+ BIT(POWER_DOMAIN_INIT))
+
#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
@@ -1746,7 +1780,7 @@ static struct i915_power_well hsw_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
- .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
+ .domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
},
{
@@ -1760,7 +1794,7 @@ static struct i915_power_well bdw_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
- .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
+ .domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
},
{
@@ -1795,7 +1829,7 @@ static struct i915_power_well vlv_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
- .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+ .domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.data = PUNIT_POWER_WELL_ALWAYS_ON,
},
@@ -1853,7 +1887,7 @@ static struct i915_power_well chv_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
- .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+ .domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
},
{
@@ -1863,7 +1897,7 @@ static struct i915_power_well chv_power_wells[] = {
* power wells don't actually exist. Pipe A power well is
* required for any pipe to work.
*/
- .domains = VLV_DISPLAY_POWER_DOMAINS,
+ .domains = CHV_DISPLAY_POWER_DOMAINS,
.data = PIPE_A,
.ops = &chv_pipe_power_well_ops,
},
@@ -1897,7 +1931,7 @@ static struct i915_power_well skl_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
- .domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
+ .domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.data = SKL_DISP_PW_ALWAYS_ON,
},
@@ -1953,44 +1987,16 @@ static struct i915_power_well skl_power_wells[] = {
},
};
-void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv)
-{
- struct i915_power_well *well;
-
- if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
- return;
-
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_enable(dev_priv, well);
-
- well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
- intel_power_well_enable(dev_priv, well);
-}
-
-void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv)
-{
- struct i915_power_well *well;
-
- if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
- return;
-
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_disable(dev_priv, well);
-
- well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
- intel_power_well_disable(dev_priv, well);
-}
-
static struct i915_power_well bxt_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
- .domains = BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
+ .domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
},
{
.name = "power well 1",
- .domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
+ .domains = 0,
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_1,
},
@@ -2015,12 +2021,56 @@ sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
if (disable_power_well >= 0)
return !!disable_power_well;
- if (IS_BROXTON(dev_priv)) {
- DRM_DEBUG_KMS("Disabling display power well support\n");
- return 0;
+ return 1;
+}
+
+static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
+ int enable_dc)
+{
+ uint32_t mask;
+ int requested_dc;
+ int max_dc;
+
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ max_dc = 2;
+ mask = 0;
+ } else if (IS_BROXTON(dev_priv)) {
+ max_dc = 1;
+ /*
+ * DC9 has a separate HW flow from the rest of the DC states,
+ * not depending on the DMC firmware. It's needed by system
+ * suspend/resume, so allow it unconditionally.
+ */
+ mask = DC_STATE_EN_DC9;
+ } else {
+ max_dc = 0;
+ mask = 0;
}
- return 1;
+ if (!i915.disable_power_well)
+ max_dc = 0;
+
+ if (enable_dc >= 0 && enable_dc <= max_dc) {
+ requested_dc = enable_dc;
+ } else if (enable_dc == -1) {
+ requested_dc = max_dc;
+ } else if (enable_dc > max_dc && enable_dc <= 2) {
+ DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
+ enable_dc, max_dc);
+ requested_dc = max_dc;
+ } else {
+ DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
+ requested_dc = max_dc;
+ }
+
+ if (requested_dc > 1)
+ mask |= DC_STATE_EN_UPTO_DC6;
+ if (requested_dc > 0)
+ mask |= DC_STATE_EN_UPTO_DC5;
+
+ DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
+
+ return mask;
}
#define set_power_wells(power_domains, __power_wells) ({ \
@@ -2041,6 +2091,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
i915.disable_power_well);
+ dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
+ i915.enable_dc);
BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);
@@ -2050,17 +2102,17 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
- if (IS_HASWELL(dev_priv->dev)) {
+ if (IS_HASWELL(dev_priv)) {
set_power_wells(power_domains, hsw_power_wells);
- } else if (IS_BROADWELL(dev_priv->dev)) {
+ } else if (IS_BROADWELL(dev_priv)) {
set_power_wells(power_domains, bdw_power_wells);
- } else if (IS_SKYLAKE(dev_priv->dev) || IS_KABYLAKE(dev_priv->dev)) {
+ } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
set_power_wells(power_domains, skl_power_wells);
- } else if (IS_BROXTON(dev_priv->dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
set_power_wells(power_domains, bxt_power_wells);
- } else if (IS_CHERRYVIEW(dev_priv->dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
set_power_wells(power_domains, chv_power_wells);
- } else if (IS_VALLEYVIEW(dev_priv->dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
set_power_wells(power_domains, vlv_power_wells);
} else {
set_power_wells(power_domains, i9xx_always_on_power_well);
@@ -2120,9 +2172,10 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
}
static void skl_display_core_init(struct drm_i915_private *dev_priv,
- bool resume)
+ bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
uint32_t val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -2133,7 +2186,13 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
/* enable PG1 and Misc I/O */
mutex_lock(&power_domains->lock);
- skl_pw1_misc_io_init(dev_priv);
+
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_enable(dev_priv, well);
+
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
+ intel_power_well_enable(dev_priv, well);
+
mutex_unlock(&power_domains->lock);
if (!resume)
@@ -2141,13 +2200,14 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
skl_init_cdclk(dev_priv);
- if (dev_priv->csr.dmc_payload && intel_csr_load_program(dev_priv))
- gen9_set_dc_state_debugmask(dev_priv);
+ if (dev_priv->csr.dmc_payload)
+ intel_csr_load_program(dev_priv);
}
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -2155,8 +2215,73 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
/* The spec doesn't call for removing the reset handshake flag */
/* disable PG1 and Misc I/O */
+
+ mutex_lock(&power_domains->lock);
+
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
+ intel_power_well_disable(dev_priv, well);
+
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_disable(dev_priv, well);
+
+ mutex_unlock(&power_domains->lock);
+}
+
+void bxt_display_core_init(struct drm_i915_private *dev_priv,
+ bool resume)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
+ uint32_t val;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ /*
+ * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
+ * or else the reset will hang because there is no PCH to respond.
+ * Move the handshake programming to initialization sequence.
+ * Previously was left up to BIOS.
+ */
+ val = I915_READ(HSW_NDE_RSTWRN_OPT);
+ val &= ~RESET_PCH_HANDSHAKE_ENABLE;
+ I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
+
+ /* Enable PG1 */
+ mutex_lock(&power_domains->lock);
+
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_enable(dev_priv, well);
+
+ mutex_unlock(&power_domains->lock);
+
+ broxton_init_cdclk(dev_priv);
+ broxton_ddi_phy_init(dev_priv);
+
+ broxton_cdclk_verify_state(dev_priv);
+ broxton_ddi_phy_verify_state(dev_priv);
+
+ if (resume && dev_priv->csr.dmc_payload)
+ intel_csr_load_program(dev_priv);
+}
+
+void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ broxton_ddi_phy_uninit(dev_priv);
+ broxton_uninit_cdclk(dev_priv);
+
+ /* The spec doesn't call for removing the reset handshake flag */
+
+ /* Disable PG1 */
mutex_lock(&power_domains->lock);
- skl_pw1_misc_io_fini(dev_priv);
+
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_disable(dev_priv, well);
+
mutex_unlock(&power_domains->lock);
}
@@ -2291,6 +2416,8 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
skl_display_core_init(dev_priv, resume);
+ } else if (IS_BROXTON(dev)) {
+ bxt_display_core_init(dev_priv, resume);
} else if (IS_CHERRYVIEW(dev)) {
mutex_lock(&power_domains->lock);
chv_phy_control_init(dev_priv);
@@ -2328,6 +2455,8 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skl_display_core_uninit(dev_priv);
+ else if (IS_BROXTON(dev_priv))
+ bxt_display_core_uninit(dev_priv);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 4ecc076c4..2128fae56 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1398,12 +1398,10 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
}
dotclock = pipe_config->port_clock;
+
if (pipe_config->pixel_multiplier)
dotclock /= pipe_config->pixel_multiplier;
- if (HAS_PCH_SPLIT(dev))
- ironlake_check_encoder_dotclock(pipe_config, dotclock);
-
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
/* Cross check the port pixel multiplier with the sdvo encoder state. */
@@ -2262,9 +2260,9 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
struct sdvo_device_mapping *mapping;
if (sdvo->port == PORT_B)
- mapping = &(dev_priv->sdvo_mappings[0]);
+ mapping = &dev_priv->vbt.sdvo_mappings[0];
else
- mapping = &(dev_priv->sdvo_mappings[1]);
+ mapping = &dev_priv->vbt.sdvo_mappings[1];
if (mapping->initialized)
sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
@@ -2280,9 +2278,9 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
u8 pin;
if (sdvo->port == PORT_B)
- mapping = &dev_priv->sdvo_mappings[0];
+ mapping = &dev_priv->vbt.sdvo_mappings[0];
else
- mapping = &dev_priv->sdvo_mappings[1];
+ mapping = &dev_priv->vbt.sdvo_mappings[1];
if (mapping->initialized &&
intel_gmbus_is_valid_pin(dev_priv, mapping->i2c_pin))
@@ -2318,11 +2316,11 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
struct sdvo_device_mapping *my_mapping, *other_mapping;
if (sdvo->port == PORT_B) {
- my_mapping = &dev_priv->sdvo_mappings[0];
- other_mapping = &dev_priv->sdvo_mappings[1];
+ my_mapping = &dev_priv->vbt.sdvo_mappings[0];
+ other_mapping = &dev_priv->vbt.sdvo_mappings[1];
} else {
- my_mapping = &dev_priv->sdvo_mappings[1];
- other_mapping = &dev_priv->sdvo_mappings[0];
+ my_mapping = &dev_priv->vbt.sdvo_mappings[1];
+ other_mapping = &dev_priv->vbt.sdvo_mappings[0];
}
/* If the BIOS described our SDVO device, take advantage of it. */
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index a2582c455..0f3e2303e 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -193,7 +193,7 @@ skl_update_plane(struct drm_plane *drm_plane,
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 surf_addr;
u32 tile_height, plane_offset, plane_size;
- unsigned int rotation;
+ unsigned int rotation = plane_state->base.rotation;
int x_offset, y_offset;
int crtc_x = plane_state->dst.x1;
int crtc_y = plane_state->dst.y1;
@@ -213,7 +213,6 @@ skl_update_plane(struct drm_plane *drm_plane,
plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
- rotation = plane_state->base.rotation;
plane_ctl |= skl_plane_ctl_rotation(rotation);
stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
@@ -351,6 +350,7 @@ vlv_update_plane(struct drm_plane *dplane,
int plane = intel_plane->plane;
u32 sprctl;
u32 sprsurf_offset, linear_offset;
+ unsigned int rotation = dplane->state->rotation;
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->dst.x1;
@@ -423,12 +423,11 @@ vlv_update_plane(struct drm_plane *dplane,
crtc_h--;
linear_offset = y * fb->pitches[0] + x * cpp;
- sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
- fb->modifier[0], cpp,
- fb->pitches[0]);
+ sprsurf_offset = intel_compute_tile_offset(&x, &y, fb, 0,
+ fb->pitches[0], rotation);
linear_offset -= sprsurf_offset;
- if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
+ if (rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SP_ROTATE_180;
x += src_w;
@@ -493,6 +492,7 @@ ivb_update_plane(struct drm_plane *plane,
enum pipe pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
u32 sprsurf_offset, linear_offset;
+ unsigned int rotation = plane_state->base.rotation;
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->dst.x1;
@@ -556,12 +556,11 @@ ivb_update_plane(struct drm_plane *plane,
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
linear_offset = y * fb->pitches[0] + x * cpp;
- sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
- fb->modifier[0], cpp,
- fb->pitches[0]);
+ sprsurf_offset = intel_compute_tile_offset(&x, &y, fb, 0,
+ fb->pitches[0], rotation);
linear_offset -= sprsurf_offset;
- if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
+ if (rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SPRITE_ROTATE_180;
/* HSW and BDW do this automagically in hardware */
@@ -634,6 +633,7 @@ ilk_update_plane(struct drm_plane *plane,
int pipe = intel_plane->pipe;
u32 dvscntr, dvsscale;
u32 dvssurf_offset, linear_offset;
+ unsigned int rotation = plane_state->base.rotation;
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->dst.x1;
@@ -693,12 +693,11 @@ ilk_update_plane(struct drm_plane *plane,
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
linear_offset = y * fb->pitches[0] + x * cpp;
- dvssurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
- fb->modifier[0], cpp,
- fb->pitches[0]);
+ dvssurf_offset = intel_compute_tile_offset(&x, &y, fb, 0,
+ fb->pitches[0], rotation);
linear_offset -= dvssurf_offset;
- if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
+ if (rotation == BIT(DRM_ROTATE_180)) {
dvscntr |= DVS_ROTATE_180;
x += src_w;
@@ -1026,8 +1025,8 @@ static uint32_t skl_plane_formats[] = {
int
intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
{
- struct intel_plane *intel_plane;
- struct intel_plane_state *state;
+ struct intel_plane *intel_plane = NULL;
+ struct intel_plane_state *state = NULL;
unsigned long possible_crtcs;
const uint32_t *plane_formats;
int num_plane_formats;
@@ -1037,13 +1036,15 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
return -ENODEV;
intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
- if (!intel_plane)
- return -ENOMEM;
+ if (!intel_plane) {
+ ret = -ENOMEM;
+ goto fail;
+ }
state = intel_create_plane_state(&intel_plane->base);
if (!state) {
- kfree(intel_plane);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fail;
}
intel_plane->base.state = &state->base;
@@ -1098,28 +1099,34 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
break;
default:
- kfree(intel_plane);
- return -ENODEV;
+ MISSING_CASE(INTEL_INFO(dev)->gen);
+ ret = -ENODEV;
+ goto fail;
}
intel_plane->pipe = pipe;
intel_plane->plane = plane;
intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane);
intel_plane->check_plane = intel_check_sprite_plane;
+
possible_crtcs = (1 << pipe);
+
ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs,
plane_formats, num_plane_formats,
DRM_PLANE_TYPE_OVERLAY, NULL);
- if (ret) {
- kfree(intel_plane);
- goto out;
- }
+ if (ret)
+ goto fail;
intel_create_rotation_property(dev, intel_plane);
drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
-out:
+ return 0;
+
+fail:
+ kfree(state);
+ kfree(intel_plane);
+
return ret;
}
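
The reworked init path above funnels every failure through a single fail: label and relies on kfree(NULL) being a no-op, so both allocations can be freed unconditionally. A minimal standalone sketch of the same pattern, with hypothetical struct and function names:

#include <linux/slab.h>

struct example_a { int dummy; };
struct example_b { int dummy; };

static int example_init(void)
{
	struct example_a *a = NULL;
	struct example_b *b = NULL;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		ret = -ENOMEM;
		goto fail;
	}

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b) {
		ret = -ENOMEM;
		goto fail;
	}

	/* in real code a and b would be published/stored here */
	return 0;

fail:
	/* kfree(NULL) is a no-op, so both pointers are safe to free unconditionally */
	kfree(b);
	kfree(a);
	return ret;
}
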
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 6745bad5b..223129d3c 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -326,24 +326,12 @@ static const struct color_conversion sdtv_csc_yprpb = {
.rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200,
};
-static const struct color_conversion sdtv_csc_rgb = {
- .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
- .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
- .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
-};
-
static const struct color_conversion hdtv_csc_yprpb = {
.ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145,
.ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200,
.rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200,
};
-static const struct color_conversion hdtv_csc_rgb = {
- .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
- .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
- .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
-};
-
static const struct video_levels component_levels = {
.blank = 279, .black = 279, .burst = 0,
};
@@ -1531,47 +1519,6 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
.destroy = intel_encoder_destroy,
};
-/*
- * Enumerate the child dev array parsed from VBT to check whether
- * the integrated TV is present.
- * If it is present, return 1.
- * If it is not present, return false.
- * If no child dev is parsed from VBT, it assumes that the TV is present.
- */
-static int tv_is_present_in_vbt(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- union child_device_config *p_child;
- int i, ret;
-
- if (!dev_priv->vbt.child_dev_num)
- return 1;
-
- ret = 0;
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- p_child = dev_priv->vbt.child_dev + i;
- /*
- * If the device type is not TV, continue.
- */
- switch (p_child->old.device_type) {
- case DEVICE_TYPE_INT_TV:
- case DEVICE_TYPE_TV:
- case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
- break;
- default:
- continue;
- }
- /* Only when the addin_offset is non-zero, it is regarded
- * as present.
- */
- if (p_child->old.addin_offset) {
- ret = 1;
- break;
- }
- }
- return ret;
-}
-
void
intel_tv_init(struct drm_device *dev)
{
@@ -1587,13 +1534,10 @@ intel_tv_init(struct drm_device *dev)
if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
return;
- if (!tv_is_present_in_vbt(dev)) {
+ if (!intel_bios_is_tv_present(dev_priv)) {
DRM_DEBUG_KMS("Integrated TV is not present.\n");
return;
}
- /* Even if we have an encoder we may not have a connector */
- if (!dev_priv->vbt.int_tv_support)
- return;
/*
* Sanity check the TV output by checking to see if the
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 68b6f69aa..4f1dfe616 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -60,7 +60,11 @@ fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
- mod_timer_pinned(&d->timer, jiffies + 1);
+ d->wake_count++;
+ hrtimer_start_range_ns(&d->timer,
+ ktime_set(0, NSEC_PER_MSEC),
+ NSEC_PER_MSEC,
+ HRTIMER_MODE_REL);
}
static inline void
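
The forcewake release is now deferred through an hrtimer armed for roughly one millisecond with a millisecond of slack, and the reference count is bumped when the timer is armed. A self-contained sketch of that arm/expire pattern, assuming only the standard hrtimer API; the names are hypothetical and the locking the real code takes around the release is omitted:

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/printk.h>

struct example_domain {
	struct hrtimer timer;
	unsigned int wake_count;
};

static enum hrtimer_restart example_release(struct hrtimer *timer)
{
	struct example_domain *d =
		container_of(timer, struct example_domain, timer);

	/* Locking omitted for brevity; the real code holds uncore.lock here. */
	if (--d->wake_count == 0)
		pr_debug("example: releasing domain\n");

	return HRTIMER_NORESTART;
}

static void example_arm(struct example_domain *d)
{
	d->wake_count++;	/* reference dropped by the timer callback */
	hrtimer_start_range_ns(&d->timer, ktime_set(0, NSEC_PER_MSEC),
			       NSEC_PER_MSEC, HRTIMER_MODE_REL);
}

static void example_timer_init(struct example_domain *d)
{
	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = example_release;
}
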
@@ -107,22 +111,22 @@ static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
- enum forcewake_domain_id id;
- for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
+ for_each_fw_domain_masked(d, fw_domains, dev_priv) {
fw_domain_wait_ack_clear(d);
fw_domain_get(d);
- fw_domain_wait_ack(d);
}
+
+ for_each_fw_domain_masked(d, fw_domains, dev_priv)
+ fw_domain_wait_ack(d);
}
static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
- enum forcewake_domain_id id;
- for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
+ for_each_fw_domain_masked(d, fw_domains, dev_priv) {
fw_domain_put(d);
fw_domain_posting_read(d);
}
@@ -132,10 +136,9 @@ static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
struct intel_uncore_forcewake_domain *d;
- enum forcewake_domain_id id;
/* No need to do this for all domains, just the first one found */
- for_each_fw_domain(d, dev_priv, id) {
+ for_each_fw_domain(d, dev_priv) {
fw_domain_posting_read(d);
break;
}
@@ -145,12 +148,11 @@ static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
- enum forcewake_domain_id id;
if (dev_priv->uncore.fw_domains == 0)
return;
- for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
+ for_each_fw_domain_masked(d, fw_domains, dev_priv)
fw_domain_reset(d);
fw_domains_posting_read(dev_priv);
@@ -204,7 +206,7 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
/* On VLV, FIFO will be shared by both SW and HW.
* So, we need to read the FREE_ENTRIES every time */
- if (IS_VALLEYVIEW(dev_priv->dev))
+ if (IS_VALLEYVIEW(dev_priv))
dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);
if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
@@ -224,9 +226,11 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
return ret;
}
-static void intel_uncore_fw_release_timer(unsigned long arg)
+static enum hrtimer_restart
+intel_uncore_fw_release_timer(struct hrtimer *timer)
{
- struct intel_uncore_forcewake_domain *domain = (void *)arg;
+ struct intel_uncore_forcewake_domain *domain =
+ container_of(timer, struct intel_uncore_forcewake_domain, timer);
unsigned long irqflags;
assert_rpm_device_not_suspended(domain->i915);
@@ -240,6 +244,8 @@ static void intel_uncore_fw_release_timer(unsigned long arg)
1 << domain->id);
spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
+
+ return HRTIMER_NORESTART;
}
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
@@ -248,7 +254,6 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
unsigned long irqflags;
struct intel_uncore_forcewake_domain *domain;
int retry_count = 100;
- enum forcewake_domain_id id;
enum forcewake_domains fw = 0, active_domains;
/* Hold uncore.lock across reset to prevent any register access
@@ -258,18 +263,18 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
while (1) {
active_domains = 0;
- for_each_fw_domain(domain, dev_priv, id) {
- if (del_timer_sync(&domain->timer) == 0)
+ for_each_fw_domain(domain, dev_priv) {
+ if (hrtimer_cancel(&domain->timer) == 0)
continue;
- intel_uncore_fw_release_timer((unsigned long)domain);
+ intel_uncore_fw_release_timer(&domain->timer);
}
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- for_each_fw_domain(domain, dev_priv, id) {
- if (timer_pending(&domain->timer))
- active_domains |= (1 << id);
+ for_each_fw_domain(domain, dev_priv) {
+ if (hrtimer_active(&domain->timer))
+ active_domains |= domain->mask;
}
if (active_domains == 0)
@@ -286,9 +291,9 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
WARN_ON(active_domains);
- for_each_fw_domain(domain, dev_priv, id)
+ for_each_fw_domain(domain, dev_priv)
if (domain->wake_count)
- fw |= 1 << id;
+ fw |= domain->mask;
if (fw)
dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
@@ -310,21 +315,49 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void intel_uncore_ellc_detect(struct drm_device *dev)
+static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
+ const unsigned int sets[4] = { 1, 1, 2, 2 };
+ const u32 cap = dev_priv->edram_cap;
+
+ return EDRAM_NUM_BANKS(cap) *
+ ways[EDRAM_WAYS_IDX(cap)] *
+ sets[EDRAM_SETS_IDX(cap)] *
+ 1024 * 1024;
+}
+
+u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
+{
+ if (!HAS_EDRAM(dev_priv))
+ return 0;
- if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
- INTEL_INFO(dev)->gen >= 9) &&
- (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
- /* The docs do not explain exactly how the calculation can be
- * made. It is somewhat guessable, but for now, it's always
- * 128MB.
- * NB: We can't write IDICR yet because we do not have gt funcs
+ /* The capability bits needed for the size calculation
+ * are not present before gen9, so always return 128MB.
+ */
+ if (INTEL_GEN(dev_priv) < 9)
+ return 128 * 1024 * 1024;
+
+ return gen9_edram_size(dev_priv);
+}
+
+static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
+{
+ if (IS_HASWELL(dev_priv) ||
+ IS_BROADWELL(dev_priv) ||
+ INTEL_GEN(dev_priv) >= 9) {
+ dev_priv->edram_cap = __raw_i915_read32(dev_priv,
+ HSW_EDRAM_CAP);
+
+ /* NB: We can't write IDICR yet because we do not have gt funcs
* set up */
- dev_priv->ellc_size = 128;
- DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
+ } else {
+ dev_priv->edram_cap = 0;
}
+
+ if (HAS_EDRAM(dev_priv))
+ DRM_INFO("Found %lluMB of eDRAM\n",
+ intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}
static bool
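
On gen9+ the eDRAM capability register encodes the number of banks plus indices into ways/sets lookup tables, and the size is simply their product in MiB; before gen9 the bits are missing so 128MB is assumed. A hedged arithmetic sketch of that product (the EDRAM_* decode macros themselves live in i915_reg.h and are not reproduced here):

#include <linux/types.h>

/* Example: 8 banks x 16 ways x 1 set x 1 MiB = 128 MiB */
static u64 example_edram_size(unsigned int banks, unsigned int ways,
			      unsigned int sets)
{
	return (u64)banks * ways * sets * 1024 * 1024;
}
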
@@ -410,16 +443,15 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
- enum forcewake_domain_id id;
if (!dev_priv->uncore.funcs.force_wake_get)
return;
fw_domains &= dev_priv->uncore.fw_domains;
- for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+ for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
if (domain->wake_count++)
- fw_domains &= ~(1 << id);
+ fw_domains &= ~domain->mask;
}
if (fw_domains)
@@ -477,21 +509,19 @@ static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
- enum forcewake_domain_id id;
if (!dev_priv->uncore.funcs.force_wake_put)
return;
fw_domains &= dev_priv->uncore.fw_domains;
- for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+ for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
if (WARN_ON(domain->wake_count == 0))
continue;
if (--domain->wake_count)
continue;
- domain->wake_count++;
fw_domain_arm_timer(domain);
}
}
@@ -539,18 +569,27 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
struct intel_uncore_forcewake_domain *domain;
- enum forcewake_domain_id id;
if (!dev_priv->uncore.funcs.force_wake_get)
return;
- for_each_fw_domain(domain, dev_priv, id)
+ for_each_fw_domain(domain, dev_priv)
WARN_ON(domain->wake_count);
}
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
+#define __gen6_reg_read_fw_domains(offset) \
+({ \
+ enum forcewake_domains __fwd; \
+ if (NEEDS_FORCE_WAKE(offset)) \
+ __fwd = FORCEWAKE_RENDER; \
+ else \
+ __fwd = 0; \
+ __fwd; \
+})
+
#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
@@ -564,6 +603,48 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
REG_RANGE((reg), 0x22000, 0x24000) || \
REG_RANGE((reg), 0x30000, 0x40000))
+#define __vlv_reg_read_fw_domains(offset) \
+({ \
+ enum forcewake_domains __fwd = 0; \
+ if (!NEEDS_FORCE_WAKE(offset)) \
+ __fwd = 0; \
+ else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_RENDER; \
+ else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_MEDIA; \
+ __fwd; \
+})
+
+static const i915_reg_t gen8_shadowed_regs[] = {
+ GEN6_RPNSWREQ,
+ GEN6_RC_VIDEO_FREQ,
+ RING_TAIL(RENDER_RING_BASE),
+ RING_TAIL(GEN6_BSD_RING_BASE),
+ RING_TAIL(VEBOX_RING_BASE),
+ RING_TAIL(BLT_RING_BASE),
+ /* TODO: Other registers are not yet used */
+};
+
+static bool is_gen8_shadowed(u32 offset)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
+ if (offset == gen8_shadowed_regs[i].reg)
+ return true;
+
+ return false;
+}
+
+#define __gen8_reg_write_fw_domains(offset) \
+({ \
+ enum forcewake_domains __fwd; \
+ if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
+ __fwd = FORCEWAKE_RENDER; \
+ else \
+ __fwd = 0; \
+ __fwd; \
+})
+
#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
(REG_RANGE((reg), 0x2000, 0x4000) || \
REG_RANGE((reg), 0x5200, 0x8000) || \
@@ -586,6 +667,34 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
REG_RANGE((reg), 0x9000, 0xB000) || \
REG_RANGE((reg), 0xF000, 0x10000))
+#define __chv_reg_read_fw_domains(offset) \
+({ \
+ enum forcewake_domains __fwd = 0; \
+ if (!NEEDS_FORCE_WAKE(offset)) \
+ __fwd = 0; \
+ else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_RENDER; \
+ else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_MEDIA; \
+ else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+ __fwd; \
+})
+
+#define __chv_reg_write_fw_domains(offset) \
+({ \
+ enum forcewake_domains __fwd = 0; \
+ if (!NEEDS_FORCE_WAKE(offset) || is_gen8_shadowed(offset)) \
+ __fwd = 0; \
+ else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_RENDER; \
+ else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_MEDIA; \
+ else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+ __fwd; \
+})
+
#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
REG_RANGE((reg), 0xB00, 0x2000)
@@ -618,6 +727,61 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
!FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
!FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
+#define SKL_NEEDS_FORCE_WAKE(reg) \
+ ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
+
+#define __gen9_reg_read_fw_domains(offset) \
+({ \
+ enum forcewake_domains __fwd; \
+ if (!SKL_NEEDS_FORCE_WAKE(offset)) \
+ __fwd = 0; \
+ else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_RENDER; \
+ else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_MEDIA; \
+ else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+ else \
+ __fwd = FORCEWAKE_BLITTER; \
+ __fwd; \
+})
+
+static const i915_reg_t gen9_shadowed_regs[] = {
+ RING_TAIL(RENDER_RING_BASE),
+ RING_TAIL(GEN6_BSD_RING_BASE),
+ RING_TAIL(VEBOX_RING_BASE),
+ RING_TAIL(BLT_RING_BASE),
+ GEN6_RPNSWREQ,
+ GEN6_RC_VIDEO_FREQ,
+ /* TODO: Other registers are not yet used */
+};
+
+static bool is_gen9_shadowed(u32 offset)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
+ if (offset == gen9_shadowed_regs[i].reg)
+ return true;
+
+ return false;
+}
+
+#define __gen9_reg_write_fw_domains(offset) \
+({ \
+ enum forcewake_domains __fwd; \
+ if (!SKL_NEEDS_FORCE_WAKE(offset) || is_gen9_shadowed(offset)) \
+ __fwd = 0; \
+ else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_RENDER; \
+ else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_MEDIA; \
+ else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+ else \
+ __fwd = FORCEWAKE_BLITTER; \
+ __fwd; \
+})
+
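
These helpers are written as GCC statement expressions so the same offset-to-forcewake-domain mapping can be shared between the MMIO accessor macros below and intel_uncore_forcewake_for_reg(). A minimal sketch of the idiom with a purely hypothetical register range:

/* Hypothetical range; illustrates only the ({ ... }) statement-expression idiom. */
#define EXAMPLE_RENDER_RANGE(offset) ((offset) >= 0x2000 && (offset) < 0x4000)

#define __example_reg_read_fw_domains(offset) \
({ \
	unsigned int __fwd = 0; \
	if (EXAMPLE_RENDER_RANGE(offset)) \
		__fwd = 0x1; /* e.g. FORCEWAKE_RENDER */ \
	__fwd; /* the last expression is the value of the whole block */ \
})
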
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
@@ -633,15 +797,6 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv,
const bool read,
const bool before)
{
- /* XXX. We limit the auto arming traces for mmio
- * debugs on these platforms. There are just too many
- * revealed by these and CI/Bat suffers from the noise.
- * Please fix and then re-enable the automatic traces.
- */
- if (i915.mmio_debug < 2 &&
- (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
- return;
-
if (WARN(check_for_unclaimed_mmio(dev_priv),
"Unclaimed register detected %s %s register 0x%x\n",
before ? "before" : "after",
@@ -716,23 +871,21 @@ __gen2_read(64)
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val
-static inline void __force_wake_get(struct drm_i915_private *dev_priv,
- enum forcewake_domains fw_domains)
+static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
- enum forcewake_domain_id id;
if (WARN_ON(!fw_domains))
return;
/* Ideally GCC would constant-fold and eliminate this loop */
- for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+ for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
if (domain->wake_count) {
- fw_domains &= ~(1 << id);
+ fw_domains &= ~domain->mask;
continue;
}
- domain->wake_count++;
fw_domain_arm_timer(domain);
}
@@ -743,9 +896,11 @@ static inline void __force_wake_get(struct drm_i915_private *dev_priv,
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+ enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
- if (NEEDS_FORCE_WAKE(offset)) \
- __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+ fw_engine = __gen6_reg_read_fw_domains(offset); \
+ if (fw_engine) \
+ __force_wake_auto(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
GEN6_READ_FOOTER; \
}
@@ -753,16 +908,11 @@ gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
- enum forcewake_domains fw_engine = 0; \
+ enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
- if (!NEEDS_FORCE_WAKE(offset)) \
- fw_engine = 0; \
- else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_MEDIA; \
+ fw_engine = __vlv_reg_read_fw_domains(offset); \
if (fw_engine) \
- __force_wake_get(dev_priv, fw_engine); \
+ __force_wake_auto(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
GEN6_READ_FOOTER; \
}
@@ -770,42 +920,23 @@ vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
- enum forcewake_domains fw_engine = 0; \
+ enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
- if (!NEEDS_FORCE_WAKE(offset)) \
- fw_engine = 0; \
- else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_MEDIA; \
- else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+ fw_engine = __chv_reg_read_fw_domains(offset); \
if (fw_engine) \
- __force_wake_get(dev_priv, fw_engine); \
+ __force_wake_auto(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
GEN6_READ_FOOTER; \
}
-#define SKL_NEEDS_FORCE_WAKE(reg) \
- ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
-
#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
- if (!SKL_NEEDS_FORCE_WAKE(offset)) \
- fw_engine = 0; \
- else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_MEDIA; \
- else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
- else \
- fw_engine = FORCEWAKE_BLITTER; \
+ fw_engine = __gen9_reg_read_fw_domains(offset); \
if (fw_engine) \
- __force_wake_get(dev_priv, fw_engine); \
+ __force_wake_auto(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
GEN6_READ_FOOTER; \
}
@@ -942,34 +1073,14 @@ hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool t
GEN6_WRITE_FOOTER; \
}
-static const i915_reg_t gen8_shadowed_regs[] = {
- FORCEWAKE_MT,
- GEN6_RPNSWREQ,
- GEN6_RC_VIDEO_FREQ,
- RING_TAIL(RENDER_RING_BASE),
- RING_TAIL(GEN6_BSD_RING_BASE),
- RING_TAIL(VEBOX_RING_BASE),
- RING_TAIL(BLT_RING_BASE),
- /* TODO: Other registers are not yet used */
-};
-
-static bool is_gen8_shadowed(struct drm_i915_private *dev_priv,
- i915_reg_t reg)
-{
- int i;
- for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
- if (i915_mmio_reg_equal(reg, gen8_shadowed_regs[i]))
- return true;
-
- return false;
-}
-
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+ enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
- if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
- __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+ fw_engine = __gen8_reg_write_fw_domains(offset); \
+ if (fw_engine) \
+ __force_wake_auto(dev_priv, fw_engine); \
__raw_i915_write##x(dev_priv, reg, val); \
GEN6_WRITE_FOOTER; \
}
@@ -977,66 +1088,24 @@ gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool
#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
- enum forcewake_domains fw_engine = 0; \
+ enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
- if (!NEEDS_FORCE_WAKE(offset) || \
- is_gen8_shadowed(dev_priv, reg)) \
- fw_engine = 0; \
- else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_MEDIA; \
- else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+ fw_engine = __chv_reg_write_fw_domains(offset); \
if (fw_engine) \
- __force_wake_get(dev_priv, fw_engine); \
+ __force_wake_auto(dev_priv, fw_engine); \
__raw_i915_write##x(dev_priv, reg, val); \
GEN6_WRITE_FOOTER; \
}
-static const i915_reg_t gen9_shadowed_regs[] = {
- RING_TAIL(RENDER_RING_BASE),
- RING_TAIL(GEN6_BSD_RING_BASE),
- RING_TAIL(VEBOX_RING_BASE),
- RING_TAIL(BLT_RING_BASE),
- FORCEWAKE_BLITTER_GEN9,
- FORCEWAKE_RENDER_GEN9,
- FORCEWAKE_MEDIA_GEN9,
- GEN6_RPNSWREQ,
- GEN6_RC_VIDEO_FREQ,
- /* TODO: Other registers are not yet used */
-};
-
-static bool is_gen9_shadowed(struct drm_i915_private *dev_priv,
- i915_reg_t reg)
-{
- int i;
- for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
- if (i915_mmio_reg_equal(reg, gen9_shadowed_regs[i]))
- return true;
-
- return false;
-}
-
#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
- if (!SKL_NEEDS_FORCE_WAKE(offset) || \
- is_gen9_shadowed(dev_priv, reg)) \
- fw_engine = 0; \
- else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_MEDIA; \
- else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
- else \
- fw_engine = FORCEWAKE_BLITTER; \
+ fw_engine = __gen9_reg_write_fw_domains(offset); \
if (fw_engine) \
- __force_wake_get(dev_priv, fw_engine); \
+ __force_wake_auto(dev_priv, fw_engine); \
__raw_i915_write##x(dev_priv, reg, val); \
GEN6_WRITE_FOOTER; \
}
@@ -1150,7 +1219,14 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
d->i915 = dev_priv;
d->id = domain_id;
- setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);
+ BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
+ BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
+ BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
+
+ d->mask = 1 << domain_id;
+
+ hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ d->timer.function = intel_uncore_fw_release_timer;
dev_priv->uncore.fw_domains |= (1 << domain_id);
@@ -1161,7 +1237,7 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (INTEL_INFO(dev_priv->dev)->gen <= 5)
+ if (INTEL_INFO(dev_priv)->gen <= 5)
return;
if (IS_GEN9(dev)) {
@@ -1257,7 +1333,7 @@ void intel_uncore_init(struct drm_device *dev)
i915_check_vgpu(dev);
- intel_uncore_ellc_detect(dev);
+ intel_uncore_edram_detect(dev_priv);
intel_uncore_fw_domains_init(dev);
__intel_uncore_early_sanitize(dev, false);
@@ -1437,7 +1513,7 @@ static int i915_reset_complete(struct drm_device *dev)
return (gdrst & GRDOM_RESET_STATUS) == 0;
}
-static int i915_do_reset(struct drm_device *dev)
+static int i915_do_reset(struct drm_device *dev, unsigned engine_mask)
{
/* assert reset for at least 20 usec */
pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
@@ -1454,13 +1530,13 @@ static int g4x_reset_complete(struct drm_device *dev)
return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
-static int g33_do_reset(struct drm_device *dev)
+static int g33_do_reset(struct drm_device *dev, unsigned engine_mask)
{
pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
return wait_for(g4x_reset_complete(dev), 500);
}
-static int g4x_do_reset(struct drm_device *dev)
+static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@@ -1490,7 +1566,7 @@ static int g4x_do_reset(struct drm_device *dev)
return 0;
}
-static int ironlake_do_reset(struct drm_device *dev)
+static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@@ -1514,75 +1590,132 @@ static int ironlake_do_reset(struct drm_device *dev)
return 0;
}
-static int gen6_do_reset(struct drm_device *dev)
+/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
+static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
+ u32 hw_domain_mask)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- /* Reset the chip */
+ int ret;
/* GEN6_GDRST is not in the gt power well, no need to check
* for fifo space for the write or forcewake the chip for
* the read
*/
- __raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);
+ __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
+
+#define ACKED ((__raw_i915_read32(dev_priv, GEN6_GDRST) & hw_domain_mask) == 0)
+ /* Spin waiting for the device to ack the reset requests */
+ ret = wait_for(ACKED, 500);
+#undef ACKED
+
+ return ret;
+}
- /* Spin waiting for the device to ack the reset request */
- ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+/**
+ * gen6_reset_engines - reset individual engines
+ * @dev: DRM device
+ * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
+ *
+ * This function will reset the individual engines that are set in engine_mask.
+ * If you provide ALL_ENGINES as the mask, a full global domain reset is issued.
+ *
+ * Note: It is the caller's responsibility to handle the difference between
+ * requesting a full domain reset and a reset of all available individual engines.
+ *
+ * Returns 0 on success, nonzero on error.
+ */
+static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *engine;
+ const u32 hw_engine_mask[I915_NUM_ENGINES] = {
+ [RCS] = GEN6_GRDOM_RENDER,
+ [BCS] = GEN6_GRDOM_BLT,
+ [VCS] = GEN6_GRDOM_MEDIA,
+ [VCS2] = GEN8_GRDOM_MEDIA2,
+ [VECS] = GEN6_GRDOM_VECS,
+ };
+ u32 hw_mask;
+ int ret;
+
+ if (engine_mask == ALL_ENGINES) {
+ hw_mask = GEN6_GRDOM_FULL;
+ } else {
+ hw_mask = 0;
+ for_each_engine_masked(engine, dev_priv, engine_mask)
+ hw_mask |= hw_engine_mask[engine->id];
+ }
+
+ ret = gen6_hw_domain_reset(dev_priv, hw_mask);
intel_uncore_forcewake_reset(dev, true);
return ret;
}
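
With the engine mask plumbed through, callers can request a reset of a subset of engines or of everything at once. A hedged usage sketch, assuming the per-engine mask bits are (1 << engine id) as consumed by for_each_engine_masked() above; the escalation policy shown is purely illustrative:

/* Assumes the i915_drv.h declarations for intel_gpu_reset(), RCS and ALL_ENGINES. */
static int example_reset_render_then_full(struct drm_device *dev)
{
	int ret;

	ret = intel_gpu_reset(dev, 1 << RCS);		/* render engine only */
	if (ret)
		ret = intel_gpu_reset(dev, ALL_ENGINES);	/* full domain reset */

	return ret;
}
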
-static int wait_for_register(struct drm_i915_private *dev_priv,
- i915_reg_t reg,
- const u32 mask,
- const u32 value,
- const unsigned long timeout_ms)
+static int wait_for_register_fw(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
+ const u32 mask,
+ const u32 value,
+ const unsigned long timeout_ms)
{
- return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
+ return wait_for((I915_READ_FW(reg) & mask) == value, timeout_ms);
}
-static int gen8_do_reset(struct drm_device *dev)
+static int gen8_request_engine_reset(struct intel_engine_cs *engine)
+{
+ int ret;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
+
+ I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
+ _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
+
+ ret = wait_for_register_fw(dev_priv,
+ RING_RESET_CTL(engine->mmio_base),
+ RESET_CTL_READY_TO_RESET,
+ RESET_CTL_READY_TO_RESET,
+ 700);
+ if (ret)
+ DRM_ERROR("%s: reset request timeout\n", engine->name);
+
+ return ret;
+}
+
+static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
+
+ I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
+ _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+}
+
+static int gen8_reset_engines(struct drm_device *dev, unsigned engine_mask)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
- int i;
- for_each_ring(engine, dev_priv, i) {
- I915_WRITE(RING_RESET_CTL(engine->mmio_base),
- _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
-
- if (wait_for_register(dev_priv,
- RING_RESET_CTL(engine->mmio_base),
- RESET_CTL_READY_TO_RESET,
- RESET_CTL_READY_TO_RESET,
- 700)) {
- DRM_ERROR("%s: reset request timeout\n", engine->name);
+ for_each_engine_masked(engine, dev_priv, engine_mask)
+ if (gen8_request_engine_reset(engine))
goto not_ready;
- }
- }
- return gen6_do_reset(dev);
+ return gen6_reset_engines(dev, engine_mask);
not_ready:
- for_each_ring(engine, dev_priv, i)
- I915_WRITE(RING_RESET_CTL(engine->mmio_base),
- _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+ for_each_engine_masked(engine, dev_priv, engine_mask)
+ gen8_unrequest_engine_reset(engine);
return -EIO;
}
-static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
+static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *,
+ unsigned engine_mask)
{
if (!i915.reset)
return NULL;
if (INTEL_INFO(dev)->gen >= 8)
- return gen8_do_reset;
+ return gen8_reset_engines;
else if (INTEL_INFO(dev)->gen >= 6)
- return gen6_do_reset;
+ return gen6_reset_engines;
else if (IS_GEN5(dev))
return ironlake_do_reset;
else if (IS_G4X(dev))
@@ -1595,10 +1728,10 @@ static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
return NULL;
}
-int intel_gpu_reset(struct drm_device *dev)
+int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int (*reset)(struct drm_device *);
+ int (*reset)(struct drm_device *, unsigned);
int ret;
reset = intel_get_gpu_reset(dev);
@@ -1609,7 +1742,7 @@ int intel_gpu_reset(struct drm_device *dev)
* request may be dropped and never completes (causing -EIO).
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- ret = reset(dev);
+ ret = reset(dev, engine_mask);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
@@ -1620,6 +1753,25 @@ bool intel_has_gpu_reset(struct drm_device *dev)
return intel_get_gpu_reset(dev) != NULL;
}
+int intel_guc_reset(struct drm_i915_private *dev_priv)
+{
+ int ret;
+ unsigned long irqflags;
+
+ if (!i915.enable_guc_submission)
+ return -EINVAL;
+
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+ return ret;
+}
+
bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
return check_for_unclaimed_mmio(dev_priv);
@@ -1643,3 +1795,111 @@ intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
return false;
}
+
+static enum forcewake_domains
+intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
+ i915_reg_t reg)
+{
+ enum forcewake_domains fw_domains;
+
+ if (intel_vgpu_active(dev_priv->dev))
+ return 0;
+
+ switch (INTEL_INFO(dev_priv)->gen) {
+ case 9:
+ fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg));
+ break;
+ case 8:
+ if (IS_CHERRYVIEW(dev_priv))
+ fw_domains = __chv_reg_read_fw_domains(i915_mmio_reg_offset(reg));
+ else
+ fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg));
+ break;
+ case 7:
+ case 6:
+ if (IS_VALLEYVIEW(dev_priv))
+ fw_domains = __vlv_reg_read_fw_domains(i915_mmio_reg_offset(reg));
+ else
+ fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg));
+ break;
+ default:
+ MISSING_CASE(INTEL_INFO(dev_priv)->gen);
+ case 5: /* forcewake was introduced with gen6 */
+ case 4:
+ case 3:
+ case 2:
+ return 0;
+ }
+
+ WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
+
+ return fw_domains;
+}
+
+static enum forcewake_domains
+intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
+ i915_reg_t reg)
+{
+ enum forcewake_domains fw_domains;
+
+ if (intel_vgpu_active(dev_priv->dev))
+ return 0;
+
+ switch (INTEL_INFO(dev_priv)->gen) {
+ case 9:
+ fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg));
+ break;
+ case 8:
+ if (IS_CHERRYVIEW(dev_priv))
+ fw_domains = __chv_reg_write_fw_domains(i915_mmio_reg_offset(reg));
+ else
+ fw_domains = __gen8_reg_write_fw_domains(i915_mmio_reg_offset(reg));
+ break;
+ case 7:
+ case 6:
+ fw_domains = FORCEWAKE_RENDER;
+ break;
+ default:
+ MISSING_CASE(INTEL_INFO(dev_priv)->gen);
+ case 5:
+ case 4:
+ case 3:
+ case 2:
+ return 0;
+ }
+
+ WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
+
+ return fw_domains;
+}
+
+/**
+ * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
+ * a register
+ * @dev_priv: pointer to struct drm_i915_private
+ * @reg: register in question
+ * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
+ *
+ * Returns the set of forcewake domains that must be taken, for example with
+ * intel_uncore_forcewake_get(), for the specified register to be accessible in
+ * the specified mode (read, write or read/write) with raw mmio accessors.
+ *
+ * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER) requires
+ * callers to do their own FIFO management or risk losing writes.
+ */
+enum forcewake_domains
+intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
+ i915_reg_t reg, unsigned int op)
+{
+ enum forcewake_domains fw_domains = 0;
+
+ WARN_ON(!op);
+
+ if (op & FW_REG_READ)
+ fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
+
+ if (op & FW_REG_WRITE)
+ fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);
+
+ return fw_domains;
+}
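
A hedged usage sketch of the new helper, following the kernel-doc above: look up the domains a register needs, take only those, and use the raw FW accessors:

/* Assumes the i915_drv.h/intel_uncore declarations (I915_READ_FW, FW_REG_READ). */
static u32 example_read_with_fw(struct drm_i915_private *dev_priv, i915_reg_t reg)
{
	enum forcewake_domains fw;
	u32 val;

	fw = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
	if (fw)
		intel_uncore_forcewake_get(dev_priv, fw);

	val = I915_READ_FW(reg);	/* raw accessor, no implicit forcewake */

	if (fw)
		intel_uncore_forcewake_put(dev_priv, fw);

	return val;
}
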
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
new file mode 100644
index 000000000..44fb0b35e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -0,0 +1,845 @@
+/*
+ * Copyright © 2006-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/*
+ * This information is private to VBT parsing in intel_bios.c.
+ *
+ * Please do NOT include anywhere else.
+ */
+#ifndef _INTEL_BIOS_PRIVATE
+#error "intel_vbt_defs.h is private to intel_bios.c"
+#endif
+
+#ifndef _INTEL_VBT_DEFS_H_
+#define _INTEL_VBT_DEFS_H_
+
+#include "intel_bios.h"
+
+/**
+ * struct vbt_header - VBT Header structure
+ * @signature: VBT signature, always starts with "$VBT"
+ * @version: Version of this structure
+ * @header_size: Size of this structure
+ * @vbt_size: Size of VBT (VBT Header, BDB Header and data blocks)
+ * @vbt_checksum: Checksum
+ * @reserved0: Reserved
+ * @bdb_offset: Offset of &struct bdb_header from beginning of VBT
+ * @aim_offset: Offsets of add-in data blocks from beginning of VBT
+ */
+struct vbt_header {
+ u8 signature[20];
+ u16 version;
+ u16 header_size;
+ u16 vbt_size;
+ u8 vbt_checksum;
+ u8 reserved0;
+ u32 bdb_offset;
+ u32 aim_offset[4];
+} __packed;
+
+/**
+ * struct bdb_header - BDB Header structure
+ * @signature: BDB signature "BIOS_DATA_BLOCK"
+ * @version: Version of the data block definitions
+ * @header_size: Size of this structure
+ * @bdb_size: Size of BDB (BDB Header and data blocks)
+ */
+struct bdb_header {
+ u8 signature[16];
+ u16 version;
+ u16 header_size;
+ u16 bdb_size;
+} __packed;
+
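
The two headers above are enough to walk from a raw VBT image to its BDB payload: the VBT header carries bdb_offset and the BDB header carries its own signature and size. A hedged, illustrative lookup (not the driver's exact validation):

#include <linux/string.h>
#include <linux/types.h>

static const struct bdb_header *example_find_bdb(const void *blob, size_t size)
{
	const struct vbt_header *vbt = blob;
	const struct bdb_header *bdb;
	const u8 *base = blob;

	if (size < sizeof(*vbt) || memcmp(vbt->signature, "$VBT", 4))
		return NULL;

	if ((size_t)vbt->bdb_offset + sizeof(*bdb) > size)
		return NULL;

	bdb = (const struct bdb_header *)(base + vbt->bdb_offset);
	if (memcmp(bdb->signature, "BIOS_DATA_BLOCK", 15))
		return NULL;

	return bdb;
}
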
+/* strictly speaking, this is a "skip" block, but it has interesting info */
+struct vbios_data {
+ u8 type; /* 0 == desktop, 1 == mobile */
+ u8 relstage;
+ u8 chipset;
+ u8 lvds_present:1;
+ u8 tv_present:1;
+ u8 rsvd2:6; /* finish byte */
+ u8 rsvd3[4];
+ u8 signon[155];
+ u8 copyright[61];
+ u16 code_segment;
+ u8 dos_boot_mode;
+ u8 bandwidth_percent;
+ u8 rsvd4; /* popup memory size */
+ u8 resize_pci_bios;
+ u8 rsvd5; /* is crt already on ddc2 */
+} __packed;
+
+/*
+ * There are several types of BIOS data blocks (BDBs), each block has
+ * an ID and size in the first 3 bytes (ID in first, size in next 2).
+ * Known types are listed below.
+ */
+#define BDB_GENERAL_FEATURES 1
+#define BDB_GENERAL_DEFINITIONS 2
+#define BDB_OLD_TOGGLE_LIST 3
+#define BDB_MODE_SUPPORT_LIST 4
+#define BDB_GENERIC_MODE_TABLE 5
+#define BDB_EXT_MMIO_REGS 6
+#define BDB_SWF_IO 7
+#define BDB_SWF_MMIO 8
+#define BDB_PSR 9
+#define BDB_MODE_REMOVAL_TABLE 10
+#define BDB_CHILD_DEVICE_TABLE 11
+#define BDB_DRIVER_FEATURES 12
+#define BDB_DRIVER_PERSISTENCE 13
+#define BDB_EXT_TABLE_PTRS 14
+#define BDB_DOT_CLOCK_OVERRIDE 15
+#define BDB_DISPLAY_SELECT 16
+/* 17 rsvd */
+#define BDB_DRIVER_ROTATION 18
+#define BDB_DISPLAY_REMOVE 19
+#define BDB_OEM_CUSTOM 20
+#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
+#define BDB_SDVO_LVDS_OPTIONS 22
+#define BDB_SDVO_PANEL_DTDS 23
+#define BDB_SDVO_LVDS_PNP_IDS 24
+#define BDB_SDVO_LVDS_POWER_SEQ 25
+#define BDB_TV_OPTIONS 26
+#define BDB_EDP 27
+#define BDB_LVDS_OPTIONS 40
+#define BDB_LVDS_LFP_DATA_PTRS 41
+#define BDB_LVDS_LFP_DATA 42
+#define BDB_LVDS_BACKLIGHT 43
+#define BDB_LVDS_POWER 44
+#define BDB_MIPI_CONFIG 52
+#define BDB_MIPI_SEQUENCE 53
+#define BDB_SKIP 254 /* VBIOS private block, ignore */
+
+struct bdb_general_features {
+ /* bits 1 */
+ u8 panel_fitting:2;
+ u8 flexaim:1;
+ u8 msg_enable:1;
+ u8 clear_screen:3;
+ u8 color_flip:1;
+
+ /* bits 2 */
+ u8 download_ext_vbt:1;
+ u8 enable_ssc:1;
+ u8 ssc_freq:1;
+ u8 enable_lfp_on_override:1;
+ u8 disable_ssc_ddt:1;
+ u8 rsvd7:1;
+ u8 display_clock_mode:1;
+ u8 rsvd8:1; /* finish byte */
+
+ /* bits 3 */
+ u8 disable_smooth_vision:1;
+ u8 single_dvi:1;
+ u8 rsvd9:1;
+ u8 fdi_rx_polarity_inverted:1;
+ u8 rsvd10:4; /* finish byte */
+
+ /* bits 4 */
+ u8 legacy_monitor_detect;
+
+ /* bits 5 */
+ u8 int_crt_support:1;
+ u8 int_tv_support:1;
+ u8 int_efp_support:1;
+ u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
+ u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
+ u8 rsvd11:3; /* finish byte */
+} __packed;
+
+/* pre-915 */
+#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
+#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */
+#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */
+#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */
+
+/* Pre 915 */
+#define DEVICE_TYPE_NONE 0x00
+#define DEVICE_TYPE_CRT 0x01
+#define DEVICE_TYPE_TV 0x09
+#define DEVICE_TYPE_EFP 0x12
+#define DEVICE_TYPE_LFP 0x22
+/* On 915+ */
+#define DEVICE_TYPE_CRT_DPMS 0x6001
+#define DEVICE_TYPE_CRT_DPMS_HOTPLUG 0x4001
+#define DEVICE_TYPE_TV_COMPOSITE 0x0209
+#define DEVICE_TYPE_TV_MACROVISION 0x0289
+#define DEVICE_TYPE_TV_RF_COMPOSITE 0x020c
+#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE 0x0609
+#define DEVICE_TYPE_TV_SCART 0x0209
+#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
+#define DEVICE_TYPE_EFP_HOTPLUG_PWR 0x6012
+#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR 0x6052
+#define DEVICE_TYPE_EFP_DVI_I 0x6053
+#define DEVICE_TYPE_EFP_DVI_D_DUAL 0x6152
+#define DEVICE_TYPE_EFP_DVI_D_HDCP 0x60d2
+#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR 0x6062
+#define DEVICE_TYPE_OPENLDI_DUALPIX 0x6162
+#define DEVICE_TYPE_LFP_PANELLINK 0x5012
+#define DEVICE_TYPE_LFP_CMOS_PWR 0x5042
+#define DEVICE_TYPE_LFP_LVDS_PWR 0x5062
+#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162
+#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
+
+#define DEVICE_CFG_NONE 0x00
+#define DEVICE_CFG_12BIT_DVOB 0x01
+#define DEVICE_CFG_12BIT_DVOC 0x02
+#define DEVICE_CFG_24BIT_DVOBC 0x09
+#define DEVICE_CFG_24BIT_DVOCB 0x0a
+#define DEVICE_CFG_DUAL_DVOB 0x11
+#define DEVICE_CFG_DUAL_DVOC 0x12
+#define DEVICE_CFG_DUAL_DVOBC 0x13
+#define DEVICE_CFG_DUAL_LINK_DVOBC 0x19
+#define DEVICE_CFG_DUAL_LINK_DVOCB 0x1a
+
+#define DEVICE_WIRE_NONE 0x00
+#define DEVICE_WIRE_DVOB 0x01
+#define DEVICE_WIRE_DVOC 0x02
+#define DEVICE_WIRE_DVOBC 0x03
+#define DEVICE_WIRE_DVOBB 0x05
+#define DEVICE_WIRE_DVOCC 0x06
+#define DEVICE_WIRE_DVOB_MASTER 0x0d
+#define DEVICE_WIRE_DVOC_MASTER 0x0e
+
+#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */
+#define DEVICE_PORT_DVOB 0x01
+#define DEVICE_PORT_DVOC 0x02
+
+/*
+ * We used to keep this struct but without any version control. We should avoid
+ * using it in the future, but it should be safe to keep using it in the old
+ * code. Do not change; we rely on its size.
+ */
+struct old_child_dev_config {
+ u16 handle;
+ u16 device_type;
+ u8 device_id[10]; /* ascii string */
+ u16 addin_offset;
+ u8 dvo_port; /* See Device_PORT_* above */
+ u8 i2c_pin;
+ u8 slave_addr;
+ u8 ddc_pin;
+ u16 edid_ptr;
+ u8 dvo_cfg; /* See DEVICE_CFG_* above */
+ u8 dvo2_port;
+ u8 i2c2_pin;
+ u8 slave2_addr;
+ u8 ddc2_pin;
+ u8 capabilities;
+ u8 dvo_wiring;/* See DEVICE_WIRE_* above */
+ u8 dvo2_wiring;
+ u16 extended_type;
+ u8 dvo_function;
+} __packed;
+
+/* This one contains field offsets that are known to be common for all BDB
+ * versions. Notice that the meaning of the contents may still change,
+ * but at least the offsets are consistent. */
+
+struct common_child_dev_config {
+ u16 handle;
+ u16 device_type;
+ u8 not_common1[12];
+ u8 dvo_port;
+ u8 not_common2[2];
+ u8 ddc_pin;
+ u16 edid_ptr;
+ u8 dvo_cfg; /* See DEVICE_CFG_* above */
+ u8 efp_routed:1;
+ u8 lane_reversal:1;
+ u8 lspcon:1;
+ u8 iboost:1;
+ u8 hpd_invert:1;
+ u8 flag_reserved:3;
+ u8 hdmi_support:1;
+ u8 dp_support:1;
+ u8 tmds_support:1;
+ u8 support_reserved:5;
+ u8 not_common3[12];
+ u8 iboost_level;
+} __packed;
+
+
+/* This field changes depending on the BDB version, so the most reliable way to
+ * read it is by checking the BDB version and reading the raw pointer. */
+union child_device_config {
+ /* This one is safe to be used anywhere, but the code should still check
+ * the BDB version. */
+ u8 raw[33];
+ /* This one should only be kept for legacy code. */
+ struct old_child_dev_config old;
+ /* This one should also be safe to use anywhere, even without version
+ * checks. */
+ struct common_child_dev_config common;
+} __packed;
+
+struct bdb_general_definitions {
+ /* DDC GPIO */
+ u8 crt_ddc_gmbus_pin;
+
+ /* DPMS bits */
+ u8 dpms_acpi:1;
+ u8 skip_boot_crt_detect:1;
+ u8 dpms_aim:1;
+ u8 rsvd1:5; /* finish byte */
+
+ /* boot device bits */
+ u8 boot_display[2];
+ u8 child_dev_size;
+
+ /*
+ * Device info:
+ * If TV is present, it'll be at devices[0].
+ * LVDS will be next, either devices[0] or [1], if present.
+ * On some platforms the number of devices is 6, but it could be as few as
+ * 4 if both TV and LVDS are missing.
+ * The device count depends on the size of the general definitions
+ * block and is obtained with the following formula:
+ * number = (block_size - sizeof(bdb_general_definitions))/
+ * defs->child_dev_size;
+ */
+ uint8_t devices[0];
+} __packed;
+
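
The formula in the comment above is all that is needed to size the child device array, since child_dev_size is itself stored in the block. A small hedged sketch of that calculation:

static int example_child_device_count(const struct bdb_general_definitions *defs,
				      size_t block_size)
{
	if (!defs->child_dev_size || block_size < sizeof(*defs))
		return 0;

	return (block_size - sizeof(*defs)) / defs->child_dev_size;
}
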
+/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
+#define MODE_MASK 0x3
+
+struct bdb_lvds_options {
+ u8 panel_type;
+ u8 rsvd1;
+ /* LVDS capabilities, stored in a dword */
+ u8 pfit_mode:2;
+ u8 pfit_text_mode_enhanced:1;
+ u8 pfit_gfx_mode_enhanced:1;
+ u8 pfit_ratio_auto:1;
+ u8 pixel_dither:1;
+ u8 lvds_edid:1;
+ u8 rsvd2:1;
+ u8 rsvd4;
+ /* LVDS Panel channel bits stored here */
+ u32 lvds_panel_channel_bits;
+ /* LVDS SSC (Spread Spectrum Clock) bits stored here. */
+ u16 ssc_bits;
+ u16 ssc_freq;
+ u16 ssc_ddt;
+ /* Panel color depth defined here */
+ u16 panel_color_depth;
+ /* LVDS panel type bits stored here */
+ u32 dps_panel_type_bits;
+ /* LVDS backlight control type bits stored here */
+ u32 blt_control_type_bits;
+} __packed;
+
+/* LFP pointer table contains entries to the struct below */
+struct bdb_lvds_lfp_data_ptr {
+ u16 fp_timing_offset; /* offsets are from start of bdb */
+ u8 fp_table_size;
+ u16 dvo_timing_offset;
+ u8 dvo_table_size;
+ u16 panel_pnp_id_offset;
+ u8 pnp_table_size;
+} __packed;
+
+struct bdb_lvds_lfp_data_ptrs {
+ u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
+ struct bdb_lvds_lfp_data_ptr ptr[16];
+} __packed;
+
+/* LFP data has 3 blocks per entry */
+struct lvds_fp_timing {
+ u16 x_res;
+ u16 y_res;
+ u32 lvds_reg;
+ u32 lvds_reg_val;
+ u32 pp_on_reg;
+ u32 pp_on_reg_val;
+ u32 pp_off_reg;
+ u32 pp_off_reg_val;
+ u32 pp_cycle_reg;
+ u32 pp_cycle_reg_val;
+ u32 pfit_reg;
+ u32 pfit_reg_val;
+ u16 terminator;
+} __packed;
+
+struct lvds_dvo_timing {
+ u16 clock; /**< In 10 kHz */
+ u8 hactive_lo;
+ u8 hblank_lo;
+ u8 hblank_hi:4;
+ u8 hactive_hi:4;
+ u8 vactive_lo;
+ u8 vblank_lo;
+ u8 vblank_hi:4;
+ u8 vactive_hi:4;
+ u8 hsync_off_lo;
+ u8 hsync_pulse_width;
+ u8 vsync_pulse_width:4;
+ u8 vsync_off:4;
+ u8 rsvd0:6;
+ u8 hsync_off_hi:2;
+ u8 himage_lo;
+ u8 vimage_lo;
+ u8 vimage_hi:4;
+ u8 himage_hi:4;
+ u8 h_border;
+ u8 v_border;
+ u8 rsvd1:3;
+ u8 digital:2;
+ u8 vsync_positive:1;
+ u8 hsync_positive:1;
+ u8 rsvd2:1;
+} __packed;
+
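
The DVO timing packs most fields as split low byte / high nibble pairs, so a parser reassembles them with shifts; the clock is stored in 10 kHz units. A hedged partial sketch (sync and blanking fields omitted):

#include <drm/drm_modes.h>

static void example_dvo_timing_to_mode(const struct lvds_dvo_timing *t,
				       struct drm_display_mode *mode)
{
	mode->clock = t->clock * 10;			/* 10 kHz units -> kHz */
	mode->hdisplay = (t->hactive_hi << 8) | t->hactive_lo;
	mode->vdisplay = (t->vactive_hi << 8) | t->vactive_lo;
}
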
+struct lvds_pnp_id {
+ u16 mfg_name;
+ u16 product_code;
+ u32 serial;
+ u8 mfg_week;
+ u8 mfg_year;
+} __packed;
+
+struct bdb_lvds_lfp_data_entry {
+ struct lvds_fp_timing fp_timing;
+ struct lvds_dvo_timing dvo_timing;
+ struct lvds_pnp_id pnp_id;
+} __packed;
+
+struct bdb_lvds_lfp_data {
+ struct bdb_lvds_lfp_data_entry data[16];
+} __packed;
+
+#define BDB_BACKLIGHT_TYPE_NONE 0
+#define BDB_BACKLIGHT_TYPE_PWM 2
+
+struct bdb_lfp_backlight_data_entry {
+ u8 type:2;
+ u8 active_low_pwm:1;
+ u8 obsolete1:5;
+ u16 pwm_freq_hz;
+ u8 min_brightness;
+ u8 obsolete2;
+ u8 obsolete3;
+} __packed;
+
+struct bdb_lfp_backlight_data {
+ u8 entry_size;
+ struct bdb_lfp_backlight_data_entry data[16];
+ u8 level[16];
+} __packed;
+
+struct aimdb_header {
+ char signature[16];
+ char oem_device[20];
+ u16 aimdb_version;
+ u16 aimdb_header_size;
+ u16 aimdb_size;
+} __packed;
+
+struct aimdb_block {
+ u8 aimdb_id;
+ u16 aimdb_size;
+} __packed;
+
+struct vch_panel_data {
+ u16 fp_timing_offset;
+ u8 fp_timing_size;
+ u16 dvo_timing_offset;
+ u8 dvo_timing_size;
+ u16 text_fitting_offset;
+ u8 text_fitting_size;
+ u16 graphics_fitting_offset;
+ u8 graphics_fitting_size;
+} __packed;
+
+struct vch_bdb_22 {
+ struct aimdb_block aimdb_block;
+ struct vch_panel_data panels[16];
+} __packed;
+
+struct bdb_sdvo_lvds_options {
+ u8 panel_backlight;
+ u8 h40_set_panel_type;
+ u8 panel_type;
+ u8 ssc_clk_freq;
+ u16 als_low_trip;
+ u16 als_high_trip;
+ u8 sclalarcoeff_tab_row_num;
+ u8 sclalarcoeff_tab_row_size;
+ u8 coefficient[8];
+ u8 panel_misc_bits_1;
+ u8 panel_misc_bits_2;
+ u8 panel_misc_bits_3;
+ u8 panel_misc_bits_4;
+} __packed;
+
+
+#define BDB_DRIVER_FEATURE_NO_LVDS 0
+#define BDB_DRIVER_FEATURE_INT_LVDS 1
+#define BDB_DRIVER_FEATURE_SDVO_LVDS 2
+#define BDB_DRIVER_FEATURE_EDP 3
+
+struct bdb_driver_features {
+ u8 boot_dev_algorithm:1;
+ u8 block_display_switch:1;
+ u8 allow_display_switch:1;
+ u8 hotplug_dvo:1;
+ u8 dual_view_zoom:1;
+ u8 int15h_hook:1;
+ u8 sprite_in_clone:1;
+ u8 primary_lfp_id:1;
+
+ u16 boot_mode_x;
+ u16 boot_mode_y;
+ u8 boot_mode_bpp;
+ u8 boot_mode_refresh;
+
+ u16 enable_lfp_primary:1;
+ u16 selective_mode_pruning:1;
+ u16 dual_frequency:1;
+ u16 render_clock_freq:1; /* 0: high freq; 1: low freq */
+ u16 nt_clone_support:1;
+ u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */
+ u16 sprite_display_assign:1; /* 0: secondary; 1: primary */
+ u16 cui_aspect_scaling:1;
+ u16 preserve_aspect_ratio:1;
+ u16 sdvo_device_power_down:1;
+ u16 crt_hotplug:1;
+ u16 lvds_config:2;
+ u16 tv_hotplug:1;
+ u16 hdmi_config:2;
+
+ u8 static_display:1;
+ u8 reserved2:7;
+ u16 legacy_crt_max_x;
+ u16 legacy_crt_max_y;
+ u8 legacy_crt_max_refresh;
+
+ u8 hdmi_termination;
+ u8 custom_vbt_version;
+ /* Driver features data block */
+ u16 rmpm_enabled:1;
+ u16 s2ddt_enabled:1;
+ u16 dpst_enabled:1;
+ u16 bltclt_enabled:1;
+ u16 adb_enabled:1;
+ u16 drrs_enabled:1;
+ u16 grs_enabled:1;
+ u16 gpmt_enabled:1;
+ u16 tbt_enabled:1;
+ u16 psr_enabled:1;
+ u16 ips_enabled:1;
+ u16 reserved3:4;
+ u16 pc_feature_valid:1;
+} __packed;
+
+#define EDP_18BPP 0
+#define EDP_24BPP 1
+#define EDP_30BPP 2
+#define EDP_RATE_1_62 0
+#define EDP_RATE_2_7 1
+#define EDP_LANE_1 0
+#define EDP_LANE_2 1
+#define EDP_LANE_4 3
+#define EDP_PREEMPHASIS_NONE 0
+#define EDP_PREEMPHASIS_3_5dB 1
+#define EDP_PREEMPHASIS_6dB 2
+#define EDP_PREEMPHASIS_9_5dB 3
+#define EDP_VSWING_0_4V 0
+#define EDP_VSWING_0_6V 1
+#define EDP_VSWING_0_8V 2
+#define EDP_VSWING_1_2V 3
+
+
+struct edp_link_params {
+ u8 rate:4;
+ u8 lanes:4;
+ u8 preemphasis:4;
+ u8 vswing:4;
+} __packed;
+
+struct bdb_edp {
+ struct edp_power_seq power_seqs[16];
+ u32 color_depth;
+ struct edp_link_params link_params[16];
+ u32 sdrrs_msa_timing_delay;
+
+ /* ith bit indicates enabled/disabled for (i+1)th panel */
+ u16 edp_s3d_feature;
+ u16 edp_t3_optimization;
+ u64 edp_vswing_preemph; /* v173 */
+} __packed;
+
+struct psr_table {
+ /* Feature bits */
+ u8 full_link:1;
+ u8 require_aux_to_wakeup:1;
+ u8 feature_bits_rsvd:6;
+
+ /* Wait times */
+ u8 idle_frames:4;
+ u8 lines_to_wait:3;
+ u8 wait_times_rsvd:1;
+
+ /* TP wake up time in multiple of 100 */
+ u16 tp1_wakeup_time;
+ u16 tp2_tp3_wakeup_time;
+} __packed;
+
+struct bdb_psr {
+ struct psr_table psr_table[16];
+} __packed;
+
+/*
+ * Driver<->VBIOS interaction occurs through scratch bits in
+ * GR18 & SWF*.
+ */
+
+/* GR18 bits are set on display switch and hotkey events */
+#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
+#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
+#define GR18_HK_NONE (0x0<<3)
+#define GR18_HK_LFP_STRETCH (0x1<<3)
+#define GR18_HK_TOGGLE_DISP (0x2<<3)
+#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
+#define GR18_HK_POPUP_DISABLED (0x6<<3)
+#define GR18_HK_POPUP_ENABLED (0x7<<3)
+#define GR18_HK_PFIT (0x8<<3)
+#define GR18_HK_APM_CHANGE (0xa<<3)
+#define GR18_HK_MULTIPLE (0xc<<3)
+#define GR18_USER_INT_EN (1<<2)
+#define GR18_A0000_FLUSH_EN (1<<1)
+#define GR18_SMM_EN (1<<0)
+
+/* Set by driver, cleared by VBIOS */
+#define SWF00_YRES_SHIFT 16
+#define SWF00_XRES_SHIFT 0
+#define SWF00_RES_MASK 0xffff
+
+/* Set by VBIOS at boot time and driver at runtime */
+#define SWF01_TV2_FORMAT_SHIFT 8
+#define SWF01_TV1_FORMAT_SHIFT 0
+#define SWF01_TV_FORMAT_MASK 0xffff
+
+#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
+#define SWF10_GTT_OVERRIDE_EN (1<<28)
+#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
+#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
+#define SWF10_OLD_TOGGLE 0x0
+#define SWF10_TOGGLE_LIST_1 0x1
+#define SWF10_TOGGLE_LIST_2 0x2
+#define SWF10_TOGGLE_LIST_3 0x3
+#define SWF10_TOGGLE_LIST_4 0x4
+#define SWF10_PANNING_EN (1<<23)
+#define SWF10_DRIVER_LOADED (1<<22)
+#define SWF10_EXTENDED_DESKTOP (1<<21)
+#define SWF10_EXCLUSIVE_MODE (1<<20)
+#define SWF10_OVERLAY_EN (1<<19)
+#define SWF10_PLANEB_HOLDOFF (1<<18)
+#define SWF10_PLANEA_HOLDOFF (1<<17)
+#define SWF10_VGA_HOLDOFF (1<<16)
+#define SWF10_ACTIVE_DISP_MASK 0xffff
+#define SWF10_PIPEB_LFP2 (1<<15)
+#define SWF10_PIPEB_EFP2 (1<<14)
+#define SWF10_PIPEB_TV2 (1<<13)
+#define SWF10_PIPEB_CRT2 (1<<12)
+#define SWF10_PIPEB_LFP (1<<11)
+#define SWF10_PIPEB_EFP (1<<10)
+#define SWF10_PIPEB_TV (1<<9)
+#define SWF10_PIPEB_CRT (1<<8)
+#define SWF10_PIPEA_LFP2 (1<<7)
+#define SWF10_PIPEA_EFP2 (1<<6)
+#define SWF10_PIPEA_TV2 (1<<5)
+#define SWF10_PIPEA_CRT2 (1<<4)
+#define SWF10_PIPEA_LFP (1<<3)
+#define SWF10_PIPEA_EFP (1<<2)
+#define SWF10_PIPEA_TV (1<<1)
+#define SWF10_PIPEA_CRT (1<<0)
+
+#define SWF11_MEMORY_SIZE_SHIFT 16
+#define SWF11_SV_TEST_EN (1<<15)
+#define SWF11_IS_AGP (1<<14)
+#define SWF11_DISPLAY_HOLDOFF (1<<13)
+#define SWF11_DPMS_REDUCED (1<<12)
+#define SWF11_IS_VBE_MODE (1<<11)
+#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
+#define SWF11_DPMS_MASK 0x07
+#define SWF11_DPMS_OFF (1<<2)
+#define SWF11_DPMS_SUSPEND (1<<1)
+#define SWF11_DPMS_STANDBY (1<<0)
+#define SWF11_DPMS_ON 0
+
+#define SWF14_GFX_PFIT_EN (1<<31)
+#define SWF14_TEXT_PFIT_EN (1<<30)
+#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
+#define SWF14_POPUP_EN (1<<28)
+#define SWF14_DISPLAY_HOLDOFF (1<<27)
+#define SWF14_DISP_DETECT_EN (1<<26)
+#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
+#define SWF14_DRIVER_STATUS (1<<24)
+#define SWF14_OS_TYPE_WIN9X (1<<23)
+#define SWF14_OS_TYPE_WINNT (1<<22)
+/* 21:19 rsvd */
+#define SWF14_PM_TYPE_MASK 0x00070000
+#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
+#define SWF14_PM_ACPI (0x3 << 16)
+#define SWF14_PM_APM_12 (0x2 << 16)
+#define SWF14_PM_APM_11 (0x1 << 16)
+#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
+ /* if GR18 indicates a display switch */
+#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
+#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
+#define SWF14_DS_PIPEB_TV2_EN (1<<13)
+#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
+#define SWF14_DS_PIPEB_LFP_EN (1<<11)
+#define SWF14_DS_PIPEB_EFP_EN (1<<10)
+#define SWF14_DS_PIPEB_TV_EN (1<<9)
+#define SWF14_DS_PIPEB_CRT_EN (1<<8)
+#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
+#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
+#define SWF14_DS_PIPEA_TV2_EN (1<<5)
+#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
+#define SWF14_DS_PIPEA_LFP_EN (1<<3)
+#define SWF14_DS_PIPEA_EFP_EN (1<<2)
+#define SWF14_DS_PIPEA_TV_EN (1<<1)
+#define SWF14_DS_PIPEA_CRT_EN (1<<0)
+ /* if GR18 indicates a panel fitting request */
+#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
+ /* if GR18 indicates an APM change request */
+#define SWF14_APM_HIBERNATE 0x4
+#define SWF14_APM_SUSPEND 0x3
+#define SWF14_APM_STANDBY 0x1
+#define SWF14_APM_RESTORE 0x0
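+
+/*
+ * A minimal sketch of decoding a hotkey event with the scratch-register
+ * bits above: gr18 and swf14 stand for raw register values already read
+ * by the driver, and the helper name is hypothetical.
+ */
+static inline u16 vbt_hotkey_display_request(u8 gr18, u32 swf14)
+{
+ /* Hotkeys are only the driver's job when it, not the VBIOS, is in control. */
+ if (!(gr18 & GR18_DRIVER_SWITCH_EN))
+ return 0;
+
+ /* On a display switch request, SWF14 15:0 lists the outputs to enable. */
+ if ((gr18 & GR18_HOTKEY_MASK) == GR18_HK_DISP_SWITCH)
+ return swf14 & SWF14_HK_REQUEST_MASK;
+
+ return 0;
+}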
+
+/* Device type classes for LFP, TV, HDMI, DP and eDP */
+#define DEVICE_TYPE_INT_LFP 0x1022
+#define DEVICE_TYPE_INT_TV 0x1009
+#define DEVICE_TYPE_HDMI 0x60D2
+#define DEVICE_TYPE_DP 0x68C6
+#define DEVICE_TYPE_DP_DUAL_MODE 0x60D6
+#define DEVICE_TYPE_eDP 0x78C6
+
+#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
+#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14)
+#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13)
+#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12)
+#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11)
+#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10)
+#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9)
+#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8)
+#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6)
+#define DEVICE_TYPE_LVDS_SINGALING (1 << 5)
+#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
+#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3)
+#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
+#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1)
+#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0)
+
+/*
+ * Bits we care about when checking for DEVICE_TYPE_eDP
+ * Depending on the system, the other bits may or may not
+ * be set for eDP outputs.
+ */
+#define DEVICE_TYPE_eDP_BITS \
+ (DEVICE_TYPE_INTERNAL_CONNECTOR | \
+ DEVICE_TYPE_MIPI_OUTPUT | \
+ DEVICE_TYPE_COMPOSITE_OUTPUT | \
+ DEVICE_TYPE_DUAL_CHANNEL | \
+ DEVICE_TYPE_LVDS_SINGALING | \
+ DEVICE_TYPE_TMDS_DVI_SIGNALING | \
+ DEVICE_TYPE_VIDEO_SIGNALING | \
+ DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
+ DEVICE_TYPE_ANALOG_OUTPUT)
+
+#define DEVICE_TYPE_DP_DUAL_MODE_BITS \
+ (DEVICE_TYPE_INTERNAL_CONNECTOR | \
+ DEVICE_TYPE_MIPI_OUTPUT | \
+ DEVICE_TYPE_COMPOSITE_OUTPUT | \
+ DEVICE_TYPE_LVDS_SINGALING | \
+ DEVICE_TYPE_TMDS_DVI_SIGNALING | \
+ DEVICE_TYPE_VIDEO_SIGNALING | \
+ DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
+ DEVICE_TYPE_DIGITAL_OUTPUT | \
+ DEVICE_TYPE_ANALOG_OUTPUT)
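+
+/*
+ * A minimal sketch of matching a child device_type against the masks
+ * above: only the bits inside the mask are compared, since the remaining
+ * bits vary from system to system. The helper names are hypothetical.
+ */
+static inline bool vbt_device_type_is_edp(u16 device_type)
+{
+ return (device_type & DEVICE_TYPE_eDP_BITS) ==
+ (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS);
+}
+
+static inline bool vbt_device_type_is_dp_dual_mode(u16 device_type)
+{
+ return (device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) ==
+ (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS);
+}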
+
+/* define the DVO port for HDMI output type */
+#define DVO_B 1
+#define DVO_C 2
+#define DVO_D 3
+
+/* Possible values for the "DVO Port" field for versions >= 155: */
+#define DVO_PORT_HDMIA 0
+#define DVO_PORT_HDMIB 1
+#define DVO_PORT_HDMIC 2
+#define DVO_PORT_HDMID 3
+#define DVO_PORT_LVDS 4
+#define DVO_PORT_TV 5
+#define DVO_PORT_CRT 6
+#define DVO_PORT_DPB 7
+#define DVO_PORT_DPC 8
+#define DVO_PORT_DPD 9
+#define DVO_PORT_DPA 10
+#define DVO_PORT_DPE 11
+#define DVO_PORT_HDMIE 12
+#define DVO_PORT_MIPIA 21
+#define DVO_PORT_MIPIB 22
+#define DVO_PORT_MIPIC 23
+#define DVO_PORT_MIPID 24
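+
+/*
+ * A minimal sketch of classifying a version >= 155 "DVO Port" value as a
+ * DisplayPort-capable port; the helper name is hypothetical.
+ */
+static inline bool dvo_port_is_dp(u8 dvo_port)
+{
+ switch (dvo_port) {
+ case DVO_PORT_DPA:
+ case DVO_PORT_DPB:
+ case DVO_PORT_DPC:
+ case DVO_PORT_DPD:
+ case DVO_PORT_DPE:
+ return true;
+ default:
+ return false;
+ }
+}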
+
+/* Block 52 contains the MIPI configuration block:
+ * 6 mipi_config entries followed by 6 mipi_pps_data entries,
+ * one pair per possible panel.
+ */
+#define MAX_MIPI_CONFIGURATIONS 6
+
+struct bdb_mipi_config {
+ struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
+ struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
+} __packed;
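+
+/*
+ * A minimal sketch of looking up the per-panel MIPI configuration: both
+ * arrays in block 52 are indexed by the panel type. panel_type is a
+ * hypothetical 0-based index below MAX_MIPI_CONFIGURATIONS, and the
+ * helper name is not part of the VBT definitions.
+ */
+static inline const struct mipi_config *
+bdb_mipi_config_for_panel(const struct bdb_mipi_config *blk, int panel_type)
+{
+ return &blk->config[panel_type];
+}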
+
+/* Block 53 contains the MIPI sequences needed by the panel
+ * for enabling it. This block is variable in size and can
+ * contain a maximum of 6 sequence blocks.
+ */
+struct bdb_mipi_sequence {
+ u8 version;
+ u8 data[0];
+} __packed;
+
+enum mipi_gpio_pin_index {
+ MIPI_GPIO_UNDEFINED = 0,
+ MIPI_GPIO_PANEL_ENABLE,
+ MIPI_GPIO_BL_ENABLE,
+ MIPI_GPIO_PWM_ENABLE,
+ MIPI_GPIO_RESET_N,
+ MIPI_GPIO_PWR_DOWN_R,
+ MIPI_GPIO_STDBY_RST_N,
+ MIPI_GPIO_MAX
+};
+
+#endif /* _INTEL_VBT_DEFS_H_ */