path: root/include/drm
author     André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-08-05 17:04:01 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-08-05 17:04:01 -0300
commit     57f0f512b273f60d52568b8c6b77e17f5636edc0 (patch)
tree       5e910f0e82173f4ef4f51111366a3f1299037a7b /include/drm
Initial import
Diffstat (limited to 'include/drm')
-rw-r--r--  include/drm/ati_pcigart.h | 30
-rw-r--r--  include/drm/bridge/dw_hdmi.h | 62
-rw-r--r--  include/drm/bridge/ptn3460.h | 45
-rw-r--r--  include/drm/drmP.h | 1091
-rw-r--r--  include/drm/drm_agpsupport.h | 199
-rw-r--r--  include/drm/drm_atomic.h | 102
-rw-r--r--  include/drm/drm_atomic_helper.h | 183
-rw-r--r--  include/drm/drm_cache.h | 38
-rw-r--r--  include/drm/drm_core.h | 34
-rw-r--r--  include/drm/drm_crtc.h | 1543
-rw-r--r--  include/drm/drm_crtc_helper.h | 239
-rw-r--r--  include/drm/drm_displayid.h | 76
-rw-r--r--  include/drm/drm_dp_helper.h | 761
-rw-r--r--  include/drm/drm_dp_mst_helper.h | 519
-rw-r--r--  include/drm/drm_edid.h | 393
-rw-r--r--  include/drm/drm_encoder_slave.h | 182
-rw-r--r--  include/drm/drm_fb_cma_helper.h | 31
-rw-r--r--  include/drm/drm_fb_helper.h | 161
-rw-r--r--  include/drm/drm_fixed.h | 162
-rw-r--r--  include/drm/drm_flip_work.h | 92
-rw-r--r--  include/drm/drm_gem.h | 185
-rw-r--r--  include/drm/drm_gem_cma_helper.h | 70
-rw-r--r--  include/drm/drm_global.h | 53
-rw-r--r--  include/drm/drm_hashtab.h | 79
-rw-r--r--  include/drm/drm_legacy.h | 203
-rw-r--r--  include/drm/drm_mem_util.h | 65
-rw-r--r--  include/drm/drm_mipi_dsi.h | 259
-rw-r--r--  include/drm/drm_mm.h | 322
-rw-r--r--  include/drm/drm_modes.h | 242
-rw-r--r--  include/drm/drm_modeset_lock.h | 145
-rw-r--r--  include/drm/drm_of.h | 18
-rw-r--r--  include/drm/drm_os_linux.h | 65
-rw-r--r--  include/drm/drm_panel.h | 145
-rw-r--r--  include/drm/drm_pciids.h | 812
-rw-r--r--  include/drm/drm_plane_helper.h | 116
-rw-r--r--  include/drm/drm_rect.h | 173
-rw-r--r--  include/drm/drm_sysfs.h | 12
-rw-r--r--  include/drm/drm_vma_manager.h | 257
-rw-r--r--  include/drm/exynos_drm.h | 101
-rw-r--r--  include/drm/gma_drm.h | 25
-rw-r--r--  include/drm/i2c/ch7006.h | 86
-rw-r--r--  include/drm/i2c/sil164.h | 63
-rw-r--r--  include/drm/i2c/tda998x.h | 30
-rw-r--r--  include/drm/i915_component.h | 38
-rw-r--r--  include/drm/i915_drm.h | 95
-rw-r--r--  include/drm/i915_pciids.h | 290
-rw-r--r--  include/drm/intel-gtt.h | 32
-rw-r--r--  include/drm/ttm/ttm_bo_api.h | 700
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h | 1059
-rw-r--r--  include/drm/ttm/ttm_execbuf_util.h | 119
-rw-r--r--  include/drm/ttm/ttm_lock.h | 247
-rw-r--r--  include/drm/ttm/ttm_memory.h | 158
-rw-r--r--  include/drm/ttm/ttm_module.h | 40
-rw-r--r--  include/drm/ttm/ttm_object.h | 350
-rw-r--r--  include/drm/ttm/ttm_page_alloc.h | 110
-rw-r--r--  include/drm/ttm/ttm_placement.h | 95
56 files changed, 12802 insertions, 0 deletions
diff --git a/include/drm/ati_pcigart.h b/include/drm/ati_pcigart.h
new file mode 100644
index 000000000..5765648b5
--- /dev/null
+++ b/include/drm/ati_pcigart.h
@@ -0,0 +1,30 @@
+#ifndef DRM_ATI_PCIGART_H
+#define DRM_ATI_PCIGART_H
+
+#include <drm/drm_legacy.h>
+
+/* location of GART table */
+#define DRM_ATI_GART_MAIN 1
+#define DRM_ATI_GART_FB 2
+
+#define DRM_ATI_GART_PCI 1
+#define DRM_ATI_GART_PCIE 2
+#define DRM_ATI_GART_IGP 3
+
+struct drm_ati_pcigart_info {
+ int gart_table_location;
+ int gart_reg_if;
+ void *addr;
+ dma_addr_t bus_addr;
+ dma_addr_t table_mask;
+ struct drm_dma_handle *table_handle;
+ struct drm_local_map mapping;
+ int table_size;
+};
+
+extern int drm_ati_pcigart_init(struct drm_device *dev,
+ struct drm_ati_pcigart_info * gart_info);
+extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
+ struct drm_ati_pcigart_info * gart_info);
+
+#endif
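
Usage sketch (illustrative, not part of this header): a legacy driver fills a drm_ati_pcigart_info at load time and hands it to drm_ati_pcigart_init(); in a real driver the structure lives in the driver's private data rather than being a parameter.

static int example_gart_setup(struct drm_device *dev,
			      struct drm_ati_pcigart_info *gart_info)
{
	gart_info->gart_table_location = DRM_ATI_GART_MAIN;
	gart_info->gart_reg_if = DRM_ATI_GART_PCI;
	gart_info->table_size = 32768;	/* illustrative size */

	/* drm_ati_pcigart_init() historically returns nonzero on success. */
	if (!drm_ati_pcigart_init(dev, gart_info))
		return -ENOMEM;
	return 0;
}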
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
new file mode 100644
index 000000000..de13bfc35
--- /dev/null
+++ b/include/drm/bridge/dw_hdmi.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2011 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DW_HDMI__
+#define __DW_HDMI__
+
+#include <drm/drmP.h>
+
+enum {
+ DW_HDMI_RES_8,
+ DW_HDMI_RES_10,
+ DW_HDMI_RES_12,
+ DW_HDMI_RES_MAX,
+};
+
+enum dw_hdmi_devtype {
+ IMX6Q_HDMI,
+ IMX6DL_HDMI,
+ RK3288_HDMI,
+};
+
+struct dw_hdmi_mpll_config {
+ unsigned long mpixelclock;
+ struct {
+ u16 cpce;
+ u16 gmp;
+ } res[DW_HDMI_RES_MAX];
+};
+
+struct dw_hdmi_curr_ctrl {
+ unsigned long mpixelclock;
+ u16 curr[DW_HDMI_RES_MAX];
+};
+
+struct dw_hdmi_phy_config {
+ unsigned long mpixelclock;
+ u16 sym_ctr; /* clock symbol and transmitter control */
+ u16 term; /* transmission termination value */
+ u16 vlev_ctr; /* voltage level control */
+};
+
+struct dw_hdmi_plat_data {
+ enum dw_hdmi_devtype dev_type;
+ const struct dw_hdmi_mpll_config *mpll_cfg;
+ const struct dw_hdmi_curr_ctrl *cur_ctr;
+ const struct dw_hdmi_phy_config *phy_config;
+ enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+};
+
+void dw_hdmi_unbind(struct device *dev, struct device *master, void *data);
+int dw_hdmi_bind(struct device *dev, struct device *master,
+ void *data, struct drm_encoder *encoder,
+ struct resource *iores, int irq,
+ const struct dw_hdmi_plat_data *plat_data);
+#endif /* __DW_HDMI__ */
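
A platform glue driver would typically supply a dw_hdmi_plat_data (MPLL/PHY tables plus a mode_valid hook) and call dw_hdmi_bind() from its component bind callback. A hedged sketch; the callback body is illustrative and the omitted tables are placeholders, not real PHY settings:

static enum drm_mode_status
example_mode_valid(struct drm_connector *connector,
		   struct drm_display_mode *mode)
{
	/* Reject pixel clocks a hypothetical platform cannot drive. */
	if (mode->clock > 340000)
		return MODE_CLOCK_HIGH;
	return MODE_OK;
}

static const struct dw_hdmi_plat_data example_hdmi_data = {
	.dev_type   = IMX6Q_HDMI,
	.mode_valid = example_mode_valid,
	/* .mpll_cfg, .cur_ctr and .phy_config would point at SoC-specific
	 * tables, each terminated by an entry with mpixelclock = ~0UL. */
};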
diff --git a/include/drm/bridge/ptn3460.h b/include/drm/bridge/ptn3460.h
new file mode 100644
index 000000000..b11f8e17e
--- /dev/null
+++ b/include/drm/bridge/ptn3460.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DRM_BRIDGE_PTN3460_H_
+#define _DRM_BRIDGE_PTN3460_H_
+
+struct drm_device;
+struct drm_bridge;
+struct drm_encoder;
+struct i2c_client;
+struct device_node;
+
+#if defined(CONFIG_DRM_PTN3460) || defined(CONFIG_DRM_PTN3460_MODULE)
+
+int ptn3460_init(struct drm_device *dev, struct drm_encoder *encoder,
+ struct i2c_client *client, struct device_node *node);
+
+void ptn3460_destroy(struct drm_bridge *bridge);
+
+#else
+
+static inline int ptn3460_init(struct drm_device *dev,
+ struct drm_encoder *encoder, struct i2c_client *client,
+ struct device_node *node)
+{
+ return 0;
+}
+
+static inline void ptn3460_destroy(struct drm_bridge *bridge)
+{
+}
+
+#endif
+
+#endif
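
The #else stubs above let callers invoke the bridge API unconditionally: when CONFIG_DRM_PTN3460 is not enabled, ptn3460_init() compiles to a no-op returning 0. A brief sketch of that call pattern (all names are placeholders):

static int example_attach_bridge(struct drm_device *dev,
				 struct drm_encoder *encoder,
				 struct i2c_client *client,
				 struct device_node *bridge_node)
{
	/* Succeeds trivially when the bridge driver is not built. */
	return ptn3460_init(dev, encoder, client, bridge_node);
}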
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
new file mode 100644
index 000000000..62c40777c
--- /dev/null
+++ b/include/drm/drmP.h
@@ -0,0 +1,1091 @@
+/*
+ * Internal Header for the Direct Rendering Manager
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009-2010, Code Aurora Forum.
+ * All rights reserved.
+ *
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author: Gareth Hughes <gareth@valinux.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_P_H_
+#define _DRM_P_H_
+
+#include <linux/agp_backend.h>
+#include <linux/cdev.h>
+#include <linux/dma-mapping.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/highmem.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/ratelimit.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+
+#include <asm/mman.h>
+#include <asm/pgalloc.h>
+#include <asm/uaccess.h>
+
+#include <uapi/drm/drm.h>
+#include <uapi/drm/drm_mode.h>
+
+#include <drm/drm_agpsupport.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_global.h>
+#include <drm/drm_hashtab.h>
+#include <drm/drm_mem_util.h>
+#include <drm/drm_mm.h>
+#include <drm/drm_os_linux.h>
+#include <drm/drm_sarea.h>
+#include <drm/drm_vma_manager.h>
+
+struct module;
+
+struct drm_file;
+struct drm_device;
+struct drm_agp_head;
+struct drm_local_map;
+struct drm_device_dma;
+struct drm_dma_handle;
+struct drm_gem_object;
+
+struct device_node;
+struct videomode;
+struct reservation_object;
+struct dma_buf_attachment;
+
+/*
+ * 5 debug categories are defined:
+ *
+ * CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ...
+ * This is the category used by the DRM_DEBUG() macro.
+ *
+ * DRIVER: Used in the vendor specific part of the driver: i915, radeon, ...
+ * This is the category used by the DRM_DEBUG_DRIVER() macro.
+ *
+ * KMS: used in the modesetting code.
+ * This is the category used by the DRM_DEBUG_KMS() macro.
+ *
+ * PRIME: used in the prime code.
+ * This is the category used by the DRM_DEBUG_PRIME() macro.
+ *
+ * ATOMIC: used in the atomic code.
+ * This is the category used by the DRM_DEBUG_ATOMIC() macro.
+ *
+ * Enabling verbose debug messages is done through the drm.debug parameter,
+ * each category being enabled by a bit.
+ *
+ * drm.debug=0x1 will enable CORE messages
+ * drm.debug=0x2 will enable DRIVER messages
+ * drm.debug=0x3 will enable CORE and DRIVER messages
+ * ...
+ * drm.debug=0xf will enable all messages
+ *
+ * An interesting feature is that it's possible to enable verbose logging at
+ * run-time by echoing the debug value in its sysfs node:
+ * # echo 0xf > /sys/module/drm/parameters/debug
+ */
+#define DRM_UT_CORE 0x01
+#define DRM_UT_DRIVER 0x02
+#define DRM_UT_KMS 0x04
+#define DRM_UT_PRIME 0x08
+#define DRM_UT_ATOMIC 0x10
+
+extern __printf(2, 3)
+void drm_ut_debug_printk(const char *function_name,
+ const char *format, ...);
+extern __printf(1, 2)
+void drm_err(const char *format, ...);
+
+/***********************************************************************/
+/** \name DRM template customization defaults */
+/*@{*/
+
+/* driver capabilities and requirements mask */
+#define DRIVER_USE_AGP 0x1
+#define DRIVER_PCI_DMA 0x8
+#define DRIVER_SG 0x10
+#define DRIVER_HAVE_DMA 0x20
+#define DRIVER_HAVE_IRQ 0x40
+#define DRIVER_IRQ_SHARED 0x80
+#define DRIVER_GEM 0x1000
+#define DRIVER_MODESET 0x2000
+#define DRIVER_PRIME 0x4000
+#define DRIVER_RENDER 0x8000
+#define DRIVER_ATOMIC 0x10000
+
+/***********************************************************************/
+/** \name Macros to make printk easier */
+/*@{*/
+
+/**
+ * Error output.
+ *
+ * \param fmt printf() like format string.
+ * \param arg arguments
+ */
+#define DRM_ERROR(fmt, ...) \
+ drm_err(fmt, ##__VA_ARGS__)
+
+/**
+ * Rate limited error output. Like DRM_ERROR() but won't flood the log.
+ *
+ * \param fmt printf() like format string.
+ * \param arg arguments
+ */
+#define DRM_ERROR_RATELIMITED(fmt, ...) \
+({ \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ \
+ if (__ratelimit(&_rs)) \
+ drm_err(fmt, ##__VA_ARGS__); \
+})
+
+#define DRM_INFO(fmt, ...) \
+ printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+
+#define DRM_INFO_ONCE(fmt, ...) \
+ printk_once(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+
+/**
+ * Debug output.
+ *
+ * \param fmt printf() like format string.
+ * \param arg arguments
+ */
+#define DRM_DEBUG(fmt, args...) \
+ do { \
+ if (unlikely(drm_debug & DRM_UT_CORE)) \
+ drm_ut_debug_printk(__func__, fmt, ##args); \
+ } while (0)
+
+#define DRM_DEBUG_DRIVER(fmt, args...) \
+ do { \
+ if (unlikely(drm_debug & DRM_UT_DRIVER)) \
+ drm_ut_debug_printk(__func__, fmt, ##args); \
+ } while (0)
+#define DRM_DEBUG_KMS(fmt, args...) \
+ do { \
+ if (unlikely(drm_debug & DRM_UT_KMS)) \
+ drm_ut_debug_printk(__func__, fmt, ##args); \
+ } while (0)
+#define DRM_DEBUG_PRIME(fmt, args...) \
+ do { \
+ if (unlikely(drm_debug & DRM_UT_PRIME)) \
+ drm_ut_debug_printk(__func__, fmt, ##args); \
+ } while (0)
+#define DRM_DEBUG_ATOMIC(fmt, args...) \
+ do { \
+ if (unlikely(drm_debug & DRM_UT_ATOMIC)) \
+ drm_ut_debug_printk(__func__, fmt, ##args); \
+ } while (0)
+
+/*@}*/
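
Each macro prints only when the matching DRM_UT_* bit is set in drm.debug, so the calls are cheap when the category is disabled. A hedged example of typical use in driver code (the function and values are illustrative):

static int example_set_mode(struct drm_device *dev, int width, int height)
{
	DRM_DEBUG_KMS("requested mode %dx%d\n", width, height);

	if (width <= 0 || height <= 0) {
		DRM_ERROR("rejecting invalid mode %dx%d\n", width, height);
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("programming hypothetical hardware\n");
	return 0;
}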
+
+/***********************************************************************/
+/** \name Internal types and structures */
+/*@{*/
+
+#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
+
+/**
+ * Ioctl function type.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private pointer.
+ * \param cmd command.
+ * \param arg argument.
+ */
+typedef int drm_ioctl_t(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
+ unsigned long arg);
+
+#define DRM_IOCTL_NR(n) _IOC_NR(n)
+#define DRM_MAJOR 226
+
+#define DRM_AUTH 0x1
+#define DRM_MASTER 0x2
+#define DRM_ROOT_ONLY 0x4
+#define DRM_CONTROL_ALLOW 0x8
+#define DRM_UNLOCKED 0x10
+#define DRM_RENDER_ALLOW 0x20
+
+struct drm_ioctl_desc {
+ unsigned int cmd;
+ int flags;
+ drm_ioctl_t *func;
+ const char *name;
+};
+
+/**
+ * Creates a driver or general drm_ioctl_desc array entry for the given
+ * ioctl, for use by drm_ioctl().
+ */
+
+#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \
+ [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = { \
+ .cmd = DRM_IOCTL_##ioctl, \
+ .func = _func, \
+ .flags = _flags, \
+ .name = #ioctl \
+ }
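
A driver's ioctl table is then an array of such entries, indexed relative to DRM_COMMAND_BASE and wired up through the ioctls/num_ioctls fields of struct drm_driver. In the sketch below, DRM_IOCTL_EXAMPLE_QUERY and example_query_ioctl are stand-ins that a real driver would define in its own uapi header and source:

static const struct drm_ioctl_desc example_ioctls[] = {
	DRM_IOCTL_DEF_DRV(EXAMPLE_QUERY, example_query_ioctl,
			  DRM_AUTH | DRM_RENDER_ALLOW),
};
/* In struct drm_driver:  .ioctls = example_ioctls,
 *                        .num_ioctls = ARRAY_SIZE(example_ioctls)  */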
+
+/* Event queued up for userspace to read */
+struct drm_pending_event {
+ struct drm_event *event;
+ struct list_head link;
+ struct drm_file *file_priv;
+ pid_t pid; /* pid of requester, no guarantee it's valid by the time
+ we deliver the event, for tracing only */
+ void (*destroy)(struct drm_pending_event *event);
+};
+
+/* initial implementation using a linked list - todo hashtab */
+struct drm_prime_file_private {
+ struct list_head head;
+ struct mutex lock;
+};
+
+/** File private data */
+struct drm_file {
+ unsigned authenticated :1;
+ /* Whether we're master for a minor. Protected by master_mutex */
+ unsigned is_master :1;
+ /* true when the client has asked us to expose stereo 3D mode flags */
+ unsigned stereo_allowed :1;
+ /*
+ * true if client understands CRTC primary planes and cursor planes
+ * in the plane list
+ */
+ unsigned universal_planes:1;
+ /* true if client understands atomic properties */
+ unsigned atomic:1;
+
+ struct pid *pid;
+ kuid_t uid;
+ drm_magic_t magic;
+ struct list_head lhead;
+ struct drm_minor *minor;
+ unsigned long lock_count;
+
+ /** Mapping of mm object handles to object pointers. */
+ struct idr object_idr;
+ /** Lock for synchronization of access to object_idr. */
+ spinlock_t table_lock;
+
+ struct file *filp;
+ void *driver_priv;
+
+ struct drm_master *master; /* master this node is currently associated with
+ N.B. not always minor->master */
+ /**
+ * fbs - List of framebuffers associated with this file.
+ *
+ * Protected by fbs_lock. Note that the fbs list holds a reference on
+ * the fb object to prevent it from untimely disappearing.
+ */
+ struct list_head fbs;
+ struct mutex fbs_lock;
+
+ wait_queue_head_t event_wait;
+ struct list_head event_list;
+ int event_space;
+
+ struct drm_prime_file_private prime;
+};
+
+/**
+ * Lock data.
+ */
+struct drm_lock_data {
+ struct drm_hw_lock *hw_lock; /**< Hardware lock */
+ /** Private of lock holder's file (NULL=kernel) */
+ struct drm_file *file_priv;
+ wait_queue_head_t lock_queue; /**< Queue of blocked processes */
+ unsigned long lock_time; /**< Time of last lock in jiffies */
+ spinlock_t spinlock;
+ uint32_t kernel_waiters;
+ uint32_t user_waiters;
+ int idle_has_lock;
+};
+
+/**
+ * struct drm_master - drm master structure
+ *
+ * @refcount: Refcount for this master object.
+ * @minor: Link back to minor char device we are master for. Immutable.
+ * @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex.
+ * @unique_len: Length of unique field. Protected by drm_global_mutex.
+ * @magiclist: Hash of used authentication tokens. Protected by struct_mutex.
+ * @magicfree: List of used authentication tokens. Protected by struct_mutex.
+ * @lock: DRI lock information.
+ * @driver_priv: Pointer to driver-private information.
+ */
+struct drm_master {
+ struct kref refcount;
+ struct drm_minor *minor;
+ char *unique;
+ int unique_len;
+ struct drm_open_hash magiclist;
+ struct list_head magicfree;
+ struct drm_lock_data lock;
+ void *driver_priv;
+};
+
+/* Size of ringbuffer for vblank timestamps. Just double-buffer
+ * in initial implementation.
+ */
+#define DRM_VBLANKTIME_RBSIZE 2
+
+/* Flags and return codes for get_vblank_timestamp() driver function. */
+#define DRM_CALLED_FROM_VBLIRQ 1
+#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
+#define DRM_VBLANKTIME_IN_VBLANK (1 << 1)
+
+/* get_scanout_position() return flags */
+#define DRM_SCANOUTPOS_VALID (1 << 0)
+#define DRM_SCANOUTPOS_IN_VBLANK (1 << 1)
+#define DRM_SCANOUTPOS_ACCURATE (1 << 2)
+
+/**
+ * DRM driver structure. This structure represents the common code for
+ * a family of cards. There will be one drm_device for each card present
+ * in this family.
+ */
+struct drm_driver {
+ int (*load) (struct drm_device *, unsigned long flags);
+ int (*firstopen) (struct drm_device *);
+ int (*open) (struct drm_device *, struct drm_file *);
+ void (*preclose) (struct drm_device *, struct drm_file *file_priv);
+ void (*postclose) (struct drm_device *, struct drm_file *);
+ void (*lastclose) (struct drm_device *);
+ int (*unload) (struct drm_device *);
+ int (*suspend) (struct drm_device *, pm_message_t state);
+ int (*resume) (struct drm_device *);
+ int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
+ int (*dma_quiescent) (struct drm_device *);
+ int (*context_dtor) (struct drm_device *dev, int context);
+ int (*set_busid)(struct drm_device *dev, struct drm_master *master);
+
+ /**
+ * get_vblank_counter - get raw hardware vblank counter
+ * @dev: DRM device
+ * @crtc: counter to fetch
+ *
+ * Driver callback for fetching a raw hardware vblank counter for @crtc.
+ * If a device doesn't have a hardware counter, the driver can simply
+ * return the value of drm_vblank_count. The DRM core will account for
+ * missed vblank events while interrupts where disabled based on system
+ * timestamps.
+ *
+ * Wraparound handling and loss of events due to modesetting is dealt
+ * with in the DRM core code.
+ *
+ * RETURNS
+ * Raw vblank counter value.
+ */
+ u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
+
+ /**
+ * enable_vblank - enable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to enable
+ *
+ * Enable vblank interrupts for @crtc. If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ *
+ * RETURNS
+ * Zero on success, appropriate errno if the given @crtc's vblank
+ * interrupt cannot be enabled.
+ */
+ int (*enable_vblank) (struct drm_device *dev, int crtc);
+
+ /**
+ * disable_vblank - disable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to disable
+ *
+ * Disable vblank interrupts for @crtc. If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ */
+ void (*disable_vblank) (struct drm_device *dev, int crtc);
+
+ /**
+ * Called by \c drm_pci_device_is_agp. Typically used to determine if a
+ * card is really attached to AGP or not.
+ *
+ * \param dev DRM device handle
+ *
+ * \returns
+ * One of three values is returned depending on whether or not the
+ * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
+ * (return of 1), or may or may not be AGP (return of 2).
+ */
+ int (*device_is_agp) (struct drm_device *dev);
+
+ /**
+ * Called by vblank timestamping code.
+ *
+ * Return the current display scanout position from a crtc, and an
+ * optional accurate ktime_get timestamp of when position was measured.
+ *
+ * \param dev DRM device.
+ * \param crtc Id of the crtc to query.
+ * \param flags Flags from the caller (DRM_CALLED_FROM_VBLIRQ or 0).
+ * \param *vpos Target location for current vertical scanout position.
+ * \param *hpos Target location for current horizontal scanout position.
+ * \param *stime Target location for timestamp taken immediately before
+ * scanout position query. Can be NULL to skip timestamp.
+ * \param *etime Target location for timestamp taken immediately after
+ * scanout position query. Can be NULL to skip timestamp.
+ *
+ * Returns vpos as a positive number while in active scanout area.
+ * Returns vpos as a negative number inside vblank, counting the number
+ * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+ * until start of active scanout / end of vblank."
+ *
+ * \return Flags, or'ed together as follows:
+ *
+ * DRM_SCANOUTPOS_VALID = Query successful.
+ * DRM_SCANOUTPOS_IN_VBLANK = Inside vblank.
+ * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
+ * this flag means that returned position may be offset by a constant
+ * but unknown small number of scanlines wrt. real scanout position.
+ *
+ */
+ int (*get_scanout_position) (struct drm_device *dev, int crtc,
+ unsigned int flags,
+ int *vpos, int *hpos, ktime_t *stime,
+ ktime_t *etime);
+
+ /**
+ * Called by \c drm_get_last_vbltimestamp. Should return a precise
+ * timestamp when the most recent VBLANK interval ended or will end.
+ *
+ * Specifically, the timestamp in @vblank_time should correspond as
+ * closely as possible to the time when the first video scanline of
+ * the video frame after the end of VBLANK will start scanning out,
+ * the time immediately after end of the VBLANK interval. If the
+ * @crtc is currently inside VBLANK, this will be a time in the future.
+ * If the @crtc is currently scanning out a frame, this will be the
+ * past start time of the current scanout. This is meant to adhere
+ * to the OpenML OML_sync_control extension specification.
+ *
+ * \param dev DRM device handle.
+ * \param crtc crtc for which timestamp should be returned.
+ * \param *max_error Maximum allowable timestamp error in nanoseconds.
+ * Implementation should strive to provide timestamp
+ * with an error of at most *max_error nanoseconds.
+ * Returns true upper bound on error for timestamp.
+ * \param *vblank_time Target location for returned vblank timestamp.
+ * \param flags 0 = Defaults, no special treatment needed.
+ * \param DRM_CALLED_FROM_VBLIRQ = Function is called from vblank
+ * irq handler. Some drivers need to apply some workarounds
+ * for gpu-specific vblank irq quirks if flag is set.
+ *
+ * \returns
+ * Zero if timestamping isn't supported in current display mode or a
+ * negative number on failure. A positive status code on success,
+ * which describes how the vblank_time timestamp was computed.
+ */
+ int (*get_vblank_timestamp) (struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags);
+
+ /* these have to be filled in */
+
+ irqreturn_t(*irq_handler) (int irq, void *arg);
+ void (*irq_preinstall) (struct drm_device *dev);
+ int (*irq_postinstall) (struct drm_device *dev);
+ void (*irq_uninstall) (struct drm_device *dev);
+
+ /* Master routines */
+ int (*master_create)(struct drm_device *dev, struct drm_master *master);
+ void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
+ /**
+ * master_set is called whenever the minor master is set.
+ * master_drop is called whenever the minor master is dropped.
+ */
+
+ int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
+ bool from_open);
+ void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv,
+ bool from_release);
+
+ int (*debugfs_init)(struct drm_minor *minor);
+ void (*debugfs_cleanup)(struct drm_minor *minor);
+
+ /**
+ * Driver-specific hooks for GEM object lifetime: gem_free_object
+ * destroys an object, while gem_open_object and gem_close_object are
+ * called when a client opens or closes a handle to it.
+ */
+ void (*gem_free_object) (struct drm_gem_object *obj);
+ int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
+ void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
+
+ /* prime: */
+ /* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */
+ int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
+ uint32_t handle, uint32_t flags, int *prime_fd);
+ /* import fd -> handle (see drm_gem_prime_fd_to_handle() helper) */
+ int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
+ int prime_fd, uint32_t *handle);
+ /* export GEM -> dmabuf */
+ struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags);
+ /* import dmabuf -> GEM */
+ struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+ /* low-level interface used by drm_gem_prime_{import,export} */
+ int (*gem_prime_pin)(struct drm_gem_object *obj);
+ void (*gem_prime_unpin)(struct drm_gem_object *obj);
+ struct reservation_object * (*gem_prime_res_obj)(
+ struct drm_gem_object *obj);
+ struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
+ struct drm_gem_object *(*gem_prime_import_sg_table)(
+ struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+ void *(*gem_prime_vmap)(struct drm_gem_object *obj);
+ void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);
+ int (*gem_prime_mmap)(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
+
+ /* vga arb irq handler */
+ void (*vgaarb_irq)(struct drm_device *dev, bool state);
+
+ /* dumb alloc support */
+ int (*dumb_create)(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+ int (*dumb_map_offset)(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset);
+ int (*dumb_destroy)(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle);
+
+ /* Driver private ops for this object */
+ const struct vm_operations_struct *gem_vm_ops;
+
+ int major;
+ int minor;
+ int patchlevel;
+ char *name;
+ char *desc;
+ char *date;
+
+ u32 driver_features;
+ int dev_priv_size;
+ const struct drm_ioctl_desc *ioctls;
+ int num_ioctls;
+ const struct file_operations *fops;
+
+ /* List of devices hanging off this driver with stealth attach. */
+ struct list_head legacy_dev_list;
+};
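
Tying a few of the hooks above together, a minimal modesetting driver's drm_driver could look roughly like the following; every example_* symbol and the version strings are placeholders, not definitions from this patch:

static struct drm_driver example_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
	.load            = example_load,	/* hypothetical */
	.unload          = example_unload,	/* hypothetical */
	.gem_free_object = example_gem_free,	/* hypothetical */
	.fops            = &example_fops,	/* hypothetical file_operations */
	.name            = "example",
	.desc            = "Illustrative DRM driver",
	.date            = "20150805",
	.major           = 1,
	.minor           = 0,
};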
+
+enum drm_minor_type {
+ DRM_MINOR_LEGACY,
+ DRM_MINOR_CONTROL,
+ DRM_MINOR_RENDER,
+ DRM_MINOR_CNT,
+};
+
+/**
+ * Info file list entry. This structure represents a debugfs or proc file to
+ * be created by the drm core
+ */
+struct drm_info_list {
+ const char *name; /**< file name */
+ int (*show)(struct seq_file*, void*); /**< show callback */
+ u32 driver_features; /**< Required driver features for this entry */
+ void *data;
+};
+
+/**
+ * debugfs node structure. This structure represents a debugfs file.
+ */
+struct drm_info_node {
+ struct list_head list;
+ struct drm_minor *minor;
+ const struct drm_info_list *info_ent;
+ struct dentry *dent;
+};
+
+/**
+ * DRM minor structure. This structure represents a drm minor number.
+ */
+struct drm_minor {
+ int index; /**< Minor device number */
+ int type; /**< Control or render */
+ struct device *kdev; /**< Linux device */
+ struct drm_device *dev;
+
+ struct dentry *debugfs_root;
+
+ struct list_head debugfs_list;
+ struct mutex debugfs_lock; /* Protects debugfs_list. */
+
+ /* currently active master for this node. Protected by master_mutex */
+ struct drm_master *master;
+ struct drm_mode_group mode_group;
+};
+
+
+struct drm_pending_vblank_event {
+ struct drm_pending_event base;
+ int pipe;
+ struct drm_event_vblank event;
+};
+
+struct drm_vblank_crtc {
+ struct drm_device *dev; /* pointer to the drm_device */
+ wait_queue_head_t queue; /**< VBLANK wait queue */
+ struct timeval time[DRM_VBLANKTIME_RBSIZE]; /**< timestamp of current count */
+ struct timer_list disable_timer; /* delayed disable timer */
+ atomic_t count; /**< number of VBLANK interrupts */
+ atomic_t refcount; /* number of users of vblank interrupts per crtc */
+ u32 last; /* protected by dev->vbl_lock, used */
+ /* for wraparound handling */
+ u32 last_wait; /* Last vblank seqno waited per CRTC */
+ unsigned int inmodeset; /* Display driver is setting mode */
+ int crtc; /* crtc index */
+ bool enabled; /* so we don't call enable more than
+ once per disable */
+};
+
+/**
+ * DRM device structure. This structure represents a complete card that
+ * may contain multiple heads.
+ */
+struct drm_device {
+ struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */
+ int if_version; /**< Highest interface version set */
+
+ /** \name Lifetime Management */
+ /*@{ */
+ struct kref ref; /**< Object ref-count */
+ struct device *dev; /**< Device structure of bus-device */
+ struct drm_driver *driver; /**< DRM driver managing the device */
+ void *dev_private; /**< DRM driver private data */
+ struct drm_minor *control; /**< Control node */
+ struct drm_minor *primary; /**< Primary node */
+ struct drm_minor *render; /**< Render node */
+ atomic_t unplugged; /**< Flag whether dev is dead */
+ struct inode *anon_inode; /**< inode for private address-space */
+ char *unique; /**< unique name of the device */
+ /*@} */
+
+ /** \name Locks */
+ /*@{ */
+ struct mutex struct_mutex; /**< For others */
+ struct mutex master_mutex; /**< For drm_minor::master and drm_file::is_master */
+ /*@} */
+
+ /** \name Usage Counters */
+ /*@{ */
+ int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
+ spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
+ int buf_use; /**< Buffers in use -- cannot alloc */
+ atomic_t buf_alloc; /**< Buffer allocation in progress */
+ /*@} */
+
+ struct list_head filelist;
+
+ /** \name Memory management */
+ /*@{ */
+ struct list_head maplist; /**< Linked list of regions */
+ struct drm_open_hash map_hash; /**< User token hash table for maps */
+
+ /** \name Context handle management */
+ /*@{ */
+ struct list_head ctxlist; /**< Linked list of context handles */
+ struct mutex ctxlist_mutex; /**< For ctxlist */
+
+ struct idr ctx_idr;
+
+ struct list_head vmalist; /**< List of vmas (for debugging) */
+
+ /*@} */
+
+ /** \name DMA support */
+ /*@{ */
+ struct drm_device_dma *dma; /**< Optional pointer for DMA support */
+ /*@} */
+
+ /** \name Context support */
+ /*@{ */
+
+ __volatile__ long context_flag; /**< Context swapping flag */
+ int last_context; /**< Last current context */
+ /*@} */
+
+ /** \name VBLANK IRQ support */
+ /*@{ */
+ bool irq_enabled;
+ int irq;
+
+ /*
+ * At load time, disabling the vblank interrupt won't be allowed since
+ * old clients may not call the modeset ioctl and therefore misbehave.
+ * Once the modeset ioctl *has* been called though, we can safely
+ * disable them when unused.
+ */
+ bool vblank_disable_allowed;
+
+ /*
+ * If true, vblank interrupt will be disabled immediately when the
+ * refcount drops to zero, as opposed to via the vblank disable
+ * timer.
+ * This can be set to true if the hardware has a working vblank
+ * counter and the driver uses drm_vblank_on() and drm_vblank_off()
+ * appropriately.
+ */
+ bool vblank_disable_immediate;
+
+ /* array of size num_crtcs */
+ struct drm_vblank_crtc *vblank;
+
+ spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */
+ spinlock_t vbl_lock;
+
+ u32 max_vblank_count; /**< size of vblank counter register */
+
+ /**
+ * List of events
+ */
+ struct list_head vblank_event_list;
+ spinlock_t event_lock;
+
+ /*@} */
+
+ struct drm_agp_head *agp; /**< AGP data */
+
+ struct pci_dev *pdev; /**< PCI device structure */
+#ifdef __alpha__
+ struct pci_controller *hose;
+#endif
+
+ struct platform_device *platformdev; /**< Platform device structure */
+
+ struct drm_sg_mem *sg; /**< Scatter gather memory */
+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
+ sigset_t sigmask;
+
+ struct {
+ int context;
+ struct drm_hw_lock *lock;
+ } sigdata;
+
+ struct drm_local_map *agp_buffer_map;
+ unsigned int agp_buffer_token;
+
+ struct drm_mode_config mode_config; /**< Current mode config */
+
+ /** \name GEM information */
+ /*@{ */
+ struct mutex object_name_lock;
+ struct idr object_name_idr;
+ struct drm_vma_offset_manager *vma_offset_manager;
+ /*@} */
+ int switch_power_state;
+};
+
+#define DRM_SWITCH_POWER_ON 0
+#define DRM_SWITCH_POWER_OFF 1
+#define DRM_SWITCH_POWER_CHANGING 2
+#define DRM_SWITCH_POWER_DYNAMIC_OFF 3
+
+static __inline__ int drm_core_check_feature(struct drm_device *dev,
+ int feature)
+{
+ return ((dev->driver->driver_features & feature) ? 1 : 0);
+}
+
+static inline void drm_device_set_unplugged(struct drm_device *dev)
+{
+ smp_wmb();
+ atomic_set(&dev->unplugged, 1);
+}
+
+static inline int drm_device_is_unplugged(struct drm_device *dev)
+{
+ int ret = atomic_read(&dev->unplugged);
+ smp_rmb();
+ return ret;
+}
+
+static inline bool drm_is_render_client(const struct drm_file *file_priv)
+{
+ return file_priv->minor->type == DRM_MINOR_RENDER;
+}
+
+static inline bool drm_is_control_client(const struct drm_file *file_priv)
+{
+ return file_priv->minor->type == DRM_MINOR_CONTROL;
+}
+
+static inline bool drm_is_primary_client(const struct drm_file *file_priv)
+{
+ return file_priv->minor->type == DRM_MINOR_LEGACY;
+}
+
+/******************************************************************/
+/** \name Internal function definitions */
+/*@{*/
+
+ /* Driver support (drm_drv.h) */
+extern long drm_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern long drm_compat_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags);
+
+ /* Device support (drm_fops.h) */
+extern int drm_open(struct inode *inode, struct file *filp);
+extern ssize_t drm_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *offset);
+extern int drm_release(struct inode *inode, struct file *filp);
+
+ /* Mapping support (drm_vm.h) */
+extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
+
+/* Misc. IOCTL support (drm_ioctl.c) */
+int drm_noop(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/* Cache management (drm_cache.c) */
+void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
+void drm_clflush_sg(struct sg_table *st);
+void drm_clflush_virt_range(void *addr, unsigned long length);
+
+/*
+ * These are exported to drivers so that they can implement fencing using
+ * DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
+ */
+
+ /* IRQ support (drm_irq.h) */
+extern int drm_irq_install(struct drm_device *dev, int irq);
+extern int drm_irq_uninstall(struct drm_device *dev);
+
+extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
+extern int drm_wait_vblank(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
+extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+ struct timeval *vblanktime);
+extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
+ struct drm_pending_vblank_event *e);
+extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e);
+extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
+extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
+extern int drm_vblank_get(struct drm_device *dev, int crtc);
+extern void drm_vblank_put(struct drm_device *dev, int crtc);
+extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
+extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
+extern void drm_wait_one_vblank(struct drm_device *dev, int crtc);
+extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
+extern void drm_vblank_off(struct drm_device *dev, int crtc);
+extern void drm_vblank_on(struct drm_device *dev, int crtc);
+extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
+extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
+extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
+extern void drm_vblank_cleanup(struct drm_device *dev);
+
+extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
+ int crtc, int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags,
+ const struct drm_crtc *refcrtc,
+ const struct drm_display_mode *mode);
+extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode);
+
+/**
+ * drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
+ * @crtc: which CRTC's vblank waitqueue to retrieve
+ *
+ * This function returns a pointer to the vblank waitqueue for the CRTC.
+ * Drivers can use this to implement vblank waits using wait_event() & co.
+ */
+static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc)
+{
+ return &crtc->dev->vblank[drm_crtc_index(crtc)].queue;
+}
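
A hedged sketch of the wait_event() pattern mentioned above, assuming the caller already holds a vblank reference via drm_crtc_vblank_get() and has some driver-specific predicate to test; example_flip_done() is a stand-in:

static void example_wait_for_flip(struct drm_crtc *crtc)
{
	wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(crtc);

	/* Woken from the vblank interrupt path; the predicate is hypothetical. */
	wait_event(*wq, example_flip_done(crtc));
}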
+
+/* Modesetting support */
+extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
+extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
+
+ /* Stub support (drm_stub.h) */
+extern struct drm_master *drm_master_get(struct drm_master *master);
+extern void drm_master_put(struct drm_master **master);
+
+extern void drm_put_dev(struct drm_device *dev);
+extern void drm_unplug_dev(struct drm_device *dev);
+extern unsigned int drm_debug;
+extern bool drm_atomic;
+
+ /* Debugfs support */
+#if defined(CONFIG_DEBUG_FS)
+extern int drm_debugfs_create_files(const struct drm_info_list *files,
+ int count, struct dentry *root,
+ struct drm_minor *minor);
+extern int drm_debugfs_remove_files(const struct drm_info_list *files,
+ int count, struct drm_minor *minor);
+#else
+static inline int drm_debugfs_create_files(const struct drm_info_list *files,
+ int count, struct dentry *root,
+ struct drm_minor *minor)
+{
+ return 0;
+}
+
+static inline int drm_debugfs_remove_files(const struct drm_info_list *files,
+ int count, struct drm_minor *minor)
+{
+ return 0;
+}
+#endif
+
+extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags);
+extern int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle, uint32_t flags,
+ int *prime_fd);
+extern struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+extern int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv, int prime_fd, uint32_t *handle);
+extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
+
+extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
+ dma_addr_t *addrs, int max_pages);
+extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages);
+extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
+
+
+extern struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
+ size_t align);
+extern void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah);
+
+ /* sysfs support (drm_sysfs.c) */
+extern void drm_sysfs_hotplug_event(struct drm_device *dev);
+
+
+struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+ struct device *parent);
+void drm_dev_ref(struct drm_device *dev);
+void drm_dev_unref(struct drm_device *dev);
+int drm_dev_register(struct drm_device *dev, unsigned long flags);
+void drm_dev_unregister(struct drm_device *dev);
+int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...);
+
+struct drm_minor *drm_minor_acquire(unsigned int minor_id);
+void drm_minor_release(struct drm_minor *minor);
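
A rough sketch of the allocate/register lifecycle these functions implement, as a bus driver's probe path might use it (example_driver is the hypothetical drm_driver sketched earlier):

static int example_probe(struct device *parent)
{
	struct drm_device *ddev;
	int ret;

	ddev = drm_dev_alloc(&example_driver, parent);
	if (!ddev)
		return -ENOMEM;

	ret = drm_dev_register(ddev, 0);
	if (ret)
		drm_dev_unref(ddev);	/* drops the initial reference */
	return ret;
}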
+
+/*@}*/
+
+/* PCI section */
+static __inline__ int drm_pci_device_is_agp(struct drm_device *dev)
+{
+ if (dev->driver->device_is_agp != NULL) {
+ int err = (*dev->driver->device_is_agp) (dev);
+
+ if (err != 2) {
+ return err;
+ }
+ }
+
+ return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
+}
+void drm_pci_agp_destroy(struct drm_device *dev);
+
+extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
+extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
+#ifdef CONFIG_PCI
+extern int drm_get_pci_dev(struct pci_dev *pdev,
+ const struct pci_device_id *ent,
+ struct drm_driver *driver);
+extern int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
+#else
+static inline int drm_get_pci_dev(struct pci_dev *pdev,
+ const struct pci_device_id *ent,
+ struct drm_driver *driver)
+{
+ return -ENOSYS;
+}
+
+static inline int drm_pci_set_busid(struct drm_device *dev,
+ struct drm_master *master)
+{
+ return -ENOSYS;
+}
+#endif
+
+#define DRM_PCIE_SPEED_25 1
+#define DRM_PCIE_SPEED_50 2
+#define DRM_PCIE_SPEED_80 4
+
+extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask);
+
+/* platform section */
+extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
+extern int drm_platform_set_busid(struct drm_device *d, struct drm_master *m);
+
+/* returns true if currently okay to sleep */
+static __inline__ bool drm_can_sleep(void)
+{
+ if (in_atomic() || in_dbg_master() || irqs_disabled())
+ return false;
+ return true;
+}
+
+#endif
diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h
new file mode 100644
index 000000000..055dc058d
--- /dev/null
+++ b/include/drm/drm_agpsupport.h
@@ -0,0 +1,199 @@
+#ifndef _DRM_AGPSUPPORT_H_
+#define _DRM_AGPSUPPORT_H_
+
+#include <linux/agp_backend.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <uapi/drm/drm.h>
+
+struct drm_device;
+struct drm_file;
+
+#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && \
+ defined(MODULE)))
+
+struct drm_agp_head {
+ struct agp_kern_info agp_info;
+ struct list_head memory;
+ unsigned long mode;
+ struct agp_bridge_data *bridge;
+ int enabled;
+ int acquired;
+ unsigned long base;
+ int agp_mtrr;
+ int cant_use_aperture;
+ unsigned long page_mask;
+};
+
+#if __OS_HAS_AGP
+
+void drm_free_agp(struct agp_memory * handle, int pages);
+int drm_bind_agp(struct agp_memory * handle, unsigned int start);
+int drm_unbind_agp(struct agp_memory * handle);
+struct agp_memory *drm_agp_bind_pages(struct drm_device *dev,
+ struct page **pages,
+ unsigned long num_pages,
+ uint32_t gtt_offset,
+ uint32_t type);
+
+struct drm_agp_head *drm_agp_init(struct drm_device *dev);
+void drm_agp_clear(struct drm_device *dev);
+int drm_agp_acquire(struct drm_device *dev);
+int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_agp_release(struct drm_device *dev);
+int drm_agp_release_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
+int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
+int drm_agp_info_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
+int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
+int drm_agp_free_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
+int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
+int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+#else /* __OS_HAS_AGP */
+
+static inline void drm_free_agp(struct agp_memory * handle, int pages)
+{
+}
+
+static inline int drm_bind_agp(struct agp_memory * handle, unsigned int start)
+{
+ return -ENODEV;
+}
+
+static inline int drm_unbind_agp(struct agp_memory * handle)
+{
+ return -ENODEV;
+}
+
+static inline struct agp_memory *drm_agp_bind_pages(struct drm_device *dev,
+ struct page **pages,
+ unsigned long num_pages,
+ uint32_t gtt_offset,
+ uint32_t type)
+{
+ return NULL;
+}
+
+static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev)
+{
+ return NULL;
+}
+
+static inline void drm_agp_clear(struct drm_device *dev)
+{
+}
+
+static inline int drm_agp_acquire(struct drm_device *dev)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_release(struct drm_device *dev)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_release_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_enable(struct drm_device *dev,
+ struct drm_agp_mode mode)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_info(struct drm_device *dev,
+ struct drm_agp_info *info)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_info_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_alloc(struct drm_device *dev,
+ struct drm_agp_buffer *request)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_free(struct drm_device *dev,
+ struct drm_agp_buffer *request)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_free_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_unbind(struct drm_device *dev,
+ struct drm_agp_binding *request)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_bind(struct drm_device *dev,
+ struct drm_agp_binding *request)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+
+#endif /* __OS_HAS_AGP */
+
+#endif /* _DRM_AGPSUPPORT_H_ */
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
new file mode 100644
index 000000000..3f13b910f
--- /dev/null
+++ b/include/drm/drm_atomic.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#ifndef DRM_ATOMIC_H_
+#define DRM_ATOMIC_H_
+
+#include <drm/drm_crtc.h>
+
+struct drm_atomic_state * __must_check
+drm_atomic_state_alloc(struct drm_device *dev);
+void drm_atomic_state_clear(struct drm_atomic_state *state);
+void drm_atomic_state_free(struct drm_atomic_state *state);
+
+struct drm_crtc_state * __must_check
+drm_atomic_get_crtc_state(struct drm_atomic_state *state,
+ struct drm_crtc *crtc);
+int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_crtc_state *state, struct drm_property *property,
+ uint64_t val);
+struct drm_plane_state * __must_check
+drm_atomic_get_plane_state(struct drm_atomic_state *state,
+ struct drm_plane *plane);
+int drm_atomic_plane_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state, struct drm_property *property,
+ uint64_t val);
+struct drm_connector_state * __must_check
+drm_atomic_get_connector_state(struct drm_atomic_state *state,
+ struct drm_connector *connector);
+int drm_atomic_connector_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state, struct drm_property *property,
+ uint64_t val);
+
+int __must_check
+drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
+ struct drm_crtc *crtc);
+void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
+ struct drm_framebuffer *fb);
+int __must_check
+drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
+ struct drm_crtc *crtc);
+int __must_check
+drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
+ struct drm_crtc *crtc);
+int
+drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
+ struct drm_crtc *crtc);
+
+void drm_atomic_legacy_backoff(struct drm_atomic_state *state);
+
+int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
+int __must_check drm_atomic_commit(struct drm_atomic_state *state);
+int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
+
+#define for_each_connector_in_state(state, connector, connector_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (state)->num_connector && \
+ ((connector) = (state)->connectors[__i], \
+ (connector_state) = (state)->connector_states[__i], 1); \
+ (__i)++) \
+ if (connector)
+
+#define for_each_crtc_in_state(state, crtc, crtc_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (state)->dev->mode_config.num_crtc && \
+ ((crtc) = (state)->crtcs[__i], \
+ (crtc_state) = (state)->crtc_states[__i], 1); \
+ (__i)++) \
+ if (crtc_state)
+
+#define for_each_plane_in_state(state, plane, plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (state)->dev->mode_config.num_total_plane && \
+ ((plane) = (state)->planes[__i], \
+ (plane_state) = (state)->plane_states[__i], 1); \
+ (__i)++) \
+ if (plane_state)
+
+#endif /* DRM_ATOMIC_H_ */
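
The for_each_*_in_state iterators above only visit objects whose state was actually duplicated into the drm_atomic_state. A hedged example of how a driver's check path might walk the CRTC states (the bookkeeping itself is illustrative):

static int example_count_enabled_crtcs(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, enabled = 0;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->enable)
			enabled++;
	}
	return enabled;
}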
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
new file mode 100644
index 000000000..d665781eb
--- /dev/null
+++ b/include/drm/drm_atomic_helper.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#ifndef DRM_ATOMIC_HELPER_H_
+#define DRM_ATOMIC_HELPER_H_
+
+#include <drm/drm_crtc.h>
+
+int drm_atomic_helper_check_modeset(struct drm_device *dev,
+ struct drm_atomic_state *state);
+int drm_atomic_helper_check_planes(struct drm_device *dev,
+ struct drm_atomic_state *state);
+int drm_atomic_helper_check(struct drm_device *dev,
+ struct drm_atomic_state *state);
+int drm_atomic_helper_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool async);
+
+void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
+ struct drm_atomic_state *old_state);
+
+void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
+ struct drm_atomic_state *state);
+void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
+ struct drm_atomic_state *old_state);
+
+int drm_atomic_helper_prepare_planes(struct drm_device *dev,
+ struct drm_atomic_state *state);
+void drm_atomic_helper_commit_planes(struct drm_device *dev,
+ struct drm_atomic_state *state);
+void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
+ struct drm_atomic_state *old_state);
+
+void drm_atomic_helper_swap_state(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
+/* implementations for legacy interfaces */
+int drm_atomic_helper_update_plane(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+int drm_atomic_helper_disable_plane(struct drm_plane *plane);
+int drm_atomic_helper_set_config(struct drm_mode_set *set);
+
+int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property,
+ uint64_t val);
+int drm_atomic_helper_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property,
+ uint64_t val);
+int drm_atomic_helper_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val);
+int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags);
+void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
+ int mode);
+
+/* default implementations for state handling */
+void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
+void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+struct drm_crtc_state *
+drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
+void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+
+void drm_atomic_helper_plane_reset(struct drm_plane *plane);
+void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
+struct drm_plane_state *
+drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
+void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
+void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
+
+void drm_atomic_helper_connector_reset(struct drm_connector *connector);
+void
+__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
+ struct drm_connector_state *state);
+struct drm_connector_state *
+drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
+void
+__drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state);
+void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state);
+
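Editor's note: taken together, the legacy entry points and the default state handlers above let a plane be driven almost entirely by these helpers. A minimal sketch of the usual table (the foo_ prefix is a placeholder):

static const struct drm_plane_funcs foo_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= drm_plane_cleanup,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};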
+/**
+ * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
+ * @plane: the loop cursor
+ * @crtc: the crtc whose planes are iterated
+ *
+ * This iterates over the current state, useful (for example) when applying
+ * atomic state after it has been checked and swapped. To iterate over the
+ * planes which *will* be attached (for ->atomic_check()) see
+ * drm_atomic_crtc_state_for_each_plane().
+ */
+#define drm_atomic_crtc_for_each_plane(plane, crtc) \
+ drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask)
+
+/**
+ * drm_atomic_crtc_state_for_each_plane - iterate over attached planes in new state
+ * @plane: the loop cursor
+ * @crtc_state: the incoming crtc-state
+ *
+ * Similar to drm_atomic_crtc_for_each_plane(), but iterates the planes that will be
+ * attached if the specified state is applied. Useful during (for example)
+ * ->atomic_check() operations, to validate the incoming state
+ */
+#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \
+ drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask)
+
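Editor's note: as a hedged illustration, the two iterators differ only in whether they walk the current or the to-be-applied plane_mask. A hypothetical ->atomic_check could, for instance, bound the number of planes that would end up on a CRTC (FOO_MAX_PLANES_PER_CRTC and the foo_ name are made up):

static int foo_crtc_atomic_check(struct drm_crtc *crtc,
				 struct drm_crtc_state *crtc_state)
{
	struct drm_plane *plane;
	int n = 0;

	/* Count the planes that would be attached if this state is applied. */
	drm_atomic_crtc_state_for_each_plane(plane, crtc_state)
		n++;

	return n > FOO_MAX_PLANES_PER_CRTC ? -EINVAL : 0;
}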
+/**
+ * drm_atomic_plane_disabling - check whether a plane is being disabled
+ * @plane: plane object
+ * @old_state: previous atomic state
+ *
+ * Checks the atomic state of a plane to determine whether it's being disabled
+ * or not. This also WARNs if it detects an invalid state (CRTC and FB must
+ * either both be NULL or both be non-NULL).
+ *
+ * RETURNS:
+ * True if the plane is being disabled, false otherwise.
+ */
+static inline bool
+drm_atomic_plane_disabling(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ /*
+ * When disabling a plane, CRTC and FB should always be NULL together.
+ * Anything else should be considered a bug in the atomic core, so we
+ * gently warn about it.
+ */
+ WARN_ON((plane->state->crtc == NULL && plane->state->fb != NULL) ||
+ (plane->state->crtc != NULL && plane->state->fb == NULL));
+
+ /*
+ * When using the transitional helpers, old_state may be NULL. If so,
+ * we know nothing about the current state and have to assume that it
+ * might be enabled.
+ *
+ * When using the atomic helpers, old_state won't be NULL. Therefore
+ * this check assumes that either the driver will have reconstructed
+ * the correct state in ->reset() or that the driver will have taken
+ * appropriate measures to disable all planes.
+ */
+ return (!old_state || old_state->crtc) && !plane->state->crtc;
+}
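Editor's note: one plausible use, sketched under the assumption of a driver-specific plane update path (all foo_ symbols are hypothetical), is to take the disable branch early:

static void foo_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	if (drm_atomic_plane_disabling(plane, old_state)) {
		/* plane->state->crtc is NULL here: shut the hardware plane off */
		foo_hw_plane_disable(plane);
		return;
	}

	foo_hw_plane_program(plane, plane->state);
}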
+
+#endif /* DRM_ATOMIC_HELPER_H_ */
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
new file mode 100644
index 000000000..7bfb06302
--- /dev/null
+++ b/include/drm/drm_cache.h
@@ -0,0 +1,38 @@
+/**************************************************************************
+ *
+ * Copyright 2009 Red Hat Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ */
+
+#ifndef _DRM_CACHE_H_
+#define _DRM_CACHE_H_
+
+void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
+
+#endif
diff --git a/include/drm/drm_core.h b/include/drm/drm_core.h
new file mode 100644
index 000000000..4e7523863
--- /dev/null
+++ b/include/drm/drm_core.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2004 Jon Smirl <jonsmirl@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#define CORE_AUTHOR "Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl"
+
+#define CORE_NAME "drm"
+#define CORE_DESC "DRM shared core routines"
+#define CORE_DATE "20060810"
+
+#define DRM_IF_MAJOR 1
+#define DRM_IF_MINOR 4
+
+#define CORE_MAJOR 1
+#define CORE_MINOR 1
+#define CORE_PATCHLEVEL 0
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
new file mode 100644
index 000000000..54233583c
--- /dev/null
+++ b/include/drm/drm_crtc.h
@@ -0,0 +1,1543 @@
+/*
+ * Copyright © 2006 Keith Packard
+ * Copyright © 2007-2008 Dave Airlie
+ * Copyright © 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __DRM_CRTC_H__
+#define __DRM_CRTC_H__
+
+#include <linux/i2c.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/idr.h>
+#include <linux/fb.h>
+#include <linux/hdmi.h>
+#include <linux/media-bus-format.h>
+#include <uapi/drm/drm_mode.h>
+#include <uapi/drm/drm_fourcc.h>
+#include <drm/drm_modeset_lock.h>
+
+struct drm_device;
+struct drm_mode_set;
+struct drm_framebuffer;
+struct drm_object_properties;
+struct drm_file;
+struct drm_clip_rect;
+struct device_node;
+struct fence;
+
+#define DRM_MODE_OBJECT_CRTC 0xcccccccc
+#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
+#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0
+#define DRM_MODE_OBJECT_MODE 0xdededede
+#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
+#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
+#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
+#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
+#define DRM_MODE_OBJECT_ANY 0
+
+struct drm_mode_object {
+ uint32_t id;
+ uint32_t type;
+ struct drm_object_properties *properties;
+};
+
+#define DRM_OBJECT_MAX_PROPERTY 24
+struct drm_object_properties {
+ int count, atomic_count;
+ /* NOTE: if we ever start dynamically destroying properties (ie.
+ * not at drm_mode_config_cleanup() time), then we'd have to do
+ * a better job of detaching property from mode objects to avoid
+ * dangling property pointers:
+ */
+ struct drm_property *properties[DRM_OBJECT_MAX_PROPERTY];
+ /* do not read/write values directly, but use drm_object_property_get_value()
+ * and drm_object_property_set_value():
+ */
+ uint64_t values[DRM_OBJECT_MAX_PROPERTY];
+};
+
+static inline int64_t U642I64(uint64_t val)
+{
+ return (int64_t)*((int64_t *)&val);
+}
+static inline uint64_t I642U64(int64_t val)
+{
+ return (uint64_t)*((uint64_t *)&val);
+}
+
+/* rotation property bits */
+#define DRM_ROTATE_0 0
+#define DRM_ROTATE_90 1
+#define DRM_ROTATE_180 2
+#define DRM_ROTATE_270 3
+#define DRM_REFLECT_X 4
+#define DRM_REFLECT_Y 5
+
+enum drm_connector_force {
+ DRM_FORCE_UNSPECIFIED,
+ DRM_FORCE_OFF,
+ DRM_FORCE_ON, /* force on analog part normally */
+ DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */
+};
+
+#include <drm/drm_modes.h>
+
+enum drm_connector_status {
+ connector_status_connected = 1,
+ connector_status_disconnected = 2,
+ connector_status_unknown = 3,
+};
+
+enum subpixel_order {
+ SubPixelUnknown = 0,
+ SubPixelHorizontalRGB,
+ SubPixelHorizontalBGR,
+ SubPixelVerticalRGB,
+ SubPixelVerticalBGR,
+ SubPixelNone,
+};
+
+#define DRM_COLOR_FORMAT_RGB444 (1<<0)
+#define DRM_COLOR_FORMAT_YCRCB444 (1<<1)
+#define DRM_COLOR_FORMAT_YCRCB422 (1<<2)
+/*
+ * Describes a given display (e.g. CRT or flat panel) and its limitations.
+ */
+struct drm_display_info {
+ char name[DRM_DISPLAY_INFO_LEN];
+
+ /* Physical size */
+ unsigned int width_mm;
+ unsigned int height_mm;
+
+ /* Clock limits FIXME: storage format */
+ unsigned int min_vfreq, max_vfreq;
+ unsigned int min_hfreq, max_hfreq;
+ unsigned int pixel_clock;
+ unsigned int bpc;
+
+ enum subpixel_order subpixel_order;
+ u32 color_formats;
+
+ const u32 *bus_formats;
+ unsigned int num_bus_formats;
+
+ /* Mask of supported hdmi deep color modes */
+ u8 edid_hdmi_dc_modes;
+
+ u8 cea_rev;
+};
+
+/* data corresponds to displayid vend/prod/serial */
+struct drm_tile_group {
+ struct kref refcount;
+ struct drm_device *dev;
+ int id;
+ u8 group_data[8];
+};
+
+struct drm_framebuffer_funcs {
+ /* note: use drm_framebuffer_remove() */
+ void (*destroy)(struct drm_framebuffer *framebuffer);
+ int (*create_handle)(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle);
+ /*
+ * Optional callback for the dirty fb ioctl.
+ *
+	 * Userspace can notify the driver via this callback
+	 * that an area of the framebuffer has changed and should
+	 * be flushed to the display hardware.
+	 *
+	 * See the documentation of struct drm_mode_fb_dirty_cmd in
+	 * drm_mode.h for more information; the semantics and arguments
+	 * map one to one onto this callback.
+ */
+ int (*dirty)(struct drm_framebuffer *framebuffer,
+ struct drm_file *file_priv, unsigned flags,
+ unsigned color, struct drm_clip_rect *clips,
+ unsigned num_clips);
+};
+
+struct drm_framebuffer {
+ struct drm_device *dev;
+ /*
+ * Note that the fb is refcounted for the benefit of driver internals,
+	 * for example, on some hardware disabling a CRTC/plane is asynchronous and
+	 * scanout does not actually complete until the next vblank. So some
+	 * cleanup (like releasing the reference(s) on the backing GEM bo(s))
+	 * should be deferred. In cases like this, the driver would like to
+	 * hold a ref to the fb even though it has already been removed from
+	 * the userspace perspective.
+ */
+ struct kref refcount;
+ /*
+ * Place on the dev->mode_config.fb_list, access protected by
+ * dev->mode_config.fb_lock.
+ */
+ struct list_head head;
+ struct drm_mode_object base;
+ const struct drm_framebuffer_funcs *funcs;
+ unsigned int pitches[4];
+ unsigned int offsets[4];
+ uint64_t modifier[4];
+ unsigned int width;
+ unsigned int height;
+ /* depth can be 15 or 16 */
+ unsigned int depth;
+ int bits_per_pixel;
+ int flags;
+ uint32_t pixel_format; /* fourcc format */
+ struct list_head filp_head;
+ /* if you are using the helper */
+ void *helper_private;
+};
+
+struct drm_property_blob {
+ struct drm_mode_object base;
+ struct list_head head;
+ size_t length;
+ unsigned char data[];
+};
+
+struct drm_property_enum {
+ uint64_t value;
+ struct list_head head;
+ char name[DRM_PROP_NAME_LEN];
+};
+
+struct drm_property {
+ struct list_head head;
+ struct drm_mode_object base;
+ uint32_t flags;
+ char name[DRM_PROP_NAME_LEN];
+ uint32_t num_values;
+ uint64_t *values;
+ struct drm_device *dev;
+
+ struct list_head enum_list;
+};
+
+struct drm_crtc;
+struct drm_connector;
+struct drm_encoder;
+struct drm_pending_vblank_event;
+struct drm_plane;
+struct drm_bridge;
+struct drm_atomic_state;
+
+/**
+ * struct drm_crtc_state - mutable CRTC state
+ * @crtc: backpointer to the CRTC
+ * @enable: whether the CRTC should be enabled, gates all other state
+ * @active: whether the CRTC is actively displaying (used for DPMS)
+ * @mode_changed: for use by helpers and drivers when computing state updates
+ * @active_changed: for use by helpers and drivers when computing state updates
+ * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
+ * @last_vblank_count: for helpers and drivers to capture the vblank of the
+ * update to ensure framebuffer cleanup isn't done too early
+ * @planes_changed: for use by helpers and drivers when computing state updates
+ * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
+ * @mode: current mode timings
+ * @event: optional pointer to a DRM event to signal upon completion of the
+ * state update
+ * @state: backpointer to global drm_atomic_state
+ *
+ * Note that the distinction between @enable and @active is rather subtle:
+ * Flipping @active while @enable is set, without changing anything else, must
+ * never fail in the ->atomic_check callback. Userspace assumes that a DPMS On
+ * will always succeed. In other words: @enable controls resource assignment,
+ * @active controls the actual hardware state.
+ */
+struct drm_crtc_state {
+ struct drm_crtc *crtc;
+
+ bool enable;
+ bool active;
+
+ /* computed state bits used by helpers and drivers */
+ bool planes_changed : 1;
+ bool mode_changed : 1;
+ bool active_changed : 1;
+
+ /* attached planes bitmask:
+ * WARNING: transitional helpers do not maintain plane_mask so
+ * drivers not converted over to atomic helpers should not rely
+ * on plane_mask being accurate!
+ */
+ u32 plane_mask;
+
+ /* last_vblank_count: for vblank waits before cleanup */
+ u32 last_vblank_count;
+
+ /* adjusted_mode: for use by helpers and drivers */
+ struct drm_display_mode adjusted_mode;
+
+ struct drm_display_mode mode;
+
+ struct drm_pending_vblank_event *event;
+
+ struct drm_atomic_state *state;
+};
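Editor's note: to make the @enable/@active rule above concrete, a CRTC ->atomic_check should validate only what @enable implies (mode and resources), so that toggling @active alone can never fail. A rough, hypothetical sketch (FOO_MAX_CLOCK_KHZ is made up):

static int foo_crtc_check(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	if (!state->enable)
		return 0;	/* no resources requested, nothing to validate */

	/* Only the requested timings are checked; a pure ->active toggle
	 * on an enabled CRTC therefore always passes. */
	if (state->mode.clock > FOO_MAX_CLOCK_KHZ)
		return -EINVAL;

	return 0;
}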
+
+/**
+ * struct drm_crtc_funcs - control CRTCs for a given device
+ * @save: save CRTC state
+ * @restore: restore CRTC state
+ * @reset: reset CRTC after state has been invalidated (e.g. resume)
+ * @cursor_set: setup the cursor
+ * @cursor_set2: setup the cursor with hotspot, supersedes @cursor_set if set
+ * @cursor_move: move the cursor
+ * @gamma_set: specify color ramp for CRTC
+ * @destroy: deinit and free object
+ * @set_property: called when a property is changed
+ * @set_config: apply a new CRTC configuration
+ * @page_flip: initiate a page flip
+ * @atomic_duplicate_state: duplicate the atomic state for this CRTC
+ * @atomic_destroy_state: destroy an atomic state for this CRTC
+ * @atomic_set_property: set a property on an atomic state for this CRTC
+ * (do not call directly, use drm_atomic_crtc_set_property())
+ * @atomic_get_property: get a property on an atomic state for this CRTC
+ * (do not call directly, use drm_atomic_crtc_get_property())
+ *
+ * The drm_crtc_funcs structure is the central CRTC management structure
+ * in the DRM. Each CRTC controls one or more connectors (note that the name
+ * CRTC is simply historical, a CRTC may control LVDS, VGA, DVI, TV out, etc.
+ * connectors, not just CRTs).
+ *
+ * Each driver is responsible for filling out this structure at startup time,
+ * in addition to providing other modesetting features, like i2c and DDC
+ * bus accessors.
+ */
+struct drm_crtc_funcs {
+ /* Save CRTC state */
+ void (*save)(struct drm_crtc *crtc); /* suspend? */
+ /* Restore CRTC state */
+ void (*restore)(struct drm_crtc *crtc); /* resume? */
+ /* Reset CRTC state */
+ void (*reset)(struct drm_crtc *crtc);
+
+ /* cursor controls */
+ int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height);
+ int (*cursor_set2)(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height,
+ int32_t hot_x, int32_t hot_y);
+ int (*cursor_move)(struct drm_crtc *crtc, int x, int y);
+
+ /* Set gamma on the CRTC */
+ void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+ uint32_t start, uint32_t size);
+ /* Object destroy routine */
+ void (*destroy)(struct drm_crtc *crtc);
+
+ int (*set_config)(struct drm_mode_set *set);
+
+ /*
+	 * Flip to the given framebuffer. This implements the page
+	 * flip ioctl described in drm_mode.h; specifically, the
+	 * implementation must return immediately and block all
+	 * rendering to the current fb until the flip has completed.
+ * If userspace set the event flag in the ioctl, the event
+ * argument will point to an event to send back when the flip
+ * completes, otherwise it will be NULL.
+ */
+ int (*page_flip)(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags);
+
+ int (*set_property)(struct drm_crtc *crtc,
+ struct drm_property *property, uint64_t val);
+
+ /* atomic update handling */
+ struct drm_crtc_state *(*atomic_duplicate_state)(struct drm_crtc *crtc);
+ void (*atomic_destroy_state)(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+ int (*atomic_set_property)(struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t val);
+ int (*atomic_get_property)(struct drm_crtc *crtc,
+ const struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t *val);
+};
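Editor's note: for atomic drivers this table is typically filled almost entirely with the helpers declared in drm_atomic_helper.h earlier in this patch; a minimal sketch (foo_ is a placeholder prefix):

static const struct drm_crtc_funcs foo_crtc_funcs = {
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
	.destroy		= drm_crtc_cleanup,
	.reset			= drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
};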
+
+/**
+ * struct drm_crtc - central CRTC control structure
+ * @dev: parent DRM device
+ * @port: OF node used by drm_of_find_possible_crtcs()
+ * @head: list management
+ * @mutex: per-CRTC locking
+ * @base: base KMS object for ID tracking etc.
+ * @primary: primary plane for this CRTC
+ * @cursor: cursor plane for this CRTC
+ * @cursor_x: current x position of the cursor, used for universal cursor planes
+ * @cursor_y: current y position of the cursor, used for universal cursor planes
+ * @enabled: is this CRTC enabled?
+ * @mode: current mode timings
+ * @hwmode: mode timings as programmed to hw regs
+ * @invert_dimensions: for purposes of error checking crtc vs fb sizes,
+ * invert the width/height of the crtc. This is used if the driver
+ * is performing 90 or 270 degree rotated scanout
+ * @x: x position on screen
+ * @y: y position on screen
+ * @funcs: CRTC control functions
+ * @gamma_size: size of gamma ramp
+ * @gamma_store: gamma ramp values
+ * @framedur_ns: precise frame timing
+ * @linedur_ns: precise line timing
+ * @pixeldur_ns: precise pixel timing
+ * @helper_private: mid-layer private data
+ * @properties: property tracking for this CRTC
+ * @state: current atomic state for this CRTC
+ * @acquire_ctx: per-CRTC implicit acquire context used by atomic drivers for
+ * legacy ioctls
+ *
+ * Each CRTC may have one or more connectors associated with it. This structure
+ * allows the CRTC to be controlled.
+ */
+struct drm_crtc {
+ struct drm_device *dev;
+ struct device_node *port;
+ struct list_head head;
+
+ /*
+ * crtc mutex
+ *
+ * This provides a read lock for the overall crtc state (mode, dpms
+	 * state, ...) and a write lock for everything which can be updated
+ * without a full modeset (fb, cursor data, ...)
+ */
+ struct drm_modeset_lock mutex;
+
+ struct drm_mode_object base;
+
+ /* primary and cursor planes for CRTC */
+ struct drm_plane *primary;
+ struct drm_plane *cursor;
+
+ /* position of cursor plane on crtc */
+ int cursor_x;
+ int cursor_y;
+
+ bool enabled;
+
+ /* Requested mode from modesetting. */
+ struct drm_display_mode mode;
+
+ /* Programmed mode in hw, after adjustments for encoders,
+ * crtc, panel scaling etc. Needed for timestamping etc.
+ */
+ struct drm_display_mode hwmode;
+
+ bool invert_dimensions;
+
+ int x, y;
+ const struct drm_crtc_funcs *funcs;
+
+ /* CRTC gamma size for reporting to userspace */
+ uint32_t gamma_size;
+ uint16_t *gamma_store;
+
+ /* Constants needed for precise vblank and swap timestamping. */
+ int framedur_ns, linedur_ns, pixeldur_ns;
+
+ /* if you are using the helper */
+ const void *helper_private;
+
+ struct drm_object_properties properties;
+
+ struct drm_crtc_state *state;
+
+ /*
+ * For legacy crtc ioctls so that atomic drivers can get at the locking
+ * acquire context.
+ */
+ struct drm_modeset_acquire_ctx *acquire_ctx;
+};
+
+/**
+ * struct drm_connector_state - mutable connector state
+ * @connector: backpointer to the connector
+ * @crtc: CRTC to connect connector to, NULL if disabled
+ * @best_encoder: can be used by helpers and drivers to select the encoder
+ * @state: backpointer to global drm_atomic_state
+ */
+struct drm_connector_state {
+ struct drm_connector *connector;
+
+ struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_connector() */
+
+ struct drm_encoder *best_encoder;
+
+ struct drm_atomic_state *state;
+};
+
+/**
+ * struct drm_connector_funcs - control connectors on a given device
+ * @dpms: set power state
+ * @save: save connector state
+ * @restore: restore connector state
+ * @reset: reset connector after state has been invalidated (e.g. resume)
+ * @detect: is this connector active?
+ * @fill_modes: fill mode list for this connector
+ * @set_property: property for this connector may need an update
+ * @destroy: make object go away
+ * @force: notify the driver that the connector is forced on
+ * @atomic_duplicate_state: duplicate the atomic state for this connector
+ * @atomic_destroy_state: destroy an atomic state for this connector
+ * @atomic_set_property: set a property on an atomic state for this connector
+ * (do not call directly, use drm_atomic_connector_set_property())
+ * @atomic_get_property: get a property on an atomic state for this connector
+ * (do not call directly, use drm_atomic_connector_get_property())
+ *
+ * Each CRTC may have one or more connectors attached to it. The functions
+ * below allow the core DRM code to control connectors, enumerate available modes,
+ * etc.
+ */
+struct drm_connector_funcs {
+ void (*dpms)(struct drm_connector *connector, int mode);
+ void (*save)(struct drm_connector *connector);
+ void (*restore)(struct drm_connector *connector);
+ void (*reset)(struct drm_connector *connector);
+
+ /* Check to see if anything is attached to the connector.
+ * @force is set to false whilst polling, true when checking the
+ * connector due to user request. @force can be used by the driver
+ * to avoid expensive, destructive operations during automated
+ * probing.
+ */
+ enum drm_connector_status (*detect)(struct drm_connector *connector,
+ bool force);
+ int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
+ int (*set_property)(struct drm_connector *connector, struct drm_property *property,
+ uint64_t val);
+ void (*destroy)(struct drm_connector *connector);
+ void (*force)(struct drm_connector *connector);
+
+ /* atomic update handling */
+ struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector);
+ void (*atomic_destroy_state)(struct drm_connector *connector,
+ struct drm_connector_state *state);
+ int (*atomic_set_property)(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t val);
+ int (*atomic_get_property)(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val);
+};
+
+/**
+ * struct drm_encoder_funcs - encoder controls
+ * @reset: reset state (e.g. at init or resume time)
+ * @destroy: cleanup and free associated data
+ *
+ * Encoders sit between CRTCs and connectors.
+ */
+struct drm_encoder_funcs {
+ void (*reset)(struct drm_encoder *encoder);
+ void (*destroy)(struct drm_encoder *encoder);
+};
+
+#define DRM_CONNECTOR_MAX_ENCODER 3
+
+/**
+ * struct drm_encoder - central DRM encoder structure
+ * @dev: parent DRM device
+ * @head: list management
+ * @base: base KMS object
+ * @name: encoder name
+ * @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h
+ * @possible_crtcs: bitmask of potential CRTC bindings
+ * @possible_clones: bitmask of potential sibling encoders for cloning
+ * @crtc: currently bound CRTC
+ * @bridge: bridge associated to the encoder
+ * @funcs: control functions
+ * @helper_private: mid-layer private data
+ *
+ * CRTCs drive pixels to encoders, which convert them into signals
+ * appropriate for a given connector or set of connectors.
+ */
+struct drm_encoder {
+ struct drm_device *dev;
+ struct list_head head;
+
+ struct drm_mode_object base;
+ char *name;
+ int encoder_type;
+ uint32_t possible_crtcs;
+ uint32_t possible_clones;
+
+ struct drm_crtc *crtc;
+ struct drm_bridge *bridge;
+ const struct drm_encoder_funcs *funcs;
+ const void *helper_private;
+};
+
+/* should we poll this connector for connects and disconnects */
+/* hot plug detectable */
+#define DRM_CONNECTOR_POLL_HPD (1 << 0)
+/* poll for connections */
+#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
+/* can cleanly poll for disconnections without flickering the screen */
+/* DACs should rarely do this without a lot of testing */
+#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
+
+#define MAX_ELD_BYTES 128
+
+/**
+ * struct drm_connector - central DRM connector control structure
+ * @dev: parent DRM device
+ * @kdev: kernel device for sysfs attributes
+ * @attr: sysfs attributes
+ * @head: list management
+ * @base: base KMS object
+ * @name: connector name
+ * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
+ * @connector_type_id: index into connector type enum
+ * @interlace_allowed: can this connector handle interlaced modes?
+ * @doublescan_allowed: can this connector handle doublescan?
+ * @stereo_allowed: can this connector handle stereo modes?
+ * @modes: modes available on this connector (from fill_modes() + user)
+ * @status: one of the drm_connector_status enums (connected, not, or unknown)
+ * @probed_modes: list of modes derived directly from the display
+ * @display_info: information about attached display (e.g. from EDID)
+ * @funcs: connector control functions
+ * @edid_blob_ptr: DRM property containing EDID if present
+ * @properties: property tracking for this connector
+ * @path_blob_ptr: DRM blob property data for the DP MST path property
+ * @tile_blob_ptr: DRM blob property data for the tile property
+ * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
+ * @dpms: current dpms state
+ * @helper_private: mid-layer private data
+ * @cmdline_mode: mode line parsed from the kernel cmdline for this connector
+ * @force: a %DRM_FORCE_<foo> state for forced mode sets
+ * @override_edid: has the EDID been overwritten through debugfs for testing?
+ * @encoder_ids: valid encoders for this connector
+ * @encoder: encoder driving this connector, if any
+ * @eld: EDID-like data, if present
+ * @dvi_dual: dual link DVI, if found
+ * @max_tmds_clock: max clock rate, if found
+ * @latency_present: AV delay info from ELD, if found
+ * @video_latency: video latency info from ELD, if found
+ * @audio_latency: audio latency info from ELD, if found
+ * @null_edid_counter: track sinks that give us all zeros for the EDID
+ * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
+ * @debugfs_entry: debugfs directory for this connector
+ * @state: current atomic state for this connector
+ * @has_tile: is this connector connected to a tiled monitor
+ * @tile_group: tile group for the connected monitor
+ * @tile_is_single_monitor: whether the tile is one monitor housing
+ * @num_h_tile: number of horizontal tiles in the tile group
+ * @num_v_tile: number of vertical tiles in the tile group
+ * @tile_h_loc: horizontal location of this tile
+ * @tile_v_loc: vertical location of this tile
+ * @tile_h_size: horizontal size of this tile.
+ * @tile_v_size: vertical size of this tile.
+ *
+ * Each connector may be connected to one or more CRTCs, or may be clonable by
+ * another connector if they can share a CRTC. Each connector also has a specific
+ * position in the broader display (referred to as a 'screen' though it could
+ * span multiple monitors).
+ */
+struct drm_connector {
+ struct drm_device *dev;
+ struct device *kdev;
+ struct device_attribute *attr;
+ struct list_head head;
+
+ struct drm_mode_object base;
+
+ char *name;
+ int connector_type;
+ int connector_type_id;
+ bool interlace_allowed;
+ bool doublescan_allowed;
+ bool stereo_allowed;
+ struct list_head modes; /* list of modes on this connector */
+
+ enum drm_connector_status status;
+
+ /* these are modes added by probing with DDC or the BIOS */
+ struct list_head probed_modes;
+
+ struct drm_display_info display_info;
+ const struct drm_connector_funcs *funcs;
+
+ struct drm_property_blob *edid_blob_ptr;
+ struct drm_object_properties properties;
+
+ struct drm_property_blob *path_blob_ptr;
+
+ struct drm_property_blob *tile_blob_ptr;
+
+ uint8_t polled; /* DRM_CONNECTOR_POLL_* */
+
+ /* requested DPMS state */
+ int dpms;
+
+ const void *helper_private;
+
+ /* forced on connector */
+ struct drm_cmdline_mode cmdline_mode;
+ enum drm_connector_force force;
+ bool override_edid;
+ uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
+ struct drm_encoder *encoder; /* currently active encoder */
+
+ /* EDID bits */
+ uint8_t eld[MAX_ELD_BYTES];
+ bool dvi_dual;
+ int max_tmds_clock; /* in MHz */
+ bool latency_present[2];
+ int video_latency[2]; /* [0]: progressive, [1]: interlaced */
+ int audio_latency[2];
+ int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
+ unsigned bad_edid_counter;
+
+ struct dentry *debugfs_entry;
+
+ struct drm_connector_state *state;
+
+ /* DisplayID bits */
+ bool has_tile;
+ struct drm_tile_group *tile_group;
+ bool tile_is_single_monitor;
+
+ uint8_t num_h_tile, num_v_tile;
+ uint8_t tile_h_loc, tile_v_loc;
+ uint16_t tile_h_size, tile_v_size;
+
+ struct list_head destroy_list;
+};
+
+/**
+ * struct drm_plane_state - mutable plane state
+ * @plane: backpointer to the plane
+ * @crtc: currently bound CRTC, NULL if disabled
+ * @fb: currently bound framebuffer
+ * @fence: optional fence to wait for before scanning out @fb
+ * @crtc_x: left position of visible portion of plane on crtc
+ * @crtc_y: upper position of visible portion of plane on crtc
+ * @crtc_w: width of visible portion of plane on crtc
+ * @crtc_h: height of visible portion of plane on crtc
+ * @src_x: left position of visible portion of plane within
+ * plane (in 16.16)
+ * @src_y: upper position of visible portion of plane within
+ * plane (in 16.16)
+ * @src_w: width of visible portion of plane (in 16.16)
+ * @src_h: height of visible portion of plane (in 16.16)
+ * @state: backpointer to global drm_atomic_state
+ */
+struct drm_plane_state {
+ struct drm_plane *plane;
+
+ struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */
+ struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */
+ struct fence *fence;
+
+ /* Signed dest location allows it to be partially off screen */
+ int32_t crtc_x, crtc_y;
+ uint32_t crtc_w, crtc_h;
+
+ /* Source values are 16.16 fixed point */
+ uint32_t src_x, src_y;
+ uint32_t src_h, src_w;
+
+ /* Plane rotation */
+ unsigned int rotation;
+
+ struct drm_atomic_state *state;
+};
+
+
+/**
+ * struct drm_plane_funcs - driver plane control functions
+ * @update_plane: update the plane configuration
+ * @disable_plane: shut down the plane
+ * @destroy: clean up plane resources
+ * @reset: reset plane after state has been invalidated (e.g. resume)
+ * @set_property: called when a property is changed
+ * @atomic_duplicate_state: duplicate the atomic state for this plane
+ * @atomic_destroy_state: destroy an atomic state for this plane
+ * @atomic_set_property: set a property on an atomic state for this plane
+ * (do not call directly, use drm_atomic_plane_set_property())
+ * @atomic_get_property: get a property on an atomic state for this plane
+ * (do not call directly, use drm_atomic_plane_get_property())
+ */
+struct drm_plane_funcs {
+ int (*update_plane)(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+ int (*disable_plane)(struct drm_plane *plane);
+ void (*destroy)(struct drm_plane *plane);
+ void (*reset)(struct drm_plane *plane);
+
+ int (*set_property)(struct drm_plane *plane,
+ struct drm_property *property, uint64_t val);
+
+ /* atomic update handling */
+ struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane);
+ void (*atomic_destroy_state)(struct drm_plane *plane,
+ struct drm_plane_state *state);
+ int (*atomic_set_property)(struct drm_plane *plane,
+ struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t val);
+ int (*atomic_get_property)(struct drm_plane *plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property,
+ uint64_t *val);
+};
+
+enum drm_plane_type {
+ DRM_PLANE_TYPE_OVERLAY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_CURSOR,
+};
+
+/**
+ * struct drm_plane - central DRM plane control structure
+ * @dev: DRM device this plane belongs to
+ * @head: for list management
+ * @base: base mode object
+ * @possible_crtcs: pipes this plane can be bound to
+ * @format_types: array of formats supported by this plane
+ * @format_count: number of formats supported
+ * @format_default: driver hasn't supplied supported formats for the plane
+ * @crtc: currently bound CRTC
+ * @fb: currently bound fb
+ * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
+ * drm_mode_set_config_internal() to implement correct refcounting.
+ * @funcs: plane control functions
+ * @properties: property tracking for this plane
+ * @type: type of plane (overlay, primary, cursor)
+ * @state: current atomic state for this plane
+ */
+struct drm_plane {
+ struct drm_device *dev;
+ struct list_head head;
+
+ struct drm_modeset_lock mutex;
+
+ struct drm_mode_object base;
+
+ uint32_t possible_crtcs;
+ uint32_t *format_types;
+ uint32_t format_count;
+ bool format_default;
+
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+
+ struct drm_framebuffer *old_fb;
+
+ const struct drm_plane_funcs *funcs;
+
+ struct drm_object_properties properties;
+
+ enum drm_plane_type type;
+
+ const void *helper_private;
+
+ struct drm_plane_state *state;
+};
+
+/**
+ * struct drm_bridge_funcs - drm_bridge control functions
+ * @attach: Called during drm_bridge_attach
+ * @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge
+ * @disable: Called right before encoder prepare, disables the bridge
+ * @post_disable: Called right after encoder prepare, for lockstepped disable
+ * @mode_set: Set this mode to the bridge
+ * @pre_enable: Called right before encoder commit, for lockstepped commit
+ * @enable: Called right after encoder commit, enables the bridge
+ */
+struct drm_bridge_funcs {
+ int (*attach)(struct drm_bridge *bridge);
+ bool (*mode_fixup)(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*disable)(struct drm_bridge *bridge);
+ void (*post_disable)(struct drm_bridge *bridge);
+ void (*mode_set)(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*pre_enable)(struct drm_bridge *bridge);
+ void (*enable)(struct drm_bridge *bridge);
+};
+
+/**
+ * struct drm_bridge - central DRM bridge control structure
+ * @dev: DRM device this bridge belongs to
+ * @encoder: encoder this bridge is attached to
+ * @of_node: device node pointer to the bridge
+ * @list: to keep track of all added bridges
+ * @funcs: control functions
+ * @driver_private: pointer to the bridge driver's internal context
+ */
+struct drm_bridge {
+ struct drm_device *dev;
+ struct drm_encoder *encoder;
+#ifdef CONFIG_OF
+ struct device_node *of_node;
+#endif
+ struct list_head list;
+
+ const struct drm_bridge_funcs *funcs;
+ void *driver_private;
+};
+
+/**
+ * struct drm_atomic_state - the global state object for atomic updates
+ * @dev: parent DRM device
+ * @allow_modeset: allow full modeset
+ * @legacy_cursor_update: hint to enforce legacy cursor ioctl semantics
+ * @planes: pointer to array of plane pointers
+ * @plane_states: pointer to array of plane states pointers
+ * @crtcs: pointer to array of CRTC pointers
+ * @crtc_states: pointer to array of CRTC states pointers
+ * @num_connector: size of the @connectors and @connector_states arrays
+ * @connectors: pointer to array of connector pointers
+ * @connector_states: pointer to array of connector states pointers
+ * @acquire_ctx: acquire context for this atomic modeset state update
+ */
+struct drm_atomic_state {
+ struct drm_device *dev;
+ bool allow_modeset : 1;
+ bool legacy_cursor_update : 1;
+ struct drm_plane **planes;
+ struct drm_plane_state **plane_states;
+ struct drm_crtc **crtcs;
+ struct drm_crtc_state **crtc_states;
+ int num_connector;
+ struct drm_connector **connectors;
+ struct drm_connector_state **connector_states;
+
+ struct drm_modeset_acquire_ctx *acquire_ctx;
+};
+
+
+/**
+ * struct drm_mode_set - new values for a CRTC config change
+ * @fb: framebuffer to use for new config
+ * @crtc: CRTC whose configuration we're about to change
+ * @mode: mode timings to use
+ * @x: position of this CRTC relative to @fb
+ * @y: position of this CRTC relative to @fb
+ * @connectors: array of connectors to drive with this CRTC if possible
+ * @num_connectors: size of @connectors array
+ *
+ * Represents a single CRTC, the connectors that it drives, the mode to use
+ * and the framebuffer to scan out from.
+ *
+ * This is used to set modes.
+ */
+struct drm_mode_set {
+ struct drm_framebuffer *fb;
+ struct drm_crtc *crtc;
+ struct drm_display_mode *mode;
+
+ uint32_t x;
+ uint32_t y;
+
+ struct drm_connector **connectors;
+ size_t num_connectors;
+};
+
+/**
+ * struct drm_mode_config_funcs - basic driver provided mode setting functions
+ * @fb_create: create a new framebuffer object
+ * @output_poll_changed: function to handle output configuration changes
+ * @atomic_check: check whether a given atomic state update is possible
+ * @atomic_commit: commit an atomic state update previously verified with
+ * atomic_check()
+ *
+ * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
+ * involve drivers.
+ */
+struct drm_mode_config_funcs {
+ struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_mode_fb_cmd2 *mode_cmd);
+ void (*output_poll_changed)(struct drm_device *dev);
+
+ int (*atomic_check)(struct drm_device *dev,
+ struct drm_atomic_state *a);
+ int (*atomic_commit)(struct drm_device *dev,
+ struct drm_atomic_state *a,
+ bool async);
+};
+
+/**
+ * struct drm_mode_group - group of mode setting resources for potential sub-grouping
+ * @num_crtcs: CRTC count
+ * @num_encoders: encoder count
+ * @num_connectors: connector count
+ * @id_list: list of KMS object IDs in this group
+ *
+ * Currently this simply tracks the global mode setting state. But in the
+ * future it could allow groups of objects to be set aside into independent
+ * control groups for use by different user level processes (e.g. two X servers
+ * running simultaneously on different heads, each with their own mode
+ * configuration and freedom of mode setting).
+ */
+struct drm_mode_group {
+ uint32_t num_crtcs;
+ uint32_t num_encoders;
+ uint32_t num_connectors;
+
+ /* list of object IDs for this group */
+ uint32_t *id_list;
+};
+
+/**
+ * struct drm_mode_config - Mode configuration control structure
+ * @mutex: mutex protecting KMS related lists and structures
+ * @connection_mutex: ww mutex protecting connector state and routing
+ * @acquire_ctx: global implicit acquire context used by atomic drivers for
+ * legacy ioctls
+ * @idr_mutex: mutex for KMS ID allocation and management
+ * @crtc_idr: main KMS ID tracking object
+ * @fb_lock: mutex to protect fb state and lists
+ * @num_fb: number of fbs available
+ * @fb_list: list of framebuffers available
+ * @num_connector: number of connectors on this device
+ * @connector_list: list of connector objects
+ * @num_encoder: number of encoders on this device
+ * @encoder_list: list of encoder objects
+ * @num_overlay_plane: number of overlay planes on this device
+ * @num_total_plane: number of universal (i.e. with primary/cursor) planes on this device
+ * @plane_list: list of plane objects
+ * @num_crtc: number of CRTCs on this device
+ * @crtc_list: list of CRTC objects
+ * @property_list: list of property objects
+ * @min_width: minimum pixel width on this device
+ * @min_height: minimum pixel height on this device
+ * @max_width: maximum pixel width on this device
+ * @max_height: maximum pixel height on this device
+ * @funcs: core driver provided mode setting functions
+ * @fb_base: base address of the framebuffer
+ * @poll_enabled: track polling support for this device
+ * @poll_running: track polling status for this device
+ * @output_poll_work: delayed work for polling in process context
+ * @property_blob_list: list of all the blob property objects
+ * @*_property: core property tracking
+ * @preferred_depth: preferred RGB pixel depth, used by fb helpers
+ * @prefer_shadow: hint to userspace to prefer shadow-fb rendering
+ * @async_page_flip: does this device support async flips on the primary plane?
+ * @cursor_width: hint to userspace for max cursor width
+ * @cursor_height: hint to userspace for max cursor height
+ *
+ * Core mode resource tracking structure. All CRTCs, encoders, and connectors
+ * enumerated by the driver are added here, as are global properties. Some
+ * global restrictions are also here, e.g. dimension restrictions.
+ */
+struct drm_mode_config {
+ struct mutex mutex; /* protects configuration (mode lists etc.) */
+ struct drm_modeset_lock connection_mutex; /* protects connector->encoder and encoder->crtc links */
+ struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
+ struct mutex idr_mutex; /* for IDR management */
+ struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
+	struct idr tile_idr; /* identifiers for tile groups */
+ /* this is limited to one for now */
+
+	struct mutex fb_lock; /* protects global and per-file fb lists */
+ int num_fb;
+ struct list_head fb_list;
+
+ int num_connector;
+ struct list_head connector_list;
+ int num_encoder;
+ struct list_head encoder_list;
+
+ /*
+ * Track # of overlay planes separately from # of total planes. By
+ * default we only advertise overlay planes to userspace; if userspace
+ * sets the "universal plane" capability bit, we'll go ahead and
+ * expose all planes.
+ */
+ int num_overlay_plane;
+ int num_total_plane;
+ struct list_head plane_list;
+
+ int num_crtc;
+ struct list_head crtc_list;
+
+ struct list_head property_list;
+
+ int min_width, min_height;
+ int max_width, max_height;
+ const struct drm_mode_config_funcs *funcs;
+ resource_size_t fb_base;
+
+ /* output poll support */
+ bool poll_enabled;
+ bool poll_running;
+ bool delayed_event;
+ struct delayed_work output_poll_work;
+
+ /* pointers to standard properties */
+ struct list_head property_blob_list;
+ struct drm_property *edid_property;
+ struct drm_property *dpms_property;
+ struct drm_property *path_property;
+ struct drm_property *tile_property;
+ struct drm_property *plane_type_property;
+ struct drm_property *rotation_property;
+ struct drm_property *prop_src_x;
+ struct drm_property *prop_src_y;
+ struct drm_property *prop_src_w;
+ struct drm_property *prop_src_h;
+ struct drm_property *prop_crtc_x;
+ struct drm_property *prop_crtc_y;
+ struct drm_property *prop_crtc_w;
+ struct drm_property *prop_crtc_h;
+ struct drm_property *prop_fb_id;
+ struct drm_property *prop_crtc_id;
+ struct drm_property *prop_active;
+
+ /* DVI-I properties */
+ struct drm_property *dvi_i_subconnector_property;
+ struct drm_property *dvi_i_select_subconnector_property;
+
+ /* TV properties */
+ struct drm_property *tv_subconnector_property;
+ struct drm_property *tv_select_subconnector_property;
+ struct drm_property *tv_mode_property;
+ struct drm_property *tv_left_margin_property;
+ struct drm_property *tv_right_margin_property;
+ struct drm_property *tv_top_margin_property;
+ struct drm_property *tv_bottom_margin_property;
+ struct drm_property *tv_brightness_property;
+ struct drm_property *tv_contrast_property;
+ struct drm_property *tv_flicker_reduction_property;
+ struct drm_property *tv_overscan_property;
+ struct drm_property *tv_saturation_property;
+ struct drm_property *tv_hue_property;
+
+ /* Optional properties */
+ struct drm_property *scaling_mode_property;
+ struct drm_property *aspect_ratio_property;
+ struct drm_property *dirty_info_property;
+
+ /* properties for virtual machine layout */
+ struct drm_property *suggested_x_property;
+ struct drm_property *suggested_y_property;
+
+ /* dumb ioctl parameters */
+ uint32_t preferred_depth, prefer_shadow;
+
+ /* whether async page flip is supported or not */
+ bool async_page_flip;
+
+ /* whether the driver supports fb modifiers */
+ bool allow_fb_modifiers;
+
+ /* cursor size */
+ uint32_t cursor_width, cursor_height;
+};
+
+/**
+ * drm_for_each_plane_mask - iterate over planes specified by bitmask
+ * @plane: the loop cursor
+ * @dev: the DRM device
+ * @plane_mask: bitmask of plane indices
+ *
+ * Iterate over all planes specified by bitmask.
+ */
+#define drm_for_each_plane_mask(plane, dev, plane_mask) \
+ list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
+ if ((plane_mask) & (1 << drm_plane_index(plane)))
+
+
+#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
+#define obj_to_connector(x) container_of(x, struct drm_connector, base)
+#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
+#define obj_to_mode(x) container_of(x, struct drm_display_mode, base)
+#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
+#define obj_to_property(x) container_of(x, struct drm_property, base)
+#define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
+#define obj_to_plane(x) container_of(x, struct drm_plane, base)
+
+struct drm_prop_enum_list {
+ int type;
+ char *name;
+};
+
+extern int drm_crtc_init_with_planes(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ const struct drm_crtc_funcs *funcs);
+extern void drm_crtc_cleanup(struct drm_crtc *crtc);
+extern unsigned int drm_crtc_index(struct drm_crtc *crtc);
+
+/**
+ * drm_crtc_mask - find the mask of a registered CRTC
+ * @crtc: CRTC to find mask for
+ *
+ * Given a registered CRTC, return the mask bit of that CRTC for an
+ * encoder's possible_crtcs field.
+ */
+static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc)
+{
+ return 1 << drm_crtc_index(crtc);
+}
+
+extern void drm_connector_ida_init(void);
+extern void drm_connector_ida_destroy(void);
+extern int drm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type);
+int drm_connector_register(struct drm_connector *connector);
+void drm_connector_unregister(struct drm_connector *connector);
+
+extern void drm_connector_cleanup(struct drm_connector *connector);
+extern unsigned int drm_connector_index(struct drm_connector *connector);
+/* helper to unplug all connectors from sysfs for device */
+extern void drm_connector_unplug_all(struct drm_device *dev);
+
+extern int drm_bridge_add(struct drm_bridge *bridge);
+extern void drm_bridge_remove(struct drm_bridge *bridge);
+extern struct drm_bridge *of_drm_find_bridge(struct device_node *np);
+extern int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge);
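Editor's note: the usual split, sketched here as an assumption based on how existing bridge users are written (foo_ names are placeholders): the bridge driver calls drm_bridge_add() from its own probe, and the KMS driver later links the bridge to an encoder and calls drm_bridge_attach():

static int foo_encoder_attach_bridge(struct drm_device *dev,
				     struct drm_encoder *encoder,
				     struct drm_bridge *bridge)
{
	/* Link encoder and bridge both ways before attaching (assumption). */
	encoder->bridge = bridge;
	bridge->encoder = encoder;

	return drm_bridge_attach(dev, bridge);
}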
+
+extern int drm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type);
+
+/**
+ * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
+ * @encoder: encoder to test
+ * @crtc: crtc to test
+ *
+ * Return false if @encoder can't be driven by @crtc, true otherwise.
+ */
+static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
+ struct drm_crtc *crtc)
+{
+ return !!(encoder->possible_crtcs & drm_crtc_mask(crtc));
+}
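Editor's note: for example, a driver that wants an encoder usable on a single CRTC only could build possible_crtcs from drm_crtc_mask() right after registering the encoder (sketch; foo_encoder_funcs is hypothetical):

static int foo_encoder_setup(struct drm_device *dev,
			     struct drm_encoder *encoder,
			     struct drm_crtc *crtc)
{
	int ret;

	ret = drm_encoder_init(dev, encoder, &foo_encoder_funcs,
			       DRM_MODE_ENCODER_TMDS);
	if (ret)
		return ret;

	/* Restrict this encoder to the given CRTC. */
	encoder->possible_crtcs = drm_crtc_mask(crtc);
	return 0;
}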
+
+extern int drm_universal_plane_init(struct drm_device *dev,
+ struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats,
+ uint32_t format_count,
+ enum drm_plane_type type);
+extern int drm_plane_init(struct drm_device *dev,
+ struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, uint32_t format_count,
+ bool is_primary);
+extern void drm_plane_cleanup(struct drm_plane *plane);
+extern unsigned int drm_plane_index(struct drm_plane *plane);
+extern void drm_plane_force_disable(struct drm_plane *plane);
+extern int drm_plane_check_pixel_format(const struct drm_plane *plane,
+ u32 format);
+extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
+ int *hdisplay, int *vdisplay);
+extern int drm_crtc_check_viewport(const struct drm_crtc *crtc,
+ int x, int y,
+ const struct drm_display_mode *mode,
+ const struct drm_framebuffer *fb);
+
+extern void drm_encoder_cleanup(struct drm_encoder *encoder);
+
+extern const char *drm_get_connector_status_name(enum drm_connector_status status);
+extern const char *drm_get_subpixel_order_name(enum subpixel_order order);
+extern const char *drm_get_dpms_name(int val);
+extern const char *drm_get_dvi_i_subconnector_name(int val);
+extern const char *drm_get_dvi_i_select_name(int val);
+extern const char *drm_get_tv_subconnector_name(int val);
+extern const char *drm_get_tv_select_name(int val);
+extern void drm_fb_release(struct drm_file *file_priv);
+extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
+extern void drm_mode_group_destroy(struct drm_mode_group *group);
+extern void drm_reinit_primary_mode_group(struct drm_device *dev);
+extern bool drm_probe_ddc(struct i2c_adapter *adapter);
+extern struct edid *drm_get_edid(struct drm_connector *connector,
+ struct i2c_adapter *adapter);
+extern struct edid *drm_edid_duplicate(const struct edid *edid);
+extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
+extern void drm_mode_config_init(struct drm_device *dev);
+extern void drm_mode_config_reset(struct drm_device *dev);
+extern void drm_mode_config_cleanup(struct drm_device *dev);
+
+extern int drm_mode_connector_set_path_property(struct drm_connector *connector,
+ const char *path);
+int drm_mode_connector_set_tile_property(struct drm_connector *connector);
+extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
+ const struct edid *edid);
+
+extern int drm_display_info_set_bus_formats(struct drm_display_info *info,
+ const u32 *formats,
+ unsigned int num_formats);
+
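Editor's note: a short sketch of how a panel or connector driver might report a single fixed bus format through this helper (the RGB888 choice is illustrative; MEDIA_BUS_FMT_* comes from <linux/media-bus-format.h>, already included by this header):

static int foo_report_bus_format(struct drm_connector *connector)
{
	static const u32 bus_formats[] = { MEDIA_BUS_FMT_RGB888_1X24 };

	return drm_display_info_set_bus_formats(&connector->display_info,
						bus_formats,
						ARRAY_SIZE(bus_formats));
}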
+static inline bool drm_property_type_is(struct drm_property *property,
+ uint32_t type)
+{
+	/* instanceof for properties: handles extended types vs. original types */
+ if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
+ return (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) == type;
+ return property->flags & type;
+}
+
+static inline bool drm_property_type_valid(struct drm_property *property)
+{
+ if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
+ return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
+ return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
+}
+
+extern int drm_object_property_set_value(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t val);
+extern int drm_object_property_get_value(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t *value);
+extern int drm_framebuffer_init(struct drm_device *dev,
+ struct drm_framebuffer *fb,
+ const struct drm_framebuffer_funcs *funcs);
+extern struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+ uint32_t id);
+extern void drm_framebuffer_unreference(struct drm_framebuffer *fb);
+extern void drm_framebuffer_reference(struct drm_framebuffer *fb);
+extern void drm_framebuffer_remove(struct drm_framebuffer *fb);
+extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
+extern void drm_framebuffer_unregister_private(struct drm_framebuffer *fb);
+
+extern void drm_object_attach_property(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t init_val);
+extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+ const char *name, int num_values);
+extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
+ const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_values);
+struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
+ int flags, const char *name,
+ const struct drm_prop_enum_list *props,
+ int num_props,
+ uint64_t supported_bits);
+struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
+ const char *name,
+ uint64_t min, uint64_t max);
+struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
+ int flags, const char *name,
+ int64_t min, int64_t max);
+struct drm_property *drm_property_create_object(struct drm_device *dev,
+ int flags, const char *name, uint32_t type);
+struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
+ const char *name);
+extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
+extern int drm_property_add_enum(struct drm_property *property, int index,
+ uint64_t value, const char *name);
+extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
+extern int drm_mode_create_tv_properties(struct drm_device *dev,
+ unsigned int num_modes,
+ char *modes[]);
+extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
+extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
+extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
+extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
+extern bool drm_property_change_valid_get(struct drm_property *property,
+ uint64_t value, struct drm_mode_object **ref);
+extern void drm_property_change_valid_put(struct drm_property *property,
+ struct drm_mode_object *ref);
+
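Editor's note: as a hedged illustration of the property API above, a driver could create a simple range property and attach it to a plane with an initial value (the "alpha" name and 0..255 range are made up for the example):

static int foo_create_alpha_property(struct drm_device *dev,
				     struct drm_plane *plane)
{
	struct drm_property *prop;

	prop = drm_property_create_range(dev, 0, "alpha", 0, 255);
	if (!prop)
		return -ENOMEM;

	/* Attach with an initial value of fully opaque. */
	drm_object_attach_property(&plane->base, prop, 255);
	return 0;
}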
+extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder);
+extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+ int gamma_size);
+extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+ uint32_t id, uint32_t type);
+
+/* IOCTLs */
+extern int drm_mode_getresources(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_getplane_res(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_mode_getcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_getconnector(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_set_config_internal(struct drm_mode_set *set);
+extern int drm_mode_setcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_getplane(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_setplane(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_cursor_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_cursor2_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_addfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_addfb2(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
+extern int drm_mode_rmfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_getfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+
+extern int drm_mode_getproperty_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_getblob_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_getencoder(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
+extern enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
+extern bool drm_detect_hdmi_monitor(struct edid *edid);
+extern bool drm_detect_monitor_audio(struct edid *edid);
+extern bool drm_rgb_quant_range_selectable(struct edid *edid);
+extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_add_modes_noedid(struct drm_connector *connector,
+ int hdisplay, int vdisplay);
+extern void drm_set_preferred_mode(struct drm_connector *connector,
+ int hpref, int vpref);
+
+extern int drm_edid_header_is_valid(const u8 *raw_edid);
+extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
+extern bool drm_edid_is_valid(struct edid *edid);
+
+extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
+ char topology[8]);
+extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
+ char topology[8]);
+extern void drm_mode_put_tile_group(struct drm_device *dev,
+ struct drm_tile_group *tg);
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+ int hsize, int vsize, int fresh,
+ bool rb);
+
+extern int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
+ struct drm_property *property,
+ uint64_t value);
+extern int drm_mode_atomic_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+
+extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+ int *bpp);
+extern int drm_format_num_planes(uint32_t format);
+extern int drm_format_plane_cpp(uint32_t format, int plane);
+extern int drm_format_horz_chroma_subsampling(uint32_t format);
+extern int drm_format_vert_chroma_subsampling(uint32_t format);
+extern const char *drm_get_format_name(uint32_t format);
+extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
+ unsigned int supported_rotations);
+extern unsigned int drm_rotation_simplify(unsigned int rotation,
+ unsigned int supported_rotations);
+
+/* Helpers */
+
+static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_mode_object *mo;
+ mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PLANE);
+ return mo ? obj_to_plane(mo) : NULL;
+}
+
+static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_mode_object *mo;
+ mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CRTC);
+ return mo ? obj_to_crtc(mo) : NULL;
+}
+
+static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_mode_object *mo;
+ mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
+ return mo ? obj_to_encoder(mo) : NULL;
+}
+
+static inline struct drm_connector *drm_connector_find(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_mode_object *mo;
+ mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CONNECTOR);
+ return mo ? obj_to_connector(mo) : NULL;
+}
+
+static inline struct drm_property *drm_property_find(struct drm_device *dev,
+ uint32_t id)
+{
+ struct drm_mode_object *mo;
+ mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PROPERTY);
+ return mo ? obj_to_property(mo) : NULL;
+}
+
+static inline struct drm_property_blob *
+drm_property_blob_find(struct drm_device *dev, uint32_t id)
+{
+ struct drm_mode_object *mo;
+ mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_BLOB);
+ return mo ? obj_to_blob(mo) : NULL;
+}
+
+/* Plane list iterator for legacy (overlay only) planes. */
+#define drm_for_each_legacy_plane(plane, planelist) \
+ list_for_each_entry(plane, planelist, head) \
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY)
+
+#endif /* __DRM_CRTC_H__ */
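/*
 * Editor's note: a minimal usage sketch for the lookup and property helpers
 * declared in this header. It is not part of the imported file; the "my_*"
 * names and the "my-range" property are purely illustrative assumptions.
 */
static int my_attach_crtc_property(struct drm_device *dev, uint32_t crtc_id)
{
	struct drm_crtc *crtc;
	struct drm_property *prop;

	/* Resolve the object ID; drm_crtc_find() returns NULL if it is not a CRTC. */
	crtc = drm_crtc_find(dev, crtc_id);
	if (!crtc)
		return -ENOENT;

	/* Create an integer range property and attach it with a default of 0. */
	prop = drm_property_create_range(dev, 0, "my-range", 0, 255);
	if (!prop)
		return -ENOMEM;

	drm_object_attach_property(&crtc->base, prop, 0);
	return 0;
}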
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
new file mode 100644
index 000000000..c8fc18706
--- /dev/null
+++ b/include/drm/drm_crtc_helper.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright © 2006 Keith Packard
+ * Copyright © 2007-2008 Dave Airlie
+ * Copyright © 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * The DRM mode setting helper functions are common code for drivers to use if
+ * they wish. Drivers are not forced to use this code in their
+ * implementations but it would be useful if the code they do use at least
+ * provides a consistent interface and operation to userspace.
+ */
+
+#ifndef __DRM_CRTC_HELPER_H__
+#define __DRM_CRTC_HELPER_H__
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/idr.h>
+
+#include <linux/fb.h>
+
+#include <drm/drm_crtc.h>
+
+enum mode_set_atomic {
+ LEAVE_ATOMIC_MODE_SET,
+ ENTER_ATOMIC_MODE_SET,
+};
+
+/**
+ * struct drm_crtc_helper_funcs - helper operations for CRTCs
+ * @dpms: set power state
+ * @prepare: prepare the CRTC, called before @mode_set
+ * @commit: commit changes to CRTC, called after @mode_set
+ * @mode_fixup: try to fixup proposed mode for this CRTC
+ * @mode_set: set this mode
+ * @mode_set_nofb: set mode only (no scanout buffer attached)
+ * @mode_set_base: update the scanout buffer
+ * @mode_set_base_atomic: non-blocking mode set (used for kgdb support)
+ * @load_lut: load color palette
+ * @disable: disable CRTC when no longer in use
+ * @enable: enable CRTC
+ * @atomic_check: check for validity of an atomic state
+ * @atomic_begin: begin atomic update
+ * @atomic_flush: flush atomic update
+ *
+ * The helper operations are called by the mid-layer CRTC helper.
+ *
+ * Note that with atomic helpers @dpms, @prepare and @commit hooks are
+ * deprecated. Use @enable and @disable exclusively instead.
+ *
+ * With legacy crtc helpers there's a big semantic difference between @disable
+ * and the other hooks: @disable also needs to release any resources acquired in
+ * @mode_set (like shared PLLs).
+ */
+struct drm_crtc_helper_funcs {
+ /*
+ * Control power levels on the CRTC. If the mode passed in is
+ * unsupported, the provider must use the next lowest power level.
+ */
+ void (*dpms)(struct drm_crtc *crtc, int mode);
+ void (*prepare)(struct drm_crtc *crtc);
+ void (*commit)(struct drm_crtc *crtc);
+
+ /* Provider can fixup or change mode timings before modeset occurs */
+ bool (*mode_fixup)(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ /* Actually set the mode */
+ int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb);
+ /* Actually set the mode for atomic helpers, optional */
+ void (*mode_set_nofb)(struct drm_crtc *crtc);
+
+	/* Move the CRTC on the current fb to the given position (optional) */
+ int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb);
+ int (*mode_set_base_atomic)(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int x, int y,
+ enum mode_set_atomic);
+
+ /* reload the current crtc LUT */
+ void (*load_lut)(struct drm_crtc *crtc);
+
+ void (*disable)(struct drm_crtc *crtc);
+ void (*enable)(struct drm_crtc *crtc);
+
+ /* atomic helpers */
+ int (*atomic_check)(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+ void (*atomic_begin)(struct drm_crtc *crtc);
+ void (*atomic_flush)(struct drm_crtc *crtc);
+};
+
+/**
+ * struct drm_encoder_helper_funcs - helper operations for encoders
+ * @dpms: set power state
+ * @save: save encoder state
+ * @restore: restore encoder state
+ * @mode_fixup: try to fixup proposed mode for this connector
+ * @prepare: part of the disable sequence, called before the CRTC modeset
+ * @commit: called after the CRTC modeset
+ * @mode_set: set this mode, optional for atomic helpers
+ * @get_crtc: return CRTC that the encoder is currently attached to
+ * @detect: connection status detection
+ * @disable: disable encoder when not in use (overrides DPMS off)
+ * @enable: enable encoder
+ * @atomic_check: check for validity of an atomic update
+ *
+ * The helper operations are called by the mid-layer CRTC helper.
+ *
+ * Note that with atomic helpers @dpms, @prepare and @commit hooks are
+ * deprecated. Use @enable and @disable exclusively instead.
+ *
+ * With legacy crtc helpers there's a big semantic difference between @disable
+ * and the other hooks: @disable also needs to release any resources acquired in
+ * @mode_set (like shared PLLs).
+ */
+struct drm_encoder_helper_funcs {
+ void (*dpms)(struct drm_encoder *encoder, int mode);
+ void (*save)(struct drm_encoder *encoder);
+ void (*restore)(struct drm_encoder *encoder);
+
+ bool (*mode_fixup)(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*prepare)(struct drm_encoder *encoder);
+ void (*commit)(struct drm_encoder *encoder);
+ void (*mode_set)(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder);
+ /* detect for DAC style encoders */
+ enum drm_connector_status (*detect)(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+ void (*disable)(struct drm_encoder *encoder);
+
+ void (*enable)(struct drm_encoder *encoder);
+
+ /* atomic helpers */
+ int (*atomic_check)(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+};
+
+/**
+ * struct drm_connector_helper_funcs - helper operations for connectors
+ * @get_modes: get mode list for this connector
+ * @mode_valid: is this mode valid on the given connector? (optional)
+ * @best_encoder: return the preferred encoder for this connector
+ *
+ * The helper operations are called by the mid-layer CRTC helper.
+ */
+struct drm_connector_helper_funcs {
+ int (*get_modes)(struct drm_connector *connector);
+ enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+ struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
+};
+
+extern void drm_helper_disable_unused_functions(struct drm_device *dev);
+extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
+extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb);
+extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
+extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
+
+extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
+
+extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
+
+extern void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+ struct drm_mode_fb_cmd2 *mode_cmd);
+
+static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
+ const struct drm_crtc_helper_funcs *funcs)
+{
+ crtc->helper_private = funcs;
+}
+
+static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
+ const struct drm_encoder_helper_funcs *funcs)
+{
+ encoder->helper_private = funcs;
+}
+
+static inline void drm_connector_helper_add(struct drm_connector *connector,
+ const struct drm_connector_helper_funcs *funcs)
+{
+ connector->helper_private = funcs;
+}
+
+extern void drm_helper_resume_force_mode(struct drm_device *dev);
+
+int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb);
+int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb);
+
+/* drm_probe_helper.c */
+extern int drm_helper_probe_single_connector_modes(struct drm_connector
+ *connector, uint32_t maxX,
+ uint32_t maxY);
+extern int drm_helper_probe_single_connector_modes_nomerge(struct drm_connector
+ *connector,
+ uint32_t maxX,
+ uint32_t maxY);
+extern void drm_kms_helper_poll_init(struct drm_device *dev);
+extern void drm_kms_helper_poll_fini(struct drm_device *dev);
+extern bool drm_helper_hpd_irq_event(struct drm_device *dev);
+extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
+
+extern void drm_kms_helper_poll_disable(struct drm_device *dev);
+extern void drm_kms_helper_poll_enable(struct drm_device *dev);
+
+#endif
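/*
 * Editor's note: a hedged sketch of wiring up the connector helper vtable
 * declared above. The "my_*" names are illustrative; a real driver would
 * normally build its mode list from EDID and return its own encoder.
 */
static int my_connector_get_modes(struct drm_connector *connector)
{
	/* No EDID in this sketch, so fall back to the standard no-EDID modes. */
	return drm_add_modes_noedid(connector, 1024, 768);
}

static struct drm_encoder *
my_connector_best_encoder(struct drm_connector *connector)
{
	return NULL;	/* placeholder: return the driver's encoder here */
}

static const struct drm_connector_helper_funcs my_connector_helper_funcs = {
	.get_modes	= my_connector_get_modes,
	.best_encoder	= my_connector_best_encoder,
};

static void my_connector_init(struct drm_connector *connector)
{
	drm_connector_helper_add(connector, &my_connector_helper_funcs);
}

/* Once all connectors are registered, drm_kms_helper_poll_init(dev) starts
 * the output polling used by drm_helper_hpd_irq_event() and friends. */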
diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
new file mode 100644
index 000000000..623b4e98e
--- /dev/null
+++ b/include/drm/drm_displayid.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright © 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef DRM_DISPLAYID_H
+#define DRM_DISPLAYID_H
+
+#define DATA_BLOCK_PRODUCT_ID 0x00
+#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01
+#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02
+#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03
+#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04
+#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05
+#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06
+#define DATA_BLOCK_VESA_TIMING 0x07
+#define DATA_BLOCK_CEA_TIMING 0x08
+#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09
+#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a
+#define DATA_BLOCK_GP_ASCII_STRING 0x0b
+#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c
+#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d
+#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e
+#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
+#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
+#define DATA_BLOCK_TILED_DISPLAY 0x12
+
+#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
+
+#define PRODUCT_TYPE_EXTENSION 0
+#define PRODUCT_TYPE_TEST 1
+#define PRODUCT_TYPE_PANEL 2
+#define PRODUCT_TYPE_MONITOR 3
+#define PRODUCT_TYPE_TV 4
+#define PRODUCT_TYPE_REPEATER 5
+#define PRODUCT_TYPE_DIRECT_DRIVE 6
+
+struct displayid_hdr {
+ u8 rev;
+ u8 bytes;
+ u8 prod_id;
+ u8 ext_count;
+} __packed;
+
+struct displayid_block {
+ u8 tag;
+ u8 rev;
+ u8 num_bytes;
+} __packed;
+
+struct displayid_tiled_block {
+ struct displayid_block base;
+ u8 tile_cap;
+ u8 topo[3];
+ u8 tile_size[4];
+ u8 tile_pixel_bezel[5];
+ u8 topology_id[8];
+} __packed;
+
+#endif
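/*
 * Editor's note: a hedged sketch of walking the data blocks defined above,
 * e.g. my_find_block(section, length, DATA_BLOCK_TILED_DISPLAY). The
 * "section"/"length" arguments are assumed to describe a DisplayID section
 * that starts with struct displayid_hdr; bounds checking is kept minimal.
 */
static const struct displayid_block *
my_find_block(const u8 *section, int length, u8 tag)
{
	int idx = sizeof(struct displayid_hdr);

	while (idx + (int)sizeof(struct displayid_block) <= length) {
		const struct displayid_block *block =
			(const struct displayid_block *)(section + idx);

		if (block->tag == tag)
			return block;

		/* Each block is a 3-byte header followed by num_bytes of payload. */
		idx += sizeof(struct displayid_block) + block->num_bytes;
	}

	return NULL;
}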
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
new file mode 100644
index 000000000..523f04c90
--- /dev/null
+++ b/include/drm/drm_dp_helper.h
@@ -0,0 +1,761 @@
+/*
+ * Copyright © 2008 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef _DRM_DP_HELPER_H_
+#define _DRM_DP_HELPER_H_
+
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+
+/*
+ * Unless otherwise noted, all values are from the DP 1.1a spec. Note that
+ * DP and DPCD versions are independent. Differences from 1.0 are not noted;
+ * 1.0 devices basically don't exist in the wild.
+ *
+ * Abbreviations, in chronological order:
+ *
+ * eDP: Embedded DisplayPort version 1
+ * DPI: DisplayPort Interoperability Guideline v1.1a
+ * 1.2: DisplayPort 1.2
+ * MST: Multistream Transport - part of DP 1.2a
+ *
+ * 1.2 formally includes both eDP and DPI definitions.
+ */
+
+#define DP_AUX_MAX_PAYLOAD_BYTES 16
+
+#define DP_AUX_I2C_WRITE 0x0
+#define DP_AUX_I2C_READ 0x1
+#define DP_AUX_I2C_STATUS 0x2
+#define DP_AUX_I2C_MOT 0x4
+#define DP_AUX_NATIVE_WRITE 0x8
+#define DP_AUX_NATIVE_READ 0x9
+
+#define DP_AUX_NATIVE_REPLY_ACK (0x0 << 0)
+#define DP_AUX_NATIVE_REPLY_NACK (0x1 << 0)
+#define DP_AUX_NATIVE_REPLY_DEFER (0x2 << 0)
+#define DP_AUX_NATIVE_REPLY_MASK (0x3 << 0)
+
+#define DP_AUX_I2C_REPLY_ACK (0x0 << 2)
+#define DP_AUX_I2C_REPLY_NACK (0x1 << 2)
+#define DP_AUX_I2C_REPLY_DEFER (0x2 << 2)
+#define DP_AUX_I2C_REPLY_MASK (0x3 << 2)
+
+/* AUX CH addresses */
+/* DPCD */
+#define DP_DPCD_REV 0x000
+
+#define DP_MAX_LINK_RATE 0x001
+
+#define DP_MAX_LANE_COUNT 0x002
+# define DP_MAX_LANE_COUNT_MASK 0x1f
+# define DP_TPS3_SUPPORTED (1 << 6) /* 1.2 */
+# define DP_ENHANCED_FRAME_CAP (1 << 7)
+
+#define DP_MAX_DOWNSPREAD 0x003
+# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6)
+
+#define DP_NORP 0x004
+
+#define DP_DOWNSTREAMPORT_PRESENT 0x005
+# define DP_DWN_STRM_PORT_PRESENT (1 << 0)
+# define DP_DWN_STRM_PORT_TYPE_MASK 0x06
+# define DP_DWN_STRM_PORT_TYPE_DP (0 << 1)
+# define DP_DWN_STRM_PORT_TYPE_ANALOG (1 << 1)
+# define DP_DWN_STRM_PORT_TYPE_TMDS (2 << 1)
+# define DP_DWN_STRM_PORT_TYPE_OTHER (3 << 1)
+# define DP_FORMAT_CONVERSION (1 << 3)
+# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */
+
+#define DP_MAIN_LINK_CHANNEL_CODING 0x006
+
+#define DP_DOWN_STREAM_PORT_COUNT 0x007
+# define DP_PORT_COUNT_MASK 0x0f
+# define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */
+# define DP_OUI_SUPPORT (1 << 7)
+
+#define DP_RECEIVE_PORT_0_CAP_0 0x008
+# define DP_LOCAL_EDID_PRESENT (1 << 1)
+# define DP_ASSOCIATED_TO_PRECEDING_PORT (1 << 2)
+
+#define DP_RECEIVE_PORT_0_BUFFER_SIZE 0x009
+
+#define DP_RECEIVE_PORT_1_CAP_0 0x00a
+#define DP_RECEIVE_PORT_1_BUFFER_SIZE 0x00b
+
+#define DP_I2C_SPEED_CAP 0x00c /* DPI */
+# define DP_I2C_SPEED_1K 0x01
+# define DP_I2C_SPEED_5K 0x02
+# define DP_I2C_SPEED_10K 0x04
+# define DP_I2C_SPEED_100K 0x08
+# define DP_I2C_SPEED_400K 0x10
+# define DP_I2C_SPEED_1M 0x20
+
+#define DP_EDP_CONFIGURATION_CAP 0x00d /* XXX 1.2? */
+# define DP_ALTERNATE_SCRAMBLER_RESET_CAP (1 << 0)
+# define DP_FRAMING_CHANGE_CAP (1 << 1)
+# define DP_DPCD_DISPLAY_CONTROL_CAPABLE (1 << 3) /* edp v1.2 or higher */
+
+#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */
+
+#define DP_ADAPTER_CAP 0x00f /* 1.2 */
+# define DP_FORCE_LOAD_SENSE_CAP (1 << 0)
+# define DP_ALTERNATE_I2C_PATTERN_CAP (1 << 1)
+
+#define DP_SUPPORTED_LINK_RATES 0x010 /* eDP 1.4 */
+# define DP_MAX_SUPPORTED_RATES 8 /* 16-bit little-endian */
+
+/* Multiple stream transport */
+#define DP_FAUX_CAP 0x020 /* 1.2 */
+# define DP_FAUX_CAP_1 (1 << 0)
+
+#define DP_MSTM_CAP 0x021 /* 1.2 */
+# define DP_MST_CAP (1 << 0)
+
+#define DP_NUMBER_OF_AUDIO_ENDPOINTS 0x022 /* 1.2 */
+
+/* AV_SYNC_DATA_BLOCK 1.2 */
+#define DP_AV_GRANULARITY 0x023
+# define DP_AG_FACTOR_MASK (0xf << 0)
+# define DP_AG_FACTOR_3MS (0 << 0)
+# define DP_AG_FACTOR_2MS (1 << 0)
+# define DP_AG_FACTOR_1MS (2 << 0)
+# define DP_AG_FACTOR_500US (3 << 0)
+# define DP_AG_FACTOR_200US (4 << 0)
+# define DP_AG_FACTOR_100US (5 << 0)
+# define DP_AG_FACTOR_10US (6 << 0)
+# define DP_AG_FACTOR_1US (7 << 0)
+# define DP_VG_FACTOR_MASK (0xf << 4)
+# define DP_VG_FACTOR_3MS (0 << 4)
+# define DP_VG_FACTOR_2MS (1 << 4)
+# define DP_VG_FACTOR_1MS (2 << 4)
+# define DP_VG_FACTOR_500US (3 << 4)
+# define DP_VG_FACTOR_200US (4 << 4)
+# define DP_VG_FACTOR_100US (5 << 4)
+
+#define DP_AUD_DEC_LAT0 0x024
+#define DP_AUD_DEC_LAT1 0x025
+
+#define DP_AUD_PP_LAT0 0x026
+#define DP_AUD_PP_LAT1 0x027
+
+#define DP_VID_INTER_LAT 0x028
+
+#define DP_VID_PROG_LAT 0x029
+
+#define DP_REP_LAT 0x02a
+
+#define DP_AUD_DEL_INS0 0x02b
+#define DP_AUD_DEL_INS1 0x02c
+#define DP_AUD_DEL_INS2 0x02d
+/* End of AV_SYNC_DATA_BLOCK */
+
+#define DP_RECEIVER_ALPM_CAP 0x02e /* eDP 1.4 */
+# define DP_ALPM_CAP (1 << 0)
+
+#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP 0x02f /* eDP 1.4 */
+# define DP_AUX_FRAME_SYNC_CAP (1 << 0)
+
+#define DP_GUID 0x030 /* 1.2 */
+
+#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
+# define DP_PSR_IS_SUPPORTED 1
+# define DP_PSR2_IS_SUPPORTED 2 /* eDP 1.4 */
+
+#define DP_PSR_CAPS 0x071 /* XXX 1.2? */
+# define DP_PSR_NO_TRAIN_ON_EXIT 1
+# define DP_PSR_SETUP_TIME_330 (0 << 1)
+# define DP_PSR_SETUP_TIME_275 (1 << 1)
+# define DP_PSR_SETUP_TIME_220 (2 << 1)
+# define DP_PSR_SETUP_TIME_165 (3 << 1)
+# define DP_PSR_SETUP_TIME_110 (4 << 1)
+# define DP_PSR_SETUP_TIME_55 (5 << 1)
+# define DP_PSR_SETUP_TIME_0 (6 << 1)
+# define DP_PSR_SETUP_TIME_MASK (7 << 1)
+# define DP_PSR_SETUP_TIME_SHIFT 1
+
+/*
+ * 0x80-0x8f describe downstream port capabilities, but there are two layouts
+ * based on whether DP_DETAILED_CAP_INFO_AVAILABLE was set. If it was not,
+ * each port's descriptor is one byte wide. If it was set, each descriptor is
+ * four bytes wide, starting with the one byte from the base info. As of
+ * DP interop v1.1a only VGA defines additional detail.
+ */
+
+/* offset 0 */
+#define DP_DOWNSTREAM_PORT_0 0x80
+# define DP_DS_PORT_TYPE_MASK (7 << 0)
+# define DP_DS_PORT_TYPE_DP 0
+# define DP_DS_PORT_TYPE_VGA 1
+# define DP_DS_PORT_TYPE_DVI 2
+# define DP_DS_PORT_TYPE_HDMI 3
+# define DP_DS_PORT_TYPE_NON_EDID 4
+# define DP_DS_PORT_HPD (1 << 3)
+/* offset 1 for VGA is maximum megapixels per second / 8 */
+/* offset 2 */
+# define DP_DS_VGA_MAX_BPC_MASK (3 << 0)
+# define DP_DS_VGA_8BPC 0
+# define DP_DS_VGA_10BPC 1
+# define DP_DS_VGA_12BPC 2
+# define DP_DS_VGA_16BPC 3
+
+/* link configuration */
+#define DP_LINK_BW_SET 0x100
+# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */
+# define DP_LINK_BW_1_62 0x06
+# define DP_LINK_BW_2_7 0x0a
+# define DP_LINK_BW_5_4 0x14 /* 1.2 */
+
+#define DP_LANE_COUNT_SET 0x101
+# define DP_LANE_COUNT_MASK 0x0f
+# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
+
+#define DP_TRAINING_PATTERN_SET 0x102
+# define DP_TRAINING_PATTERN_DISABLE 0
+# define DP_TRAINING_PATTERN_1 1
+# define DP_TRAINING_PATTERN_2 2
+# define DP_TRAINING_PATTERN_3 3 /* 1.2 */
+# define DP_TRAINING_PATTERN_MASK 0x3
+
+/* DPCD 1.1 only. For DPCD >= 1.2 see per-lane DP_LINK_QUAL_LANEn_SET */
+# define DP_LINK_QUAL_PATTERN_11_DISABLE (0 << 2)
+# define DP_LINK_QUAL_PATTERN_11_D10_2 (1 << 2)
+# define DP_LINK_QUAL_PATTERN_11_ERROR_RATE (2 << 2)
+# define DP_LINK_QUAL_PATTERN_11_PRBS7 (3 << 2)
+# define DP_LINK_QUAL_PATTERN_11_MASK (3 << 2)
+
+# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4)
+# define DP_LINK_SCRAMBLING_DISABLE (1 << 5)
+
+# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6)
+# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6)
+# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6)
+# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6)
+
+#define DP_TRAINING_LANE0_SET 0x103
+#define DP_TRAINING_LANE1_SET 0x104
+#define DP_TRAINING_LANE2_SET 0x105
+#define DP_TRAINING_LANE3_SET 0x106
+
+# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3
+# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0
+# define DP_TRAIN_MAX_SWING_REACHED (1 << 2)
+# define DP_TRAIN_VOLTAGE_SWING_LEVEL_0 (0 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_LEVEL_1 (1 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_LEVEL_2 (2 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_LEVEL_3 (3 << 0)
+
+# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3)
+# define DP_TRAIN_PRE_EMPH_LEVEL_0 (0 << 3)
+# define DP_TRAIN_PRE_EMPH_LEVEL_1 (1 << 3)
+# define DP_TRAIN_PRE_EMPH_LEVEL_2 (2 << 3)
+# define DP_TRAIN_PRE_EMPH_LEVEL_3 (3 << 3)
+
+# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
+# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
+
+#define DP_DOWNSPREAD_CTRL 0x107
+# define DP_SPREAD_AMP_0_5 (1 << 4)
+# define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */
+
+#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
+# define DP_SET_ANSI_8B10B (1 << 0)
+
+#define DP_I2C_SPEED_CONTROL_STATUS 0x109 /* DPI */
+/* bitmask as for DP_I2C_SPEED_CAP */
+
+#define DP_EDP_CONFIGURATION_SET 0x10a /* XXX 1.2? */
+# define DP_ALTERNATE_SCRAMBLER_RESET_ENABLE (1 << 0)
+# define DP_FRAMING_CHANGE_ENABLE (1 << 1)
+# define DP_PANEL_SELF_TEST_ENABLE (1 << 7)
+
+#define DP_LINK_QUAL_LANE0_SET 0x10b /* DPCD >= 1.2 */
+#define DP_LINK_QUAL_LANE1_SET 0x10c
+#define DP_LINK_QUAL_LANE2_SET 0x10d
+#define DP_LINK_QUAL_LANE3_SET 0x10e
+# define DP_LINK_QUAL_PATTERN_DISABLE 0
+# define DP_LINK_QUAL_PATTERN_D10_2 1
+# define DP_LINK_QUAL_PATTERN_ERROR_RATE 2
+# define DP_LINK_QUAL_PATTERN_PRBS7 3
+# define DP_LINK_QUAL_PATTERN_80BIT_CUSTOM 4
+# define DP_LINK_QUAL_PATTERN_HBR2_EYE 5
+# define DP_LINK_QUAL_PATTERN_MASK 7
+
+#define DP_TRAINING_LANE0_1_SET2 0x10f
+#define DP_TRAINING_LANE2_3_SET2 0x110
+# define DP_LANE02_POST_CURSOR2_SET_MASK (3 << 0)
+# define DP_LANE02_MAX_POST_CURSOR2_REACHED (1 << 2)
+# define DP_LANE13_POST_CURSOR2_SET_MASK (3 << 4)
+# define DP_LANE13_MAX_POST_CURSOR2_REACHED (1 << 6)
+
+#define DP_MSTM_CTRL 0x111 /* 1.2 */
+# define DP_MST_EN (1 << 0)
+# define DP_UP_REQ_EN (1 << 1)
+# define DP_UPSTREAM_IS_SRC (1 << 2)
+
+#define DP_AUDIO_DELAY0 0x112 /* 1.2 */
+#define DP_AUDIO_DELAY1 0x113
+#define DP_AUDIO_DELAY2 0x114
+
+#define DP_LINK_RATE_SET 0x115 /* eDP 1.4 */
+# define DP_LINK_RATE_SET_SHIFT 0
+# define DP_LINK_RATE_SET_MASK (7 << 0)
+
+#define DP_RECEIVER_ALPM_CONFIG 0x116 /* eDP 1.4 */
+# define DP_ALPM_ENABLE (1 << 0)
+# define DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE (1 << 1)
+
+#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF 0x117 /* eDP 1.4 */
+# define DP_AUX_FRAME_SYNC_ENABLE (1 << 0)
+# define DP_IRQ_HPD_ENABLE (1 << 1)
+
+#define DP_UPSTREAM_DEVICE_DP_PWR_NEED 0x118 /* 1.2 */
+# define DP_PWR_NOT_NEEDED (1 << 0)
+
+#define DP_AUX_FRAME_SYNC_VALUE 0x15c /* eDP 1.4 */
+# define DP_AUX_FRAME_SYNC_VALID (1 << 0)
+
+#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
+# define DP_PSR_ENABLE (1 << 0)
+# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1)
+# define DP_PSR_CRC_VERIFICATION (1 << 2)
+# define DP_PSR_FRAME_CAPTURE (1 << 3)
+# define DP_PSR_SELECTIVE_UPDATE (1 << 4)
+# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5)
+
+#define DP_ADAPTER_CTRL 0x1a0
+# define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0)
+
+#define DP_BRANCH_DEVICE_CTRL 0x1a1
+# define DP_BRANCH_DEVICE_IRQ_HPD (1 << 0)
+
+#define DP_PAYLOAD_ALLOCATE_SET 0x1c0
+#define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1
+#define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1c2
+
+#define DP_SINK_COUNT 0x200
+/* prior to 1.2 bit 7 was reserved mbz */
+# define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f))
+# define DP_SINK_CP_READY (1 << 6)
+
+#define DP_DEVICE_SERVICE_IRQ_VECTOR 0x201
+# define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0)
+# define DP_AUTOMATED_TEST_REQUEST (1 << 1)
+# define DP_CP_IRQ (1 << 2)
+# define DP_MCCS_IRQ (1 << 3)
+# define DP_DOWN_REP_MSG_RDY (1 << 4) /* 1.2 MST */
+# define DP_UP_REQ_MSG_RDY (1 << 5) /* 1.2 MST */
+# define DP_SINK_SPECIFIC_IRQ (1 << 6)
+
+#define DP_LANE0_1_STATUS 0x202
+#define DP_LANE2_3_STATUS 0x203
+# define DP_LANE_CR_DONE (1 << 0)
+# define DP_LANE_CHANNEL_EQ_DONE (1 << 1)
+# define DP_LANE_SYMBOL_LOCKED (1 << 2)
+
+#define DP_CHANNEL_EQ_BITS (DP_LANE_CR_DONE | \
+ DP_LANE_CHANNEL_EQ_DONE | \
+ DP_LANE_SYMBOL_LOCKED)
+
+#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
+
+#define DP_INTERLANE_ALIGN_DONE (1 << 0)
+#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
+#define DP_LINK_STATUS_UPDATED (1 << 7)
+
+#define DP_SINK_STATUS 0x205
+
+#define DP_RECEIVE_PORT_0_STATUS (1 << 0)
+#define DP_RECEIVE_PORT_1_STATUS (1 << 1)
+
+#define DP_ADJUST_REQUEST_LANE0_1 0x206
+#define DP_ADJUST_REQUEST_LANE2_3 0x207
+# define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03
+# define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
+# define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c
+# define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2
+# define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30
+# define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
+# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
+# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
+
+#define DP_TEST_REQUEST 0x218
+# define DP_TEST_LINK_TRAINING (1 << 0)
+# define DP_TEST_LINK_VIDEO_PATTERN (1 << 1)
+# define DP_TEST_LINK_EDID_READ (1 << 2)
+# define DP_TEST_LINK_PHY_TEST_PATTERN (1 << 3) /* DPCD >= 1.1 */
+# define DP_TEST_LINK_FAUX_PATTERN (1 << 4) /* DPCD >= 1.2 */
+
+#define DP_TEST_LINK_RATE 0x219
+# define DP_LINK_RATE_162 (0x6)
+# define DP_LINK_RATE_27 (0xa)
+
+#define DP_TEST_LANE_COUNT 0x220
+
+#define DP_TEST_PATTERN 0x221
+
+#define DP_TEST_CRC_R_CR 0x240
+#define DP_TEST_CRC_G_Y 0x242
+#define DP_TEST_CRC_B_CB 0x244
+
+#define DP_TEST_SINK_MISC 0x246
+# define DP_TEST_CRC_SUPPORTED (1 << 5)
+# define DP_TEST_COUNT_MASK 0x7
+
+#define DP_TEST_RESPONSE 0x260
+# define DP_TEST_ACK (1 << 0)
+# define DP_TEST_NAK (1 << 1)
+# define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2)
+
+#define DP_TEST_EDID_CHECKSUM 0x261
+
+#define DP_TEST_SINK 0x270
+# define DP_TEST_SINK_START (1 << 0)
+
+#define DP_PAYLOAD_TABLE_UPDATE_STATUS 0x2c0 /* 1.2 MST */
+# define DP_PAYLOAD_TABLE_UPDATED (1 << 0)
+# define DP_PAYLOAD_ACT_HANDLED (1 << 1)
+
+#define DP_VC_PAYLOAD_ID_SLOT_1 0x2c1 /* 1.2 MST */
+/* up to ID_SLOT_63 at 0x2ff */
+
+#define DP_SOURCE_OUI 0x300
+#define DP_SINK_OUI 0x400
+#define DP_BRANCH_OUI 0x500
+
+#define DP_SET_POWER 0x600
+# define DP_SET_POWER_D0 0x1
+# define DP_SET_POWER_D3 0x2
+# define DP_SET_POWER_MASK 0x3
+
+#define DP_EDP_DPCD_REV 0x700 /* eDP 1.2 */
+# define DP_EDP_11 0x00
+# define DP_EDP_12 0x01
+# define DP_EDP_13 0x02
+# define DP_EDP_14 0x03
+
+#define DP_EDP_GENERAL_CAP_1 0x701
+
+#define DP_EDP_BACKLIGHT_ADJUSTMENT_CAP 0x702
+
+#define DP_EDP_GENERAL_CAP_2 0x703
+
+#define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */
+
+#define DP_EDP_DISPLAY_CONTROL_REGISTER 0x720
+
+#define DP_EDP_BACKLIGHT_MODE_SET_REGISTER 0x721
+
+#define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB 0x722
+#define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB 0x723
+
+#define DP_EDP_PWMGEN_BIT_COUNT 0x724
+#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN 0x725
+#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX 0x726
+
+#define DP_EDP_BACKLIGHT_CONTROL_STATUS 0x727
+
+#define DP_EDP_BACKLIGHT_FREQ_SET 0x728
+
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MSB 0x72a
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MID 0x72b
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_LSB 0x72c
+
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MSB 0x72d
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MID 0x72e
+#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB 0x72f
+
+#define DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET 0x732
+#define DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET 0x733
+
+#define DP_EDP_REGIONAL_BACKLIGHT_BASE 0x740 /* eDP 1.4 */
+#define DP_EDP_REGIONAL_BACKLIGHT_0 0x741 /* eDP 1.4 */
+
+#define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */
+#define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */
+#define DP_SIDEBAND_MSG_DOWN_REP_BASE 0x1400 /* 1.2 MST */
+#define DP_SIDEBAND_MSG_UP_REQ_BASE 0x1600 /* 1.2 MST */
+
+#define DP_SINK_COUNT_ESI 0x2002 /* 1.2 */
+/* 0-5 sink count */
+# define DP_SINK_COUNT_CP_READY (1 << 6)
+
+#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x2003 /* 1.2 */
+
+#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 0x2004 /* 1.2 */
+
+#define DP_LINK_SERVICE_IRQ_VECTOR_ESI0 0x2005 /* 1.2 */
+
+#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */
+# define DP_PSR_LINK_CRC_ERROR (1 << 0)
+# define DP_PSR_RFB_STORAGE_ERROR (1 << 1)
+# define DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR (1 << 2) /* eDP 1.4 */
+
+#define DP_PSR_ESI 0x2007 /* XXX 1.2? */
+# define DP_PSR_CAPS_CHANGE (1 << 0)
+
+#define DP_PSR_STATUS 0x2008 /* XXX 1.2? */
+# define DP_PSR_SINK_INACTIVE 0
+# define DP_PSR_SINK_ACTIVE_SRC_SYNCED 1
+# define DP_PSR_SINK_ACTIVE_RFB 2
+# define DP_PSR_SINK_ACTIVE_SINK_SYNCED 3
+# define DP_PSR_SINK_ACTIVE_RESYNC 4
+# define DP_PSR_SINK_INTERNAL_ERROR 7
+# define DP_PSR_SINK_STATE_MASK 0x07
+
+#define DP_RECEIVER_ALPM_STATUS 0x200b /* eDP 1.4 */
+# define DP_ALPM_LOCK_TIMEOUT_ERROR (1 << 0)
+
+/* DP 1.2 Sideband message defines */
+/* peer device type - DP 1.2a Table 2-92 */
+#define DP_PEER_DEVICE_NONE 0x0
+#define DP_PEER_DEVICE_SOURCE_OR_SST 0x1
+#define DP_PEER_DEVICE_MST_BRANCHING 0x2
+#define DP_PEER_DEVICE_SST_SINK 0x3
+#define DP_PEER_DEVICE_DP_LEGACY_CONV 0x4
+
+/* DP 1.2 MST sideband request names DP 1.2a Table 2-80 */
+#define DP_LINK_ADDRESS 0x01
+#define DP_CONNECTION_STATUS_NOTIFY 0x02
+#define DP_ENUM_PATH_RESOURCES 0x10
+#define DP_ALLOCATE_PAYLOAD 0x11
+#define DP_QUERY_PAYLOAD 0x12
+#define DP_RESOURCE_STATUS_NOTIFY 0x13
+#define DP_CLEAR_PAYLOAD_ID_TABLE 0x14
+#define DP_REMOTE_DPCD_READ 0x20
+#define DP_REMOTE_DPCD_WRITE 0x21
+#define DP_REMOTE_I2C_READ 0x22
+#define DP_REMOTE_I2C_WRITE 0x23
+#define DP_POWER_UP_PHY 0x24
+#define DP_POWER_DOWN_PHY 0x25
+#define DP_SINK_EVENT_NOTIFY 0x30
+#define DP_QUERY_STREAM_ENC_STATUS 0x38
+
+/* DP 1.2 MST sideband nak reasons - table 2.84 */
+#define DP_NAK_WRITE_FAILURE 0x01
+#define DP_NAK_INVALID_READ 0x02
+#define DP_NAK_CRC_FAILURE 0x03
+#define DP_NAK_BAD_PARAM 0x04
+#define DP_NAK_DEFER 0x05
+#define DP_NAK_LINK_FAILURE 0x06
+#define DP_NAK_NO_RESOURCES 0x07
+#define DP_NAK_DPCD_FAIL 0x08
+#define DP_NAK_I2C_NAK 0x09
+#define DP_NAK_ALLOCATE_FAIL 0x0a
+
+#define MODE_I2C_START 1
+#define MODE_I2C_WRITE 2
+#define MODE_I2C_READ 4
+#define MODE_I2C_STOP 8
+
+#define DP_LINK_STATUS_SIZE 6
+bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane);
+u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane);
+
+#define DP_RECEIVER_CAP_SIZE 0xf
+#define EDP_PSR_RECEIVER_CAP_SIZE 2
+
+void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate);
+int drm_dp_bw_code_to_link_rate(u8 link_bw);
+
+struct edp_sdp_header {
+ u8 HB0; /* Secondary Data Packet ID */
+ u8 HB1; /* Secondary Data Packet Type */
+ u8 HB2; /* 7:5 reserved, 4:0 revision number */
+ u8 HB3; /* 7:5 reserved, 4:0 number of valid data bytes */
+} __packed;
+
+#define EDP_SDP_HEADER_REVISION_MASK 0x1F
+#define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES 0x1F
+
+struct edp_vsc_psr {
+ struct edp_sdp_header sdp_header;
+ u8 DB0; /* Stereo Interface */
+ u8 DB1; /* 0 - PSR State; 1 - Update RFB; 2 - CRC Valid */
+ u8 DB2; /* CRC value bits 7:0 of the R or Cr component */
+ u8 DB3; /* CRC value bits 15:8 of the R or Cr component */
+ u8 DB4; /* CRC value bits 7:0 of the G or Y component */
+ u8 DB5; /* CRC value bits 15:8 of the G or Y component */
+ u8 DB6; /* CRC value bits 7:0 of the B or Cb component */
+ u8 DB7; /* CRC value bits 15:8 of the B or Cb component */
+ u8 DB8_31[24]; /* Reserved */
+} __packed;
+
+#define EDP_VSC_PSR_STATE_ACTIVE (1<<0)
+#define EDP_VSC_PSR_UPDATE_RFB (1<<1)
+#define EDP_VSC_PSR_CRC_VALUES_VALID (1<<2)
+
+static inline int
+drm_dp_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
+}
+
+static inline u8
+drm_dp_max_lane_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+}
+
+static inline bool
+drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x11 &&
+ (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
+}
+
+/*
+ * DisplayPort AUX channel
+ */
+
+/**
+ * struct drm_dp_aux_msg - DisplayPort AUX channel transaction
+ * @address: address of the (first) register to access
+ * @request: contains the type of transaction (see DP_AUX_* macros)
+ * @reply: upon completion, contains the reply type of the transaction
+ * @buffer: pointer to a transmission or reception buffer
+ * @size: size of @buffer
+ */
+struct drm_dp_aux_msg {
+ unsigned int address;
+ u8 request;
+ u8 reply;
+ void *buffer;
+ size_t size;
+};
+
+/**
+ * struct drm_dp_aux - DisplayPort AUX channel
+ * @name: user-visible name of this AUX channel and the I2C-over-AUX adapter
+ * @ddc: I2C adapter that can be used for I2C-over-AUX communication
+ * @dev: pointer to struct device that is the parent for this AUX channel
+ * @hw_mutex: internal mutex used for locking transfers
+ * @transfer: transfers a message representing a single AUX transaction
+ *
+ * The .dev field should be set to a pointer to the device that implements
+ * the AUX channel.
+ *
+ * The .name field may be used to specify the name of the I2C adapter. If set to
+ * NULL, dev_name() of .dev will be used.
+ *
+ * Drivers provide a hardware-specific implementation of how transactions
+ * are executed via the .transfer() function. A pointer to a drm_dp_aux_msg
+ * structure describing the transaction is passed into this function. Upon
+ * success, the implementation should return the number of payload bytes
+ * that were transferred, or a negative error-code on failure. Helpers
+ * propagate errors from the .transfer() function, with the exception of
+ * the -EBUSY error, which causes a transaction to be retried. On a short
+ * transfer, helpers will return -EPROTO to make it simpler to check for failure.
+ *
+ * An AUX channel can also be used to transport I2C messages to a sink. A
+ * typical application of that is to access an EDID that's present in the
+ * sink device. The .transfer() function can also be used to execute such
+ * transactions. The drm_dp_aux_register_i2c_bus() function registers an
+ * I2C adapter that can be passed to drm_probe_ddc(). Upon removal, drivers
+ * should call drm_dp_aux_unregister_i2c_bus() to remove the I2C adapter.
+ * The I2C adapter uses long transfers by default; if a partial response is
+ * received, the adapter will drop down to the size given by the partial
+ * response for this transaction only.
+ *
+ * Note that the aux helper code assumes that the .transfer() function
+ * only modifies the reply field of the drm_dp_aux_msg structure. The
+ * retry logic and i2c helpers assume this is the case.
+ */
+struct drm_dp_aux {
+ const char *name;
+ struct i2c_adapter ddc;
+ struct device *dev;
+ struct mutex hw_mutex;
+ ssize_t (*transfer)(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg);
+ unsigned i2c_nack_count, i2c_defer_count;
+};
+
+ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
+ void *buffer, size_t size);
+ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
+ void *buffer, size_t size);
+
+/**
+ * drm_dp_dpcd_readb() - read a single byte from the DPCD
+ * @aux: DisplayPort AUX channel
+ * @offset: address of the register to read
+ * @valuep: location where the value of the register will be stored
+ *
+ * Returns the number of bytes transferred (1) on success, or a negative
+ * error code on failure.
+ */
+static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux,
+ unsigned int offset, u8 *valuep)
+{
+ return drm_dp_dpcd_read(aux, offset, valuep, 1);
+}
+
+/**
+ * drm_dp_dpcd_writeb() - write a single byte to the DPCD
+ * @aux: DisplayPort AUX channel
+ * @offset: address of the register to write
+ * @value: value to write to the register
+ *
+ * Returns the number of bytes transferred (1) on success, or a negative
+ * error code on failure.
+ */
+static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux,
+ unsigned int offset, u8 value)
+{
+ return drm_dp_dpcd_write(aux, offset, &value, 1);
+}
+
+int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
+ u8 status[DP_LINK_STATUS_SIZE]);
+
+/*
+ * DisplayPort link
+ */
+#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0)
+
+struct drm_dp_link {
+ unsigned char revision;
+ unsigned int rate;
+ unsigned int num_lanes;
+ unsigned long capabilities;
+};
+
+int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
+
+int drm_dp_aux_register(struct drm_dp_aux *aux);
+void drm_dp_aux_unregister(struct drm_dp_aux *aux);
+
+#endif /* _DRM_DP_HELPER_H_ */
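/*
 * Editor's note: a hedged usage sketch for the AUX/DPCD helpers above. It is
 * not part of the imported header; the "my_*" names are hypothetical and the
 * .transfer() hook below only illustrates the contract (return the number of
 * payload bytes, modify nothing but msg->reply) rather than real hardware I/O.
 */
static ssize_t my_aux_transfer(struct drm_dp_aux *aux,
			       struct drm_dp_aux_msg *msg)
{
	/* A real implementation would drive the AUX hardware here. */
	msg->reply = DP_AUX_NATIVE_REPLY_ACK;
	return msg->size;
}

static int my_probe_sink(struct drm_dp_aux *aux)
{
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	u8 count;
	ssize_t ret;

	/* Cache the receiver capability field starting at DPCD address 0x000. */
	ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
	if (ret < 0)
		return ret;

	if (drm_dp_max_lane_count(dpcd) == 0)
		return -EINVAL;

	/* Single-byte reads go through the readb wrapper. */
	ret = drm_dp_dpcd_readb(aux, DP_SINK_COUNT, &count);
	if (ret < 0)
		return ret;

	return DP_GET_SINK_COUNT(count);
}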
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
new file mode 100644
index 000000000..86d0b25ed
--- /dev/null
+++ b/include/drm/drm_dp_mst_helper.h
@@ -0,0 +1,519 @@
+/*
+ * Copyright © 2014 Red Hat.
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+#ifndef _DRM_DP_MST_HELPER_H_
+#define _DRM_DP_MST_HELPER_H_
+
+#include <linux/types.h>
+#include <drm/drm_dp_helper.h>
+
+struct drm_dp_mst_branch;
+
+/**
+ * struct drm_dp_vcpi - Virtual Channel Payload Identifier
+ * @vcpi: Virtual channel ID.
+ * @pbn: Payload Bandwidth Number for this channel
+ * @aligned_pbn: PBN aligned with slot size
+ * @num_slots: number of slots for this PBN
+ */
+struct drm_dp_vcpi {
+ int vcpi;
+ int pbn;
+ int aligned_pbn;
+ int num_slots;
+};
+
+/**
+ * struct drm_dp_mst_port - MST port
+ * @kref: reference count for this port.
+ * @guid_valid: for DP 1.2 devices, whether the GUID has been validated.
+ * @guid: guid for DP 1.2 device on this port.
+ * @port_num: port number
+ * @input: if this port is an input port.
+ * @mcs: message capability status - DP 1.2 spec.
+ * @ddps: DisplayPort Device Plug Status - DP 1.2
+ * @pdt: Peer Device Type
+ * @ldps: Legacy Device Plug Status
+ * @dpcd_rev: DPCD revision of device on this port
+ * @num_sdp_streams: Number of simultaneous streams
+ * @num_sdp_stream_sinks: Number of stream sinks
+ * @available_pbn: Available bandwidth for this port.
+ * @next: link to next port on this branch device
+ * @mstb: branch device attached below this port
+ * @aux: i2c aux transport to talk to device connected to this port.
+ * @parent: branch device parent of this port
+ * @vcpi: Virtual Channel Payload info for this port.
+ * @connector: DRM connector this port is connected to.
+ * @mgr: topology manager this port lives under.
+ *
+ * This structure represents an MST port endpoint on a device somewhere
+ * in the MST topology.
+ */
+struct drm_dp_mst_port {
+ struct kref kref;
+
+ /* if dpcd 1.2 device is on this port - its GUID info */
+ bool guid_valid;
+ u8 guid[16];
+
+ u8 port_num;
+ bool input;
+ bool mcs;
+ bool ddps;
+ u8 pdt;
+ bool ldps;
+ u8 dpcd_rev;
+ u8 num_sdp_streams;
+ u8 num_sdp_stream_sinks;
+ uint16_t available_pbn;
+ struct list_head next;
+ struct drm_dp_mst_branch *mstb; /* pointer to an mstb if this port has one */
+ struct drm_dp_aux aux; /* i2c bus for this port? */
+ struct drm_dp_mst_branch *parent;
+
+ struct drm_dp_vcpi vcpi;
+ struct drm_connector *connector;
+ struct drm_dp_mst_topology_mgr *mgr;
+
+ struct edid *cached_edid; /* for DP logical ports - make tiling work */
+};
+
+/**
+ * struct drm_dp_mst_branch - MST branch device.
+ * @kref: reference count for this branch device.
+ * @rad: Relative Address to talk to this branch device.
+ * @lct: Link count total to talk to this branch device.
+ * @num_ports: number of ports on the branch.
+ * @msg_slots: one bit per transmitted msg slot.
+ * @ports: linked list of ports on this branch.
+ * @port_parent: pointer to the port parent, NULL if toplevel.
+ * @mgr: topology manager for this branch device.
+ * @tx_slots: transmission slots for this device.
+ * @last_seqno: last sequence number used to talk to this device.
+ * @link_address_sent: whether a link address message has been sent to this device yet.
+ *
+ * This structure represents an MST branch device; there is one
+ * primary branch device at the root, along with any others connected
+ * to downstream ports.
+ */
+struct drm_dp_mst_branch {
+ struct kref kref;
+ u8 rad[8];
+ u8 lct;
+ int num_ports;
+
+ int msg_slots;
+ struct list_head ports;
+
+ /* list of tx ops queue for this port */
+ struct drm_dp_mst_port *port_parent;
+ struct drm_dp_mst_topology_mgr *mgr;
+
+ /* slots are protected by mstb->mgr->qlock */
+ struct drm_dp_sideband_msg_tx *tx_slots[2];
+ int last_seqno;
+ bool link_address_sent;
+};
+
+
+/* sideband msg header - not bit struct */
+struct drm_dp_sideband_msg_hdr {
+ u8 lct;
+ u8 lcr;
+ u8 rad[8];
+ bool broadcast;
+ bool path_msg;
+ u8 msg_len;
+ bool somt;
+ bool eomt;
+ bool seqno;
+};
+
+struct drm_dp_nak_reply {
+ u8 guid[16];
+ u8 reason;
+ u8 nak_data;
+};
+
+struct drm_dp_link_address_ack_reply {
+ u8 guid[16];
+ u8 nports;
+ struct drm_dp_link_addr_reply_port {
+ bool input_port;
+ u8 peer_device_type;
+ u8 port_number;
+ bool mcs;
+ bool ddps;
+ bool legacy_device_plug_status;
+ u8 dpcd_revision;
+ u8 peer_guid[16];
+ u8 num_sdp_streams;
+ u8 num_sdp_stream_sinks;
+ } ports[16];
+};
+
+struct drm_dp_remote_dpcd_read_ack_reply {
+ u8 port_number;
+ u8 num_bytes;
+ u8 bytes[255];
+};
+
+struct drm_dp_remote_dpcd_write_ack_reply {
+ u8 port_number;
+};
+
+struct drm_dp_remote_dpcd_write_nak_reply {
+ u8 port_number;
+ u8 reason;
+ u8 bytes_written_before_failure;
+};
+
+struct drm_dp_remote_i2c_read_ack_reply {
+ u8 port_number;
+ u8 num_bytes;
+ u8 bytes[255];
+};
+
+struct drm_dp_remote_i2c_read_nak_reply {
+ u8 port_number;
+ u8 nak_reason;
+ u8 i2c_nak_transaction;
+};
+
+struct drm_dp_remote_i2c_write_ack_reply {
+ u8 port_number;
+};
+
+
+struct drm_dp_sideband_msg_rx {
+ u8 chunk[48];
+ u8 msg[256];
+ u8 curchunk_len;
+ u8 curchunk_idx; /* chunk we are parsing now */
+ u8 curchunk_hdrlen;
+ u8 curlen; /* total length of the msg */
+ bool have_somt;
+ bool have_eomt;
+ struct drm_dp_sideband_msg_hdr initial_hdr;
+};
+
+
+struct drm_dp_allocate_payload {
+ u8 port_number;
+ u8 number_sdp_streams;
+ u8 vcpi;
+ u16 pbn;
+ u8 sdp_stream_sink[8];
+};
+
+struct drm_dp_allocate_payload_ack_reply {
+ u8 port_number;
+ u8 vcpi;
+ u16 allocated_pbn;
+};
+
+struct drm_dp_connection_status_notify {
+ u8 guid[16];
+ u8 port_number;
+ bool legacy_device_plug_status;
+ bool displayport_device_plug_status;
+ bool message_capability_status;
+ bool input_port;
+ u8 peer_device_type;
+};
+
+struct drm_dp_remote_dpcd_read {
+ u8 port_number;
+ u32 dpcd_address;
+ u8 num_bytes;
+};
+
+struct drm_dp_remote_dpcd_write {
+ u8 port_number;
+ u32 dpcd_address;
+ u8 num_bytes;
+ u8 *bytes;
+};
+
+struct drm_dp_remote_i2c_read {
+ u8 num_transactions;
+ u8 port_number;
+ struct {
+ u8 i2c_dev_id;
+ u8 num_bytes;
+ u8 *bytes;
+ u8 no_stop_bit;
+ u8 i2c_transaction_delay;
+ } transactions[4];
+ u8 read_i2c_device_id;
+ u8 num_bytes_read;
+};
+
+struct drm_dp_remote_i2c_write {
+ u8 port_number;
+ u8 write_i2c_device_id;
+ u8 num_bytes;
+ u8 *bytes;
+};
+
+/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
+struct drm_dp_port_number_req {
+ u8 port_number;
+};
+
+struct drm_dp_enum_path_resources_ack_reply {
+ u8 port_number;
+ u16 full_payload_bw_number;
+ u16 avail_payload_bw_number;
+};
+
+/* covers POWER_DOWN_PHY, POWER_UP_PHY */
+struct drm_dp_port_number_rep {
+ u8 port_number;
+};
+
+struct drm_dp_query_payload {
+ u8 port_number;
+ u8 vcpi;
+};
+
+struct drm_dp_resource_status_notify {
+ u8 port_number;
+ u8 guid[16];
+ u16 available_pbn;
+};
+
+struct drm_dp_query_payload_ack_reply {
+ u8 port_number;
+ u8 allocated_pbn;
+};
+
+struct drm_dp_sideband_msg_req_body {
+ u8 req_type;
+ union ack_req {
+ struct drm_dp_connection_status_notify conn_stat;
+ struct drm_dp_port_number_req port_num;
+ struct drm_dp_resource_status_notify resource_stat;
+
+ struct drm_dp_query_payload query_payload;
+ struct drm_dp_allocate_payload allocate_payload;
+
+ struct drm_dp_remote_dpcd_read dpcd_read;
+ struct drm_dp_remote_dpcd_write dpcd_write;
+
+ struct drm_dp_remote_i2c_read i2c_read;
+ struct drm_dp_remote_i2c_write i2c_write;
+ } u;
+};
+
+struct drm_dp_sideband_msg_reply_body {
+ u8 reply_type;
+ u8 req_type;
+ union ack_replies {
+ struct drm_dp_nak_reply nak;
+ struct drm_dp_link_address_ack_reply link_addr;
+ struct drm_dp_port_number_rep port_number;
+
+ struct drm_dp_enum_path_resources_ack_reply path_resources;
+ struct drm_dp_allocate_payload_ack_reply allocate_payload;
+ struct drm_dp_query_payload_ack_reply query_payload;
+
+ struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;
+ struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;
+ struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;
+
+ struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
+ struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
+ struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;
+ } u;
+};
+
+/* msg is queued to be put into a slot */
+#define DRM_DP_SIDEBAND_TX_QUEUED 0
+/* msg has started transmitting on a slot - still on msgq */
+#define DRM_DP_SIDEBAND_TX_START_SEND 1
+/* msg has finished transmitting on a slot - removed from msgq only in slot */
+#define DRM_DP_SIDEBAND_TX_SENT 2
+/* msg has received a response - removed from slot */
+#define DRM_DP_SIDEBAND_TX_RX 3
+#define DRM_DP_SIDEBAND_TX_TIMEOUT 4
+
+struct drm_dp_sideband_msg_tx {
+ u8 msg[256];
+ u8 chunk[48];
+ u8 cur_offset;
+ u8 cur_len;
+ struct drm_dp_mst_branch *dst;
+ struct list_head next;
+ int seqno;
+ int state;
+ bool path_msg;
+ struct drm_dp_sideband_msg_reply_body reply;
+};
+
+/* sideband msg handler */
+struct drm_dp_mst_topology_mgr;
+struct drm_dp_mst_topology_cbs {
+ /* create a connector for a port */
+ struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
+ void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_connector *connector);
+ void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
+
+};
+
+#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)
+
+#define DP_PAYLOAD_LOCAL 1
+#define DP_PAYLOAD_REMOTE 2
+#define DP_PAYLOAD_DELETE_LOCAL 3
+
+struct drm_dp_payload {
+ int payload_state;
+ int start_slot;
+ int num_slots;
+ int vcpi;
+};
+
+/**
+ * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
+ * @dev: device pointer for adding i2c devices etc.
+ * @cbs: callbacks for connector addition and destruction.
+ * @max_dpcd_transaction_bytes: maximum number of bytes to read/write in one go.
+ * @aux: aux channel for the DP connector.
+ * @max_payloads: maximum number of payloads the GPU can generate.
+ * @conn_base_id: DRM connector ID this mgr is connected to.
+ * @down_rep_recv: msg receiver state for down replies.
+ * @up_req_recv: msg receiver state for up requests.
+ * @lock: protects mst state, primary, guid, dpcd.
+ * @mst_state: if this manager is enabled for an MST capable port.
+ * @mst_primary: pointer to the primary branch device.
+ * @guid_valid: GUID valid for the primary branch device.
+ * @guid: GUID for primary port.
+ * @dpcd: cache of DPCD for primary port.
+ * @pbn_div: PBN to slots divisor.
+ *
+ * This struct represents the toplevel displayport MST topology manager.
+ * There should be one instance of this for every MST capable DP connector
+ * on the GPU.
+ */
+struct drm_dp_mst_topology_mgr {
+
+ struct device *dev;
+ struct drm_dp_mst_topology_cbs *cbs;
+ int max_dpcd_transaction_bytes;
+ struct drm_dp_aux *aux; /* auxch for this topology mgr to use */
+ int max_payloads;
+ int conn_base_id;
+
+ /* only ever accessed from the workqueue - which should be serialised */
+ struct drm_dp_sideband_msg_rx down_rep_recv;
+ struct drm_dp_sideband_msg_rx up_req_recv;
+
+ /* pointer to info about the initial MST device */
+ struct mutex lock; /* protects mst_state + primary + guid + dpcd */
+
+ bool mst_state;
+ struct drm_dp_mst_branch *mst_primary;
+ /* primary MST device GUID */
+ bool guid_valid;
+ u8 guid[16];
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ u8 sink_count;
+ int pbn_div;
+ int total_slots;
+ int avail_slots;
+ int total_pbn;
+
+ /* messages to be transmitted */
+ /* qlock protects the upq/downq and in_progress,
+ the mstb tx_slots and txmsg->state once they are queued */
+ struct mutex qlock;
+ struct list_head tx_msg_downq;
+ struct list_head tx_msg_upq;
+ bool tx_down_in_progress;
+ bool tx_up_in_progress;
+
+ /* payload info + lock for it */
+ struct mutex payload_lock;
+ struct drm_dp_vcpi **proposed_vcpis;
+ struct drm_dp_payload *payloads;
+ unsigned long payload_mask;
+ unsigned long vcpi_mask;
+
+ wait_queue_head_t tx_waitq;
+ struct work_struct work;
+
+ struct work_struct tx_work;
+
+ struct list_head destroy_connector_list;
+ struct mutex destroy_connector_lock;
+ struct work_struct destroy_connector_work;
+};
+
+int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id);
+
+void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
+
+
+int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
+
+
+int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
+
+
+enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+
+struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+
+
+int drm_dp_calc_pbn_mode(int clock, int bpp);
+
+
+bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots);
+
+int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+
+
+void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+
+
+void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port);
+
+
+int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
+ int pbn);
+
+
+int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr);
+
+
+int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);
+
+int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
+
+void drm_dp_mst_dump_topology(struct seq_file *m,
+ struct drm_dp_mst_topology_mgr *mgr);
+
+void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
+int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
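
Illustrative sketch (not part of the patch) of how the entry points above are normally strung together by a driver. The transfer size, payload count and connector id below are placeholder values; a real driver picks them to match its hardware and AUX channel.

static int example_mst_setup(struct drm_dp_mst_topology_mgr *mgr,
			     struct device *dev, struct drm_dp_aux *aux,
			     int conn_base_id)
{
	int ret;

	/* 16-byte DPCD transactions, room for 8 payloads (placeholder values) */
	ret = drm_dp_mst_topology_mgr_init(mgr, dev, aux, 16, 8, conn_base_id);
	if (ret)
		return ret;

	/* switch the manager into MST mode once the sink reports MST support */
	return drm_dp_mst_topology_mgr_set_mst(mgr, true);
}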
+#endif
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
new file mode 100644
index 000000000..799050198
--- /dev/null
+++ b/include/drm/drm_edid.h
@@ -0,0 +1,393 @@
+/*
+ * Copyright © 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __DRM_EDID_H__
+#define __DRM_EDID_H__
+
+#include <linux/types.h>
+
+#define EDID_LENGTH 128
+#define DDC_ADDR 0x50
+#define DDC_ADDR2 0x52 /* E-DDC 1.2 - where DisplayID can hide */
+
+#define CEA_EXT 0x02
+#define VTB_EXT 0x10
+#define DI_EXT 0x40
+#define LS_EXT 0x50
+#define MI_EXT 0x60
+#define DISPLAYID_EXT 0x70
+
+struct est_timings {
+ u8 t1;
+ u8 t2;
+ u8 mfg_rsvd;
+} __attribute__((packed));
+
+/* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */
+#define EDID_TIMING_ASPECT_SHIFT 6
+#define EDID_TIMING_ASPECT_MASK (0x3 << EDID_TIMING_ASPECT_SHIFT)
+
+/* need to add 60 */
+#define EDID_TIMING_VFREQ_SHIFT 0
+#define EDID_TIMING_VFREQ_MASK (0x3f << EDID_TIMING_VFREQ_SHIFT)
+
+struct std_timing {
+ u8 hsize; /* need to multiply by 8 then add 248 */
+ u8 vfreq_aspect;
+} __attribute__((packed));
+
+#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1)
+#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2)
+#define DRM_EDID_PT_SEPARATE_SYNC (3 << 3)
+#define DRM_EDID_PT_STEREO (1 << 5)
+#define DRM_EDID_PT_INTERLACED (1 << 7)
+
+/* If detailed data is pixel timing */
+struct detailed_pixel_timing {
+ u8 hactive_lo;
+ u8 hblank_lo;
+ u8 hactive_hblank_hi;
+ u8 vactive_lo;
+ u8 vblank_lo;
+ u8 vactive_vblank_hi;
+ u8 hsync_offset_lo;
+ u8 hsync_pulse_width_lo;
+ u8 vsync_offset_pulse_width_lo;
+ u8 hsync_vsync_offset_pulse_width_hi;
+ u8 width_mm_lo;
+ u8 height_mm_lo;
+ u8 width_height_mm_hi;
+ u8 hborder;
+ u8 vborder;
+ u8 misc;
+} __attribute__((packed));
+
+/* If it's not pixel timing, it'll be one of the below */
+struct detailed_data_string {
+ u8 str[13];
+} __attribute__((packed));
+
+struct detailed_data_monitor_range {
+ u8 min_vfreq;
+ u8 max_vfreq;
+ u8 min_hfreq_khz;
+ u8 max_hfreq_khz;
+ u8 pixel_clock_mhz; /* need to multiply by 10 */
+ u8 flags;
+ union {
+ struct {
+ u8 reserved;
+ u8 hfreq_start_khz; /* need to multiply by 2 */
+ u8 c; /* need to divide by 2 */
+ __le16 m;
+ u8 k;
+ u8 j; /* need to divide by 2 */
+ } __attribute__((packed)) gtf2;
+ struct {
+ u8 version;
+ u8 data1; /* high 6 bits: extra clock resolution */
+ u8 data2; /* plus low 2 of above: max hactive */
+ u8 supported_aspects;
+ u8 flags; /* preferred aspect and blanking support */
+ u8 supported_scalings;
+ u8 preferred_refresh;
+ } __attribute__((packed)) cvt;
+ } formula;
+} __attribute__((packed));
+
+struct detailed_data_wpindex {
+ u8 white_yx_lo; /* Lower 2 bits each */
+ u8 white_x_hi;
+ u8 white_y_hi;
+ u8 gamma; /* need to divide by 100 then add 1 */
+} __attribute__((packed));
+
+struct detailed_data_color_point {
+ u8 windex1;
+ u8 wpindex1[3];
+ u8 windex2;
+ u8 wpindex2[3];
+} __attribute__((packed));
+
+struct cvt_timing {
+ u8 code[3];
+} __attribute__((packed));
+
+struct detailed_non_pixel {
+ u8 pad1;
+ u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name
+ fb=color point data, fa=standard timing data,
+ f9=undefined, f8=mfg. reserved */
+ u8 pad2;
+ union {
+ struct detailed_data_string str;
+ struct detailed_data_monitor_range range;
+ struct detailed_data_wpindex color;
+ struct std_timing timings[6];
+ struct cvt_timing cvt[4];
+ } data;
+} __attribute__((packed));
+
+#define EDID_DETAIL_EST_TIMINGS 0xf7
+#define EDID_DETAIL_CVT_3BYTE 0xf8
+#define EDID_DETAIL_COLOR_MGMT_DATA 0xf9
+#define EDID_DETAIL_STD_MODES 0xfa
+#define EDID_DETAIL_MONITOR_CPDATA 0xfb
+#define EDID_DETAIL_MONITOR_NAME 0xfc
+#define EDID_DETAIL_MONITOR_RANGE 0xfd
+#define EDID_DETAIL_MONITOR_STRING 0xfe
+#define EDID_DETAIL_MONITOR_SERIAL 0xff
+
+struct detailed_timing {
+ __le16 pixel_clock; /* need to multiply by 10 KHz */
+ union {
+ struct detailed_pixel_timing pixel_data;
+ struct detailed_non_pixel other_data;
+ } data;
+} __attribute__((packed));
+
+#define DRM_EDID_INPUT_SERRATION_VSYNC (1 << 0)
+#define DRM_EDID_INPUT_SYNC_ON_GREEN (1 << 1)
+#define DRM_EDID_INPUT_COMPOSITE_SYNC (1 << 2)
+#define DRM_EDID_INPUT_SEPARATE_SYNCS (1 << 3)
+#define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4)
+#define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5)
+#define DRM_EDID_INPUT_DIGITAL (1 << 7)
+#define DRM_EDID_DIGITAL_DEPTH_MASK (7 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_UNDEF (0 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_6 (1 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_8 (2 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_10 (3 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_12 (4 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_14 (5 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_16 (6 << 4)
+#define DRM_EDID_DIGITAL_DEPTH_RSVD (7 << 4)
+#define DRM_EDID_DIGITAL_TYPE_UNDEF (0)
+#define DRM_EDID_DIGITAL_TYPE_DVI (1)
+#define DRM_EDID_DIGITAL_TYPE_HDMI_A (2)
+#define DRM_EDID_DIGITAL_TYPE_HDMI_B (3)
+#define DRM_EDID_DIGITAL_TYPE_MDDI (4)
+#define DRM_EDID_DIGITAL_TYPE_DP (5)
+
+#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0)
+#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1)
+#define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 2)
+/* If analog */
+#define DRM_EDID_FEATURE_DISPLAY_TYPE (3 << 3) /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */
+/* If digital */
+#define DRM_EDID_FEATURE_COLOR_MASK (3 << 3)
+#define DRM_EDID_FEATURE_RGB (0 << 3)
+#define DRM_EDID_FEATURE_RGB_YCRCB444 (1 << 3)
+#define DRM_EDID_FEATURE_RGB_YCRCB422 (2 << 3)
+#define DRM_EDID_FEATURE_RGB_YCRCB (3 << 3) /* both 4:4:4 and 4:2:2 */
+
+#define DRM_EDID_FEATURE_PM_ACTIVE_OFF (1 << 5)
+#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6)
+#define DRM_EDID_FEATURE_PM_STANDBY (1 << 7)
+
+#define DRM_EDID_HDMI_DC_48 (1 << 6)
+#define DRM_EDID_HDMI_DC_36 (1 << 5)
+#define DRM_EDID_HDMI_DC_30 (1 << 4)
+#define DRM_EDID_HDMI_DC_Y444 (1 << 3)
+
+/* ELD Header Block */
+#define DRM_ELD_HEADER_BLOCK_SIZE 4
+
+#define DRM_ELD_VER 0
+# define DRM_ELD_VER_SHIFT 3
+# define DRM_ELD_VER_MASK (0x1f << 3)
+# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */
+# define DRM_ELD_VER_CANNED (0x1f << 3)
+
+#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */
+
+/* ELD Baseline Block for ELD_Ver == 2 */
+#define DRM_ELD_CEA_EDID_VER_MNL 4
+# define DRM_ELD_CEA_EDID_VER_SHIFT 5
+# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5)
+# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5)
+# define DRM_ELD_MNL_SHIFT 0
+# define DRM_ELD_MNL_MASK (0x1f << 0)
+
+#define DRM_ELD_SAD_COUNT_CONN_TYPE 5
+# define DRM_ELD_SAD_COUNT_SHIFT 4
+# define DRM_ELD_SAD_COUNT_MASK (0xf << 4)
+# define DRM_ELD_CONN_TYPE_SHIFT 2
+# define DRM_ELD_CONN_TYPE_MASK (3 << 2)
+# define DRM_ELD_CONN_TYPE_HDMI (0 << 2)
+# define DRM_ELD_CONN_TYPE_DP (1 << 2)
+# define DRM_ELD_SUPPORTS_AI (1 << 1)
+# define DRM_ELD_SUPPORTS_HDCP (1 << 0)
+
+#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */
+# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */
+
+#define DRM_ELD_SPEAKER 7
+# define DRM_ELD_SPEAKER_RLRC (1 << 6)
+# define DRM_ELD_SPEAKER_FLRC (1 << 5)
+# define DRM_ELD_SPEAKER_RC (1 << 4)
+# define DRM_ELD_SPEAKER_RLR (1 << 3)
+# define DRM_ELD_SPEAKER_FC (1 << 2)
+# define DRM_ELD_SPEAKER_LFE (1 << 1)
+# define DRM_ELD_SPEAKER_FLR (1 << 0)
+
+#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */
+# define DRM_ELD_PORT_ID_LEN 8
+
+#define DRM_ELD_MANUFACTURER_NAME0 16
+#define DRM_ELD_MANUFACTURER_NAME1 17
+
+#define DRM_ELD_PRODUCT_CODE0 18
+#define DRM_ELD_PRODUCT_CODE1 19
+
+#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */
+
+#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad))
+
+struct edid {
+ u8 header[8];
+ /* Vendor & product info */
+ u8 mfg_id[2];
+ u8 prod_code[2];
+ u32 serial; /* FIXME: byte order */
+ u8 mfg_week;
+ u8 mfg_year;
+ /* EDID version */
+ u8 version;
+ u8 revision;
+ /* Display info: */
+ u8 input;
+ u8 width_cm;
+ u8 height_cm;
+ u8 gamma;
+ u8 features;
+ /* Color characteristics */
+ u8 red_green_lo;
+ u8 black_white_lo;
+ u8 red_x;
+ u8 red_y;
+ u8 green_x;
+ u8 green_y;
+ u8 blue_x;
+ u8 blue_y;
+ u8 white_x;
+ u8 white_y;
+ /* Est. timings and mfg rsvd timings*/
+ struct est_timings established_timings;
+ /* Standard timings 1-8*/
+ struct std_timing standard_timings[8];
+ /* Detailed timings 1-4 */
+ struct detailed_timing detailed_timings[4];
+ /* Number of 128 byte ext. blocks */
+ u8 extensions;
+ /* Checksum */
+ u8 checksum;
+} __attribute__((packed));
+
+#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
+
+/* Short Audio Descriptor */
+struct cea_sad {
+ u8 format;
+ u8 channels; /* max number of channels - 1 */
+ u8 freq;
+ u8 byte2; /* meaning depends on format */
+};
+
+struct drm_encoder;
+struct drm_connector;
+struct drm_display_mode;
+struct hdmi_avi_infoframe;
+struct hdmi_vendor_infoframe;
+
+void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid);
+int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
+int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb);
+int drm_av_sync_delay(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
+ struct drm_display_mode *mode);
+int drm_load_edid_firmware(struct drm_connector *connector);
+
+int
+drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
+ const struct drm_display_mode *mode);
+int
+drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
+ const struct drm_display_mode *mode);
+
+/**
+ * drm_eld_mnl - Get ELD monitor name length in bytes.
+ * @eld: pointer to an eld memory structure with mnl set
+ */
+static inline int drm_eld_mnl(const uint8_t *eld)
+{
+ return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
+}
+
+/**
+ * drm_eld_sad_count - Get ELD SAD count.
+ * @eld: pointer to an eld memory structure with sad_count set
+ */
+static inline int drm_eld_sad_count(const uint8_t *eld)
+{
+ return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >>
+ DRM_ELD_SAD_COUNT_SHIFT;
+}
+
+/**
+ * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes
+ * @eld: pointer to an eld memory structure with mnl and sad_count set
+ *
+ * This is a helper for determining the payload size of the baseline block, in
+ * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block.
+ */
+static inline int drm_eld_calc_baseline_block_size(const uint8_t *eld)
+{
+ return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE +
+ drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3;
+}
+
+/**
+ * drm_eld_size - Get ELD size in bytes
+ * @eld: pointer to a complete eld memory structure
+ *
+ * The returned value does not include the vendor block, which is vendor
+ * specific and comprises the remaining bytes in the ELD memory buffer after
+ * drm_eld_size() bytes of header and baseline block.
+ *
+ * The returned value is guaranteed to be a multiple of 4.
+ */
+static inline int drm_eld_size(const uint8_t *eld)
+{
+ return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4;
+}
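
As a hedged illustration (editorial, not part of the header) of how the ELD offsets and accessors above combine, the loop below walks the Short Audio Descriptors of a raw ELD buffer; the buffer is assumed to have been populated beforehand, e.g. by drm_edid_to_eld().

static void example_dump_sads(const uint8_t *eld)
{
	int i, mnl = drm_eld_mnl(eld);

	for (i = 0; i < drm_eld_sad_count(eld); i++) {
		const uint8_t *sad = eld + DRM_ELD_CEA_SAD(mnl, i);

		/* byte 0: coding type/channel count, byte 1: sample rates,
		 * byte 2: format dependent (e.g. bit depths for LPCM) */
		pr_info("SAD %d: %02x %02x %02x\n", i, sad[0], sad[1], sad[2]);
	}
}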
+
+struct edid *drm_do_get_edid(struct drm_connector *connector,
+ int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
+ size_t len),
+ void *data);
+
+#endif /* __DRM_EDID_H__ */
diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h
new file mode 100644
index 000000000..8b9cc3671
--- /dev/null
+++ b/include/drm/drm_encoder_slave.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __DRM_ENCODER_SLAVE_H__
+#define __DRM_ENCODER_SLAVE_H__
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+
+/**
+ * struct drm_encoder_slave_funcs - Entry points exposed by a slave encoder driver
+ * @set_config: Initialize any encoder-specific modesetting parameters.
+ * The meaning of the @params parameter is implementation
+ * dependent. It will usually be a structure with DVO port
+ * data format settings or timings. It's not required for
+ * the new parameters to take effect until the next mode
+ * is set.
+ *
+ * Most of its members are analogous to the function pointers in
+ * &drm_encoder_helper_funcs and they can optionally be used to
+ * initialize the latter. Connector-like methods (e.g. @get_modes and
+ * @set_property) will typically be wrapped around and only be called
+ * if the encoder is the currently selected one for the connector.
+ */
+struct drm_encoder_slave_funcs {
+ void (*set_config)(struct drm_encoder *encoder,
+ void *params);
+
+ void (*destroy)(struct drm_encoder *encoder);
+ void (*dpms)(struct drm_encoder *encoder, int mode);
+ void (*save)(struct drm_encoder *encoder);
+ void (*restore)(struct drm_encoder *encoder);
+ bool (*mode_fixup)(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ int (*mode_valid)(struct drm_encoder *encoder,
+ struct drm_display_mode *mode);
+ void (*mode_set)(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ enum drm_connector_status (*detect)(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+ int (*get_modes)(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+ int (*create_resources)(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+ int (*set_property)(struct drm_encoder *encoder,
+ struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val);
+
+};
+
+/**
+ * struct drm_encoder_slave - Slave encoder struct
+ * @base: DRM encoder object.
+ * @slave_funcs: Slave encoder callbacks.
+ * @slave_priv: Slave encoder private data.
+ * @bus_priv: Bus specific data.
+ *
+ * A &drm_encoder_slave has two sets of callbacks, @slave_funcs and the
+ * ones in @base. The former are never actually called by the common
+ * CRTC code, it's just a convenience for splitting the encoder
+ * functions in an upper, GPU-specific layer and a (hopefully)
+ * GPU-agnostic lower layer: It's the GPU driver's responsibility to
+ * call the slave methods when appropriate.
+ *
+ * drm_i2c_encoder_init() provides a way to get an implementation of
+ * this.
+ */
+struct drm_encoder_slave {
+ struct drm_encoder base;
+
+ struct drm_encoder_slave_funcs *slave_funcs;
+ void *slave_priv;
+ void *bus_priv;
+};
+#define to_encoder_slave(x) container_of((x), struct drm_encoder_slave, base)
+
+int drm_i2c_encoder_init(struct drm_device *dev,
+ struct drm_encoder_slave *encoder,
+ struct i2c_adapter *adap,
+ const struct i2c_board_info *info);
+
+
+/**
+ * struct drm_i2c_encoder_driver
+ *
+ * Describes a device driver for an encoder connected to the GPU
+ * through an I2C bus. In addition to the entry points in @i2c_driver
+ * an @encoder_init function should be provided. It will be called to
+ * give the driver an opportunity to allocate any per-encoder data
+ * structures and to initialize the @slave_funcs and (optionally)
+ * @slave_priv members of @encoder.
+ */
+struct drm_i2c_encoder_driver {
+ struct i2c_driver i2c_driver;
+
+ int (*encoder_init)(struct i2c_client *client,
+ struct drm_device *dev,
+ struct drm_encoder_slave *encoder);
+
+};
+#define to_drm_i2c_encoder_driver(x) container_of((x), \
+ struct drm_i2c_encoder_driver, \
+ i2c_driver)
+
+/**
+ * drm_i2c_encoder_get_client - Get the I2C client corresponding to an encoder
+ */
+static inline struct i2c_client *drm_i2c_encoder_get_client(struct drm_encoder *encoder)
+{
+ return (struct i2c_client *)to_encoder_slave(encoder)->bus_priv;
+}
+
+/**
+ * drm_i2c_encoder_register - Register an I2C encoder driver
+ * @owner: Module containing the driver.
+ * @driver: Driver to be registered.
+ */
+static inline int drm_i2c_encoder_register(struct module *owner,
+ struct drm_i2c_encoder_driver *driver)
+{
+ return i2c_register_driver(owner, &driver->i2c_driver);
+}
+
+/**
+ * drm_i2c_encoder_unregister - Unregister an I2C encoder driver
+ * @driver: Driver to be unregistered.
+ */
+static inline void drm_i2c_encoder_unregister(struct drm_i2c_encoder_driver *driver)
+{
+ i2c_del_driver(&driver->i2c_driver);
+}
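
To show how the registration helpers above are meant to be wired up, here is a skeletal, hypothetical I2C slave encoder driver. The "foo" names and the empty callback table are placeholders, not an existing driver.

static struct drm_encoder_slave_funcs foo_slave_funcs = {
	/* a real driver fills in dpms, mode_set, detect, ... here */
};

static int foo_encoder_init(struct i2c_client *client,
			    struct drm_device *dev,
			    struct drm_encoder_slave *encoder)
{
	/* allocate per-encoder state here, then hook up the callbacks */
	encoder->slave_funcs = &foo_slave_funcs;
	return 0;
}

static struct drm_i2c_encoder_driver foo_driver = {
	.i2c_driver = {
		.driver = { .name = "foo" },
	},
	.encoder_init = foo_encoder_init,
};

/* module load/unload would then call: */
/*	drm_i2c_encoder_register(THIS_MODULE, &foo_driver);	*/
/*	drm_i2c_encoder_unregister(&foo_driver);		*/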
+
+void drm_i2c_encoder_destroy(struct drm_encoder *encoder);
+
+
+/*
+ * Wrapper functions which can be plugged into drm_encoder_helper_funcs:
+ */
+
+void drm_i2c_encoder_dpms(struct drm_encoder *encoder, int mode);
+bool drm_i2c_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+void drm_i2c_encoder_prepare(struct drm_encoder *encoder);
+void drm_i2c_encoder_commit(struct drm_encoder *encoder);
+void drm_i2c_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+enum drm_connector_status drm_i2c_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+void drm_i2c_encoder_save(struct drm_encoder *encoder);
+void drm_i2c_encoder_restore(struct drm_encoder *encoder);
+
+
+#endif
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
new file mode 100644
index 000000000..c54cf3d4a
--- /dev/null
+++ b/include/drm/drm_fb_cma_helper.h
@@ -0,0 +1,31 @@
+#ifndef __DRM_FB_CMA_HELPER_H__
+#define __DRM_FB_CMA_HELPER_H__
+
+struct drm_fbdev_cma;
+struct drm_gem_cma_object;
+
+struct drm_framebuffer;
+struct drm_device;
+struct drm_file;
+struct drm_mode_fb_cmd2;
+
+struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
+ unsigned int preferred_bpp, unsigned int num_crtc,
+ unsigned int max_conn_count);
+void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
+
+void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma);
+void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma);
+
+struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
+ struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd);
+
+struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
+ unsigned int plane);
+
+#ifdef CONFIG_DEBUG_FS
+int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg);
+#endif
+
+#endif
+
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
new file mode 100644
index 000000000..0dfd94def
--- /dev/null
+++ b/include/drm/drm_fb_helper.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2006-2009 Red Hat Inc.
+ * Copyright (c) 2006-2008 Intel Corporation
+ * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
+ *
+ * DRM framebuffer helper functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+#ifndef DRM_FB_HELPER_H
+#define DRM_FB_HELPER_H
+
+struct drm_fb_helper;
+
+#include <linux/kgdb.h>
+
+struct drm_fb_offset {
+ int x, y;
+};
+
+struct drm_fb_helper_crtc {
+ struct drm_mode_set mode_set;
+ struct drm_display_mode *desired_mode;
+ int x, y;
+};
+
+/**
+ * struct drm_fb_helper_surface_size - describes fbdev size and scanout surface size
+ * @fb_width: fbdev width
+ * @fb_height: fbdev height
+ * @surface_width: scanout buffer width
+ * @surface_height: scanout buffer height
+ * @surface_bpp: scanout buffer bpp
+ * @surface_depth: scanout buffer depth
+ *
+ * Note that the scanout surface width/height may be larger than the fbdev
+ * width/height. In case of multiple displays, the scanout surface is sized
+ * according to the largest width/height (so it is large enough for all CRTCs
+ * to scanout). But the fbdev width/height is sized to the minimum width/
+ * height of all the displays. This ensures that fbcon fits on the smallest
+ * of the attached displays.
+ *
+ * So what is passed to drm_fb_helper_fill_var() should be fb_width/fb_height,
+ * rather than the surface size.
+ */
+struct drm_fb_helper_surface_size {
+ u32 fb_width;
+ u32 fb_height;
+ u32 surface_width;
+ u32 surface_height;
+ u32 surface_bpp;
+ u32 surface_depth;
+};
+
+/**
+ * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library
+ * @gamma_set: Set the given gamma lut register on the given crtc.
+ * @gamma_get: Read the given gamma lut register on the given crtc, used to
+ * save the current lut when force-restoring the fbdev for e.g.
+ * kgdb.
+ * @fb_probe: Driver callback to allocate and initialize the fbdev info
+ * structure. Furthermore it also needs to allocate the drm
+ * framebuffer used to back the fbdev.
+ * @initial_config: Setup an initial fbdev display configuration
+ *
+ * Driver callbacks used by the fbdev emulation helper library.
+ */
+struct drm_fb_helper_funcs {
+ void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno);
+ void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno);
+
+ int (*fb_probe)(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes);
+ bool (*initial_config)(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_crtc **crtcs,
+ struct drm_display_mode **modes,
+ struct drm_fb_offset *offsets,
+ bool *enabled, int width, int height);
+};
+
+struct drm_fb_helper_connector {
+ struct drm_connector *connector;
+};
+
+struct drm_fb_helper {
+ struct drm_framebuffer *fb;
+ struct drm_device *dev;
+ int crtc_count;
+ struct drm_fb_helper_crtc *crtc_info;
+ int connector_count;
+ int connector_info_alloc_count;
+ struct drm_fb_helper_connector **connector_info;
+ const struct drm_fb_helper_funcs *funcs;
+ struct fb_info *fbdev;
+ u32 pseudo_palette[17];
+ struct list_head kernel_fb_list;
+
+ /* we got a hotplug but fbdev wasn't running the console;
+ delay it until the next set_par */
+ bool delayed_hotplug;
+};
+
+void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
+ const struct drm_fb_helper_funcs *funcs);
+int drm_fb_helper_init(struct drm_device *dev,
+ struct drm_fb_helper *helper, int crtc_count,
+ int max_conn);
+void drm_fb_helper_fini(struct drm_fb_helper *helper);
+int drm_fb_helper_blank(int blank, struct fb_info *info);
+int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info);
+int drm_fb_helper_set_par(struct fb_info *info);
+int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info);
+
+bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);
+void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
+ uint32_t fb_width, uint32_t fb_height);
+void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+ uint32_t depth);
+
+int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
+
+int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
+int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
+int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
+int drm_fb_helper_debug_enter(struct fb_info *info);
+int drm_fb_helper_debug_leave(struct fb_info *info);
+struct drm_display_mode *
+drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector,
+ int width, int height);
+struct drm_display_mode *
+drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
+ int width, int height);
+
+int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector);
+int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
+ struct drm_connector *connector);
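
A hedged sketch of the usual fbdev bring-up sequence built only from the declarations above (editorial; the crtc count, connector count and bpp are placeholders, and error handling is abbreviated):

static int example_fbdev_setup(struct drm_device *dev,
			       struct drm_fb_helper *helper,
			       const struct drm_fb_helper_funcs *funcs,
			       int num_crtc, int max_conn, int bpp_sel)
{
	int ret;

	drm_fb_helper_prepare(dev, helper, funcs);

	ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn);
	if (ret)
		return ret;

	ret = drm_fb_helper_single_add_all_connectors(helper);
	if (!ret)
		ret = drm_fb_helper_initial_config(helper, bpp_sel);
	if (ret)
		drm_fb_helper_fini(helper);

	return ret;
}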
+#endif
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
new file mode 100644
index 000000000..d639049a6
--- /dev/null
+++ b/include/drm/drm_fixed.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Christian König
+ */
+#ifndef DRM_FIXED_H
+#define DRM_FIXED_H
+
+#include <linux/math64.h>
+
+typedef union dfixed {
+ u32 full;
+} fixed20_12;
+
+
+#define dfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */
+#define dfixed_const_half(A) (u32)(((A) << 12) + 2048)
+#define dfixed_const_666(A) (u32)(((A) << 12) + 2731)
+#define dfixed_const_8(A) (u32)(((A) << 12) + 3277)
+#define dfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12)
+#define dfixed_init(A) { .full = dfixed_const((A)) }
+#define dfixed_init_half(A) { .full = dfixed_const_half((A)) }
+#define dfixed_trunc(A) ((A).full >> 12)
+#define dfixed_frac(A) ((A).full & ((1 << 12) - 1))
+
+static inline u32 dfixed_floor(fixed20_12 A)
+{
+ u32 non_frac = dfixed_trunc(A);
+
+ return dfixed_const(non_frac);
+}
+
+static inline u32 dfixed_ceil(fixed20_12 A)
+{
+ u32 non_frac = dfixed_trunc(A);
+
+ if (A.full > dfixed_const(non_frac))
+ return dfixed_const(non_frac + 1);
+ else
+ return dfixed_const(non_frac);
+}
+
+static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
+{
+ u64 tmp = ((u64)A.full << 13);
+
+ do_div(tmp, B.full);
+ tmp += 1;
+ tmp /= 2;
+ return lower_32_bits(tmp);
+}
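
For a concrete feel of the 20.12 format handled by the macros and helpers above, this editorial sketch multiplies 2.5 by 3.0 and truncates the result back to an integer:

static inline u32 example_dfixed_math(void)
{
	fixed20_12 a = dfixed_init_half(2);	/* 2.5 in 20.12: (2 << 12) + 2048 */
	fixed20_12 b = dfixed_init(3);		/* 3.0 in 20.12: 3 << 12 */
	fixed20_12 r;

	r.full = dfixed_mul(a, b);		/* 7.5 in 20.12, i.e. 30720 */
	return dfixed_trunc(r);			/* 7 */
}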
+
+#define DRM_FIXED_POINT 32
+#define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT)
+#define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
+#define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK)
+
+static inline s64 drm_int2fixp(int a)
+{
+ return ((s64)a) << DRM_FIXED_POINT;
+}
+
+static inline int drm_fixp2int(int64_t a)
+{
+ return ((s64)a) >> DRM_FIXED_POINT;
+}
+
+static inline unsigned drm_fixp_msbset(int64_t a)
+{
+ unsigned shift, sign = (a >> 63) & 1;
+
+ for (shift = 62; shift > 0; --shift)
+ if (((a >> shift) & 1) != sign)
+ return shift;
+
+ return 0;
+}
+
+static inline s64 drm_fixp_mul(s64 a, s64 b)
+{
+ unsigned shift = drm_fixp_msbset(a) + drm_fixp_msbset(b);
+ s64 result;
+
+ if (shift > 61) {
+ shift = shift - 61;
+ a >>= (shift >> 1) + (shift & 1);
+ b >>= shift >> 1;
+ } else
+ shift = 0;
+
+ result = a * b;
+
+ if (shift > DRM_FIXED_POINT)
+ return result << (shift - DRM_FIXED_POINT);
+
+ if (shift < DRM_FIXED_POINT)
+ return result >> (DRM_FIXED_POINT - shift);
+
+ return result;
+}
+
+static inline s64 drm_fixp_div(s64 a, s64 b)
+{
+ unsigned shift = 62 - drm_fixp_msbset(a);
+ s64 result;
+
+ a <<= shift;
+
+ if (shift < DRM_FIXED_POINT)
+ b >>= (DRM_FIXED_POINT - shift);
+
+ result = div64_s64(a, b);
+
+ if (shift > DRM_FIXED_POINT)
+ return result >> (shift - DRM_FIXED_POINT);
+
+ return result;
+}
+
+static inline s64 drm_fixp_exp(s64 x)
+{
+ s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000);
+ s64 sum = DRM_FIXED_ONE, term, y = x;
+ u64 count = 1;
+
+ if (x < 0)
+ y = -1 * x;
+
+ term = y;
+
+ while (term >= tolerance) {
+ sum = sum + term;
+ count = count + 1;
+ term = drm_fixp_mul(term, div64_s64(y, count));
+ }
+
+ if (x < 0)
+ sum = drm_fixp_div(DRM_FIXED_ONE, sum);
+
+ return sum;
+}
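/*
 * Editorial note: drm_fixp_exp() above evaluates the Maclaurin series
 *
 *	e^y = 1 + y + y^2/2! + y^3/3! + ...
 *
 * building each term incrementally as term *= y/count, and stops once a term
 * drops below 1/1,000,000 in 32.32 fixed point; for negative x it returns
 * 1/e^|x| via drm_fixp_div().
 */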
+
+#endif
diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h
new file mode 100644
index 000000000..d387cf06a
--- /dev/null
+++ b/include/drm/drm_flip_work.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef DRM_FLIP_WORK_H
+#define DRM_FLIP_WORK_H
+
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+/**
+ * DOC: flip utils
+ *
+ * Util to queue up work to run from work-queue context after flip/vblank.
+ * Typically this can be used to defer unref of framebuffers, cursor
+ * bos, etc. until after vblank. The APIs are all thread-safe.
+ * Moreover, drm_flip_work_queue_task and drm_flip_work_queue can be called
+ * in atomic context.
+ */
+
+struct drm_flip_work;
+
+/*
+ * drm_flip_func_t - callback function
+ *
+ * @work: the flip work
+ * @val: value queued via drm_flip_work_queue()
+ *
+ * Callback function to be called for each of the queued work items after
+ * drm_flip_work_commit() is called.
+ */
+typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val);
+
+/**
+ * struct drm_flip_task - flip work task
+ * @node: list entry element
+ * @data: data to pass to work->func
+ */
+struct drm_flip_task {
+ struct list_head node;
+ void *data;
+};
+
+/**
+ * struct drm_flip_work - flip work queue
+ * @name: debug name
+ * @func: callback fxn called for each committed item
+ * @worker: worker which calls @func
+ * @queued: queued tasks
+ * @commited: committed tasks
+ * @lock: lock to access queued and commited lists
+ */
+struct drm_flip_work {
+ const char *name;
+ drm_flip_func_t func;
+ struct work_struct worker;
+ struct list_head queued;
+ struct list_head commited;
+ spinlock_t lock;
+};
+
+struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags);
+void drm_flip_work_queue_task(struct drm_flip_work *work,
+ struct drm_flip_task *task);
+void drm_flip_work_queue(struct drm_flip_work *work, void *val);
+void drm_flip_work_commit(struct drm_flip_work *work,
+ struct workqueue_struct *wq);
+void drm_flip_work_init(struct drm_flip_work *work,
+ const char *name, drm_flip_func_t func);
+void drm_flip_work_cleanup(struct drm_flip_work *work);
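
Minimal usage sketch for the API above (editorial): the flip work is assumed to have been set up earlier with drm_flip_work_init() and a release callback; the old framebuffer is queued from the flip-done handler and the queued items are then committed to a workqueue.

static void example_defer_unref(struct drm_flip_work *unref_work,
				struct drm_framebuffer *old_fb,
				struct workqueue_struct *wq)
{
	/* queueing is safe from atomic context, e.g. the flip-done irq */
	drm_flip_work_queue(unref_work, old_fb);

	/* kick the worker; @func will release old_fb from process context */
	drm_flip_work_commit(unref_work, wq);
}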
+
+#endif /* DRM_FLIP_WORK_H */
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
new file mode 100644
index 000000000..7a592d7e3
--- /dev/null
+++ b/include/drm/drm_gem.h
@@ -0,0 +1,185 @@
+#ifndef __DRM_GEM_H__
+#define __DRM_GEM_H__
+
+/*
+ * GEM Graphics Execution Manager Driver Interfaces
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009-2010, Code Aurora Forum.
+ * All rights reserved.
+ * Copyright © 2014 Intel Corporation
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author: Gareth Hughes <gareth@valinux.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * This structure defines the generic GEM buffer object, which the DRM core
+ * uses to back driver buffer objects.
+ */
+struct drm_gem_object {
+ /** Reference count of this object */
+ struct kref refcount;
+
+ /**
+ * handle_count - gem file_priv handle count of this object
+ *
+ * Each handle also holds a reference. Note that when the handle_count
+ * drops to 0 any global names (e.g. the id in the flink namespace) will
+ * be cleared.
+ *
+ * Protected by dev->object_name_lock.
+ */
+ unsigned handle_count;
+
+ /** Related drm device */
+ struct drm_device *dev;
+
+ /** File representing the shmem storage */
+ struct file *filp;
+
+ /* Mapping info for this object */
+ struct drm_vma_offset_node vma_node;
+
+ /**
+ * Size of the object, in bytes. Immutable over the object's
+ * lifetime.
+ */
+ size_t size;
+
+ /**
+ * Global name for this object, starts at 1. 0 means unnamed.
+ * Access is covered by the object_name_lock in the related drm_device
+ */
+ int name;
+
+ /**
+ * Memory domains. These monitor which caches contain read/write data
+ * related to the object. When transitioning from one set of domains
+ * to another, the driver is called to ensure that caches are suitably
+ * flushed and invalidated
+ */
+ uint32_t read_domains;
+ uint32_t write_domain;
+
+ /**
+ * While validating an exec operation, the
+ * new read/write domain values are computed here.
+ * They will be transferred to the above values
+ * at the point that any cache flushing occurs
+ */
+ uint32_t pending_read_domains;
+ uint32_t pending_write_domain;
+
+ /**
+ * dma_buf - dma buf associated with this GEM object
+ *
+ * Pointer to the dma-buf associated with this gem object (either
+ * through importing or exporting). We break the resulting reference
+ * loop when the last gem handle for this object is released.
+ *
+ * Protected by obj->object_name_lock
+ */
+ struct dma_buf *dma_buf;
+
+ /**
+ * import_attach - dma buf attachment backing this object
+ *
+ * Any foreign dma_buf imported as a gem object has this set to the
+ * attachment point for the device. This is invariant over the lifetime
+ * of a gem object.
+ *
+ * The driver's ->gem_free_object callback is responsible for cleaning
+ * up the dma_buf attachment and references acquired at import time.
+ *
+ * Note that the drm gem/prime core does not depend upon drivers setting
+ * this field any more. So for drivers where this doesn't make sense
+ * (e.g. virtual devices or a DisplayLink device behind a USB bus) they can
+ * simply leave it as NULL.
+ */
+ struct dma_buf_attachment *import_attach;
+};
+
+void drm_gem_object_release(struct drm_gem_object *obj);
+void drm_gem_object_free(struct kref *kref);
+int drm_gem_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size);
+void drm_gem_private_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size);
+void drm_gem_vm_open(struct vm_area_struct *vma);
+void drm_gem_vm_close(struct vm_area_struct *vma);
+int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
+ struct vm_area_struct *vma);
+int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+
+static inline void
+drm_gem_object_reference(struct drm_gem_object *obj)
+{
+ kref_get(&obj->refcount);
+}
+
+static inline void
+drm_gem_object_unreference(struct drm_gem_object *obj)
+{
+ if (obj != NULL)
+ kref_put(&obj->refcount, drm_gem_object_free);
+}
+
+static inline void
+drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
+{
+ struct drm_device *dev;
+
+ if (!obj)
+ return;
+
+ dev = obj->dev;
+ if (kref_put_mutex(&obj->refcount, drm_gem_object_free, &dev->struct_mutex))
+ mutex_unlock(&dev->struct_mutex);
+ else
+ might_lock(&dev->struct_mutex);
+}
+
+int drm_gem_handle_create(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ u32 *handlep);
+int drm_gem_handle_delete(struct drm_file *filp, u32 handle);
+
+
+void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
+int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
+int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
+
+struct page **drm_gem_get_pages(struct drm_gem_object *obj);
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+ bool dirty, bool accessed);
+
+struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
+ struct drm_file *filp,
+ u32 handle);
+int drm_gem_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle);
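
As a hedged illustration of the object/handle lifecycle described above, and not a real driver path: a driver normally embeds struct drm_gem_object in its own object type and must provide a gem_free_object callback before the final unreference can run; the bare allocation below is purely for illustration.

static int example_gem_create(struct drm_device *dev, struct drm_file *file,
			      size_t size, u32 *handlep)
{
	struct drm_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret) {
		kfree(obj);
		return ret;
	}

	ret = drm_gem_handle_create(file, obj, handlep);
	/* the handle now holds its own reference; drop the creation reference */
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}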
+
+#endif /* __DRM_GEM_H__ */
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
new file mode 100644
index 000000000..acd6af8a8
--- /dev/null
+++ b/include/drm/drm_gem_cma_helper.h
@@ -0,0 +1,70 @@
+#ifndef __DRM_GEM_CMA_HELPER_H__
+#define __DRM_GEM_CMA_HELPER_H__
+
+#include <drm/drmP.h>
+#include <drm/drm_gem.h>
+
+/**
+ * struct drm_gem_cma_object - GEM object backed by CMA memory allocations
+ * @base: base GEM object
+ * @paddr: physical address of the backing memory
+ * @sgt: scatter/gather table for imported PRIME buffers
+ * @vaddr: kernel virtual address of the backing memory
+ */
+struct drm_gem_cma_object {
+ struct drm_gem_object base;
+ dma_addr_t paddr;
+ struct sg_table *sgt;
+
+ /* For objects with DMA memory allocated by GEM CMA */
+ void *vaddr;
+};
+
+static inline struct drm_gem_cma_object *
+to_drm_gem_cma_obj(struct drm_gem_object *gem_obj)
+{
+ return container_of(gem_obj, struct drm_gem_cma_object, base);
+}
+
+/* free GEM object */
+void drm_gem_cma_free_object(struct drm_gem_object *gem_obj);
+
+/* create memory region for DRM framebuffer */
+int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args);
+
+/* create memory region for DRM framebuffer */
+int drm_gem_cma_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args);
+
+/* map memory region for DRM framebuffer to user space */
+int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *drm, u32 handle,
+ u64 *offset);
+
+/* mmap a buffer into userspace; vm_flags and other VM attributes can be adjusted here */
+int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* allocate physical memory */
+struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
+ size_t size);
+
+extern const struct vm_operations_struct drm_gem_cma_vm_ops;
+
+#ifdef CONFIG_DEBUG_FS
+void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m);
+#endif
+
+struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *
+drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
+void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj);
+void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
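
Hedged sketch of allocating a CMA-backed object and fetching the DMA address used for scanout (editorial; drm_gem_cma_create() is assumed to return an ERR_PTR-encoded pointer on failure, and error unwinding is abbreviated):

static dma_addr_t example_cma_scanout_addr(struct drm_device *drm, size_t size)
{
	struct drm_gem_cma_object *cma_obj;

	cma_obj = drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return 0;	/* a real driver would propagate PTR_ERR() */

	/* paddr is what gets programmed into the scanout engine */
	return cma_obj->paddr;
}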
+
+#endif /* __DRM_GEM_CMA_HELPER_H__ */
diff --git a/include/drm/drm_global.h b/include/drm/drm_global.h
new file mode 100644
index 000000000..a06805eaf
--- /dev/null
+++ b/include/drm/drm_global.h
@@ -0,0 +1,53 @@
+/**************************************************************************
+ *
+ * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#ifndef _DRM_GLOBAL_H_
+#define _DRM_GLOBAL_H_
+enum drm_global_types {
+ DRM_GLOBAL_TTM_MEM = 0,
+ DRM_GLOBAL_TTM_BO,
+ DRM_GLOBAL_TTM_OBJECT,
+ DRM_GLOBAL_NUM
+};
+
+struct drm_global_reference {
+ enum drm_global_types global_type;
+ size_t size;
+ void *object;
+ int (*init) (struct drm_global_reference *);
+ void (*release) (struct drm_global_reference *);
+};
+
+extern void drm_global_init(void);
+extern void drm_global_release(void);
+extern int drm_global_item_ref(struct drm_global_reference *ref);
+extern void drm_global_item_unref(struct drm_global_reference *ref);
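
Sketch of how a driver takes a reference on a shared global item through the mechanism above (editorial; the size and the empty callbacks are placeholders for whatever object is actually being shared):

static int example_global_init(struct drm_global_reference *ref)
{
	/* one-time construction of the shared object goes here */
	return 0;
}

static void example_global_release(struct drm_global_reference *ref)
{
	/* matching teardown when the last reference is dropped */
}

static int example_take_global(struct drm_global_reference *ref)
{
	ref->global_type = DRM_GLOBAL_TTM_MEM;
	ref->size = 64;		/* placeholder size of the shared object */
	ref->init = &example_global_init;
	ref->release = &example_global_release;
	return drm_global_item_ref(ref);
}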
+
+#endif
diff --git a/include/drm/drm_hashtab.h b/include/drm/drm_hashtab.h
new file mode 100644
index 000000000..fce2ef3fd
--- /dev/null
+++ b/include/drm/drm_hashtab.h
@@ -0,0 +1,79 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismack, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Simple open hash tab implementation.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef DRM_HASHTAB_H
+#define DRM_HASHTAB_H
+
+#include <linux/list.h>
+
+#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
+
+struct drm_hash_item {
+ struct hlist_node head;
+ unsigned long key;
+};
+
+struct drm_open_hash {
+ struct hlist_head *table;
+ u8 order;
+};
+
+extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
+extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
+extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
+ unsigned long seed, int bits, int shift,
+ unsigned long add);
+extern int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
+
+extern void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
+extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
+extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
+extern void drm_ht_remove(struct drm_open_hash *ht);
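
A small usage sketch of the open hash table API above (editorial): the table is assumed to have been created with drm_ht_create(ht, order), and lookups recover the embedding structure with drm_hash_entry().

struct example_node {
	struct drm_hash_item hash;
	int payload;
};

static int example_ht_add(struct drm_open_hash *ht, struct example_node *n,
			  unsigned long key)
{
	n->hash.key = key;
	return drm_ht_insert_item(ht, &n->hash);
}

static struct example_node *example_ht_lookup(struct drm_open_hash *ht,
					      unsigned long key)
{
	struct drm_hash_item *item;

	if (drm_ht_find_item(ht, key, &item))
		return NULL;

	return drm_hash_entry(item, struct example_node, hash);
}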
+
+/*
+ * RCU-safe interface
+ *
+ * The user of this API needs to make sure that two or more instances of the
+ * hash table manipulation functions are never run simultaneously.
+ * The lookup function drm_ht_find_item_rcu may, however, run simultaneously
+ * with any of the manipulation functions as long as it's called from within
+ * an RCU read-locked section.
+ */
+#define drm_ht_insert_item_rcu drm_ht_insert_item
+#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please
+#define drm_ht_remove_key_rcu drm_ht_remove_key
+#define drm_ht_remove_item_rcu drm_ht_remove_item
+#define drm_ht_find_item_rcu drm_ht_find_item
+
+#endif
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
new file mode 100644
index 000000000..3e698038d
--- /dev/null
+++ b/include/drm/drm_legacy.h
@@ -0,0 +1,203 @@
+#ifndef __DRM_DRM_LEGACY_H__
+#define __DRM_DRM_LEGACY_H__
+
+/*
+ * Legacy driver interfaces for the Direct Rendering Manager
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009-2010, Code Aurora Forum.
+ * All rights reserved.
+ * Copyright © 2014 Intel Corporation
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author: Gareth Hughes <gareth@valinux.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/*
+ * Legacy support for paleontological DRM drivers
+ *
+ * If you add a new driver and it uses any of these functions or structures,
+ * you're doing it terribly wrong.
+ */
+
+/**
+ * DMA buffer.
+ */
+struct drm_buf {
+ int idx; /**< Index into master buflist */
+ int total; /**< Buffer size */
+ int order; /**< log-base-2(total) */
+ int used; /**< Amount of buffer in use (for DMA) */
+ unsigned long offset; /**< Byte offset (used internally) */
+ void *address; /**< Address of buffer */
+ unsigned long bus_address; /**< Bus address of buffer */
+ struct drm_buf *next; /**< Kernel-only: used for free list */
+ __volatile__ int waiting; /**< On kernel DMA queue */
+ __volatile__ int pending; /**< On hardware DMA queue */
+ struct drm_file *file_priv; /**< Private data of the holding file descriptor */
+ int context; /**< Kernel queue for this buffer */
+ int while_locked; /**< Dispatch this buffer while locked */
+ enum {
+ DRM_LIST_NONE = 0,
+ DRM_LIST_FREE = 1,
+ DRM_LIST_WAIT = 2,
+ DRM_LIST_PEND = 3,
+ DRM_LIST_PRIO = 4,
+ DRM_LIST_RECLAIM = 5
+ } list; /**< Which list we're on */
+
+ int dev_priv_size; /**< Size of buffer private storage */
+ void *dev_private; /**< Per-buffer private storage */
+};
+
+typedef struct drm_dma_handle {
+ dma_addr_t busaddr;
+ void *vaddr;
+ size_t size;
+} drm_dma_handle_t;
+
+/**
+ * Buffer entry. There is one of this for each buffer size order.
+ */
+struct drm_buf_entry {
+ int buf_size; /**< size */
+ int buf_count; /**< number of buffers */
+ struct drm_buf *buflist; /**< buffer list */
+ int seg_count;
+ int page_order;
+ struct drm_dma_handle **seglist;
+
+ int low_mark; /**< Low water mark */
+ int high_mark; /**< High water mark */
+};
+
+/**
+ * DMA data.
+ */
+struct drm_device_dma {
+
+ struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
+ int buf_count; /**< total number of buffers */
+ struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */
+ int seg_count;
+ int page_count; /**< number of pages */
+ unsigned long *pagelist; /**< page list */
+ unsigned long byte_count;
+ enum {
+ _DRM_DMA_USE_AGP = 0x01,
+ _DRM_DMA_USE_SG = 0x02,
+ _DRM_DMA_USE_FB = 0x04,
+ _DRM_DMA_USE_PCI_RO = 0x08
+ } flags;
+
+};
+
+/**
+ * Scatter-gather memory.
+ */
+struct drm_sg_mem {
+ unsigned long handle;
+ void *virtual;
+ int pages;
+ struct page **pagelist;
+ dma_addr_t *busaddr;
+};
+
+/**
+ * Kernel side of a mapping
+ */
+struct drm_local_map {
+ resource_size_t offset; /**< Requested physical address (0 for SAREA)*/
+ unsigned long size; /**< Requested physical size (bytes) */
+ enum drm_map_type type; /**< Type of memory to map */
+ enum drm_map_flags flags; /**< Flags */
+ void *handle; /**< User-space: "Handle" to pass to mmap() */
+ /**< Kernel-space: kernel-virtual address */
+ int mtrr; /**< MTRR slot used */
+};
+
+typedef struct drm_local_map drm_local_map_t;
+
+/**
+ * Mappings list
+ */
+struct drm_map_list {
+ struct list_head head; /**< list head */
+ struct drm_hash_item hash;
+ struct drm_local_map *map; /**< mapping */
+ uint64_t user_token;
+ struct drm_master *master;
+};
+
+int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
+ unsigned int size, enum drm_map_type type,
+ enum drm_map_flags flags, struct drm_local_map **map_p);
+int drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
+int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
+struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
+int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);
+
+int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req);
+int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req);
+
+/**
+ * Test that the hardware lock is held by the caller, returning -EINVAL otherwise.
+ *
+ * \param dev DRM device.
+ * \param _file_priv DRM file private of the caller.
+ */
+#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \
+do { \
+ if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \
+ _file_priv->master->lock.file_priv != _file_priv) { \
+ DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
+ __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
+ _file_priv->master->lock.file_priv, _file_priv); \
+ return -EINVAL; \
+ } \
+} while (0)
+
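
As a hedged illustration only (this is legacy infrastructure; the handler name below is hypothetical), an ioctl handler would typically use the macro like this:

	/* Hypothetical legacy ioctl handler; new drivers must not use this. */
	static int my_legacy_dma_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv)
	{
		LOCK_TEST_WITH_RETURN(dev, file_priv);

		/* ... hardware access that requires the heavyweight HW lock ... */
		return 0;
	}
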
+void drm_legacy_idlelock_take(struct drm_lock_data *lock);
+void drm_legacy_idlelock_release(struct drm_lock_data *lock);
+
+/* drm_pci.c dma alloc wrappers */
+void __drm_legacy_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
+
+/* drm_memory.c */
+void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
+void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
+void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
+
+static __inline__ struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
+ unsigned int token)
+{
+ struct drm_map_list *_entry;
+ list_for_each_entry(_entry, &dev->maplist, head)
+ if (_entry->user_token == token)
+ return _entry->map;
+ return NULL;
+}
+
+#endif /* __DRM_DRM_LEGACY_H__ */
diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h
new file mode 100644
index 000000000..19a240446
--- /dev/null
+++ b/include/drm/drm_mem_util.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ */
+#ifndef _DRM_MEM_UTIL_H_
+#define _DRM_MEM_UTIL_H_
+
+#include <linux/vmalloc.h>
+
+static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
+{
+ if (size != 0 && nmemb > SIZE_MAX / size)
+ return NULL;
+
+ if (size * nmemb <= PAGE_SIZE)
+ return kcalloc(nmemb, size, GFP_KERNEL);
+
+ return __vmalloc(size * nmemb,
+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+}
+
+/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
+static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
+{
+ if (size != 0 && nmemb > SIZE_MAX / size)
+ return NULL;
+
+ if (size * nmemb <= PAGE_SIZE)
+ return kmalloc(nmemb * size, GFP_KERNEL);
+
+ return __vmalloc(size * nmemb,
+ GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+}
+
+static __inline void drm_free_large(void *ptr)
+{
+ if (!is_vmalloc_addr(ptr))
+ return kfree(ptr);
+
+ vfree(ptr);
+}
+
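
A minimal usage sketch (hypothetical caller; npages is assumed to come from the surrounding driver code): allocations larger than PAGE_SIZE transparently fall back to vmalloc, and drm_free_large() picks the matching free path.

	/* Hypothetical caller: allocate and release a large page-pointer array. */
	struct page **pages = drm_calloc_large(npages, sizeof(*pages));
	if (!pages)
		return -ENOMEM;

	/* ... fill and use the array ... */

	drm_free_large(pages);
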
+#endif
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
new file mode 100644
index 000000000..f1d8d0dbb
--- /dev/null
+++ b/include/drm/drm_mipi_dsi.h
@@ -0,0 +1,259 @@
+/*
+ * MIPI DSI Bus
+ *
+ * Copyright (C) 2012-2013, Samsung Electronics, Co., Ltd.
+ * Andrzej Hajda <a.hajda@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DRM_MIPI_DSI_H__
+#define __DRM_MIPI_DSI_H__
+
+#include <linux/device.h>
+
+struct mipi_dsi_host;
+struct mipi_dsi_device;
+
+/* request ACK from peripheral */
+#define MIPI_DSI_MSG_REQ_ACK BIT(0)
+/* use Low Power Mode to transmit message */
+#define MIPI_DSI_MSG_USE_LPM BIT(1)
+
+/**
+ * struct mipi_dsi_msg - read/write DSI buffer
+ * @channel: virtual channel id
+ * @type: payload data type
+ * @flags: flags controlling this message transmission
+ * @tx_len: length of @tx_buf
+ * @tx_buf: data to be written
+ * @rx_len: length of @rx_buf
+ * @rx_buf: data to be read, or NULL
+ */
+struct mipi_dsi_msg {
+ u8 channel;
+ u8 type;
+ u16 flags;
+
+ size_t tx_len;
+ const void *tx_buf;
+
+ size_t rx_len;
+ void *rx_buf;
+};
+
+bool mipi_dsi_packet_format_is_short(u8 type);
+bool mipi_dsi_packet_format_is_long(u8 type);
+
+/**
+ * struct mipi_dsi_packet - represents a MIPI DSI packet in protocol format
+ * @size: size (in bytes) of the packet
+ * @header: the four bytes that make up the header (Data ID, Word Count or
+ * Packet Data, and ECC)
+ * @payload_length: number of bytes in the payload
+ * @payload: a pointer to a buffer containing the payload, if any
+ */
+struct mipi_dsi_packet {
+ size_t size;
+ u8 header[4];
+ size_t payload_length;
+ const u8 *payload;
+};
+
+int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
+ const struct mipi_dsi_msg *msg);
+
+/**
+ * struct mipi_dsi_host_ops - DSI bus operations
+ * @attach: attach DSI device to DSI host
+ * @detach: detach DSI device from DSI host
+ * @transfer: transmit a DSI packet
+ *
+ * DSI packets transmitted by .transfer() are passed in as mipi_dsi_msg
+ * structures. This structure contains information about the type of packet
+ * being transmitted as well as the transmit and receive buffers. When an
+ * error is encountered during transmission, this function will return a
+ * negative error code. On success it shall return the number of bytes
+ * transmitted for write packets or the number of bytes received for read
+ * packets.
+ *
+ * Note that typically DSI packet transmission is atomic, so the .transfer()
+ * function will seldom return anything other than the number of bytes
+ * contained in the transmit buffer on success.
+ */
+struct mipi_dsi_host_ops {
+ int (*attach)(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi);
+ int (*detach)(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi);
+ ssize_t (*transfer)(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg);
+};
+
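
A hedged host-side sketch of how these ops are usually wired up; the my_dsi_* names and the hardware access are assumptions for illustration, not part of this header. The .transfer() hook builds a protocol packet with mipi_dsi_create_packet() and reports the number of payload bytes handled.

	static ssize_t my_dsi_transfer(struct mipi_dsi_host *host,
				       const struct mipi_dsi_msg *msg)
	{
		struct mipi_dsi_packet packet;
		int ret;

		ret = mipi_dsi_create_packet(&packet, msg);
		if (ret < 0)
			return ret;

		/* ... write packet.header and packet.payload to the hardware ... */

		return msg->tx_len;	/* bytes transmitted on success */
	}

	static const struct mipi_dsi_host_ops my_dsi_host_ops = {
		.transfer = my_dsi_transfer,
	};

The host driver would then point its struct mipi_dsi_host at my_dsi_host_ops and register it with mipi_dsi_host_register().
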
+/**
+ * struct mipi_dsi_host - DSI host device
+ * @dev: driver model device node for this DSI host
+ * @ops: DSI host operations
+ */
+struct mipi_dsi_host {
+ struct device *dev;
+ const struct mipi_dsi_host_ops *ops;
+};
+
+int mipi_dsi_host_register(struct mipi_dsi_host *host);
+void mipi_dsi_host_unregister(struct mipi_dsi_host *host);
+
+/* DSI mode flags */
+
+/* video mode */
+#define MIPI_DSI_MODE_VIDEO BIT(0)
+/* video burst mode */
+#define MIPI_DSI_MODE_VIDEO_BURST BIT(1)
+/* video pulse mode */
+#define MIPI_DSI_MODE_VIDEO_SYNC_PULSE BIT(2)
+/* enable auto vertical count mode */
+#define MIPI_DSI_MODE_VIDEO_AUTO_VERT BIT(3)
+/* enable hsync-end packets in vsync-pulse and v-porch area */
+#define MIPI_DSI_MODE_VIDEO_HSE BIT(4)
+/* disable hfront-porch area */
+#define MIPI_DSI_MODE_VIDEO_HFP BIT(5)
+/* disable hback-porch area */
+#define MIPI_DSI_MODE_VIDEO_HBP BIT(6)
+/* disable hsync-active area */
+#define MIPI_DSI_MODE_VIDEO_HSA BIT(7)
+/* flush display FIFO on vsync pulse */
+#define MIPI_DSI_MODE_VSYNC_FLUSH BIT(8)
+/* disable EoT packets in HS mode */
+#define MIPI_DSI_MODE_EOT_PACKET BIT(9)
+/* device supports non-continuous clock behavior (DSI spec 5.6.1) */
+#define MIPI_DSI_CLOCK_NON_CONTINUOUS BIT(10)
+/* transmit data in low power */
+#define MIPI_DSI_MODE_LPM BIT(11)
+
+enum mipi_dsi_pixel_format {
+ MIPI_DSI_FMT_RGB888,
+ MIPI_DSI_FMT_RGB666,
+ MIPI_DSI_FMT_RGB666_PACKED,
+ MIPI_DSI_FMT_RGB565,
+};
+
+/**
+ * struct mipi_dsi_device - DSI peripheral device
+ * @host: DSI host for this peripheral
+ * @dev: driver model device node for this peripheral
+ * @channel: virtual channel assigned to the peripheral
+ * @format: pixel format for video mode
+ * @lanes: number of active data lanes
+ * @mode_flags: DSI operation mode related flags
+ */
+struct mipi_dsi_device {
+ struct mipi_dsi_host *host;
+ struct device dev;
+
+ unsigned int channel;
+ unsigned int lanes;
+ enum mipi_dsi_pixel_format format;
+ unsigned long mode_flags;
+};
+
+static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev)
+{
+ return container_of(dev, struct mipi_dsi_device, dev);
+}
+
+struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np);
+int mipi_dsi_attach(struct mipi_dsi_device *dsi);
+int mipi_dsi_detach(struct mipi_dsi_device *dsi);
+int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
+ u16 value);
+
+ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
+ size_t size);
+ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
+ size_t num_params, void *data, size_t size);
+
+/**
+ * enum mipi_dsi_dcs_tear_mode - Tearing Effect Output Line mode
+ * @MIPI_DSI_DCS_TEAR_MODE_VBLANK: the TE output line consists of V-Blanking
+ * information only
+ * @MIPI_DSI_DCS_TEAR_MODE_VHBLANK: the TE output line consists of both
+ * V-Blanking and H-Blanking information
+ */
+enum mipi_dsi_dcs_tear_mode {
+ MIPI_DSI_DCS_TEAR_MODE_VBLANK,
+ MIPI_DSI_DCS_TEAR_MODE_VHBLANK,
+};
+
+#define MIPI_DSI_DCS_POWER_MODE_DISPLAY (1 << 2)
+#define MIPI_DSI_DCS_POWER_MODE_NORMAL (1 << 3)
+#define MIPI_DSI_DCS_POWER_MODE_SLEEP (1 << 4)
+#define MIPI_DSI_DCS_POWER_MODE_PARTIAL (1 << 5)
+#define MIPI_DSI_DCS_POWER_MODE_IDLE (1 << 6)
+
+ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device *dsi,
+ const void *data, size_t len);
+ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd,
+ const void *data, size_t len);
+ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
+ size_t len);
+int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode);
+int mipi_dsi_dcs_get_pixel_format(struct mipi_dsi_device *dsi, u8 *format);
+int mipi_dsi_dcs_enter_sleep_mode(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_exit_sleep_mode(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_set_display_off(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_set_display_on(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
+ u16 end);
+int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
+ u16 end);
+int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi);
+int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
+ enum mipi_dsi_dcs_tear_mode mode);
+int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format);
+
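
A hedged sketch of a typical DCS power-up sequence built from the helpers above; the 120 ms wake delay and the function name are panel-specific assumptions (msleep() comes from <linux/delay.h>).

	static int my_panel_power_on(struct mipi_dsi_device *dsi)
	{
		int ret;

		ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
		if (ret < 0)
			return ret;

		msleep(120);	/* assumed panel wake-up delay */

		return mipi_dsi_dcs_set_display_on(dsi);
	}
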
+/**
+ * struct mipi_dsi_driver - DSI driver
+ * @driver: device driver model driver
+ * @probe: callback for device binding
+ * @remove: callback for device unbinding
+ * @shutdown: called at shutdown time to quiesce the device
+ */
+struct mipi_dsi_driver {
+ struct device_driver driver;
+ int (*probe)(struct mipi_dsi_device *dsi);
+ int (*remove)(struct mipi_dsi_device *dsi);
+ void (*shutdown)(struct mipi_dsi_device *dsi);
+};
+
+static inline struct mipi_dsi_driver *
+to_mipi_dsi_driver(struct device_driver *driver)
+{
+ return container_of(driver, struct mipi_dsi_driver, driver);
+}
+
+static inline void *mipi_dsi_get_drvdata(const struct mipi_dsi_device *dsi)
+{
+ return dev_get_drvdata(&dsi->dev);
+}
+
+static inline void mipi_dsi_set_drvdata(struct mipi_dsi_device *dsi, void *data)
+{
+ dev_set_drvdata(&dsi->dev, data);
+}
+
+int mipi_dsi_driver_register_full(struct mipi_dsi_driver *driver,
+ struct module *owner);
+void mipi_dsi_driver_unregister(struct mipi_dsi_driver *driver);
+
+#define mipi_dsi_driver_register(driver) \
+ mipi_dsi_driver_register_full(driver, THIS_MODULE)
+
+#define module_mipi_dsi_driver(__mipi_dsi_driver) \
+ module_driver(__mipi_dsi_driver, mipi_dsi_driver_register, \
+ mipi_dsi_driver_unregister)
+
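
A minimal peripheral driver skeleton under assumed names (my_panel_*), showing how probe/remove typically describe the link and attach to the host before the driver is registered with module_mipi_dsi_driver().

	static int my_panel_probe(struct mipi_dsi_device *dsi)
	{
		/* Describe the link this peripheral expects. */
		dsi->lanes = 4;
		dsi->format = MIPI_DSI_FMT_RGB888;
		dsi->mode_flags = MIPI_DSI_MODE_VIDEO;

		return mipi_dsi_attach(dsi);
	}

	static int my_panel_remove(struct mipi_dsi_device *dsi)
	{
		return mipi_dsi_detach(dsi);
	}

	static struct mipi_dsi_driver my_panel_driver = {
		.probe = my_panel_probe,
		.remove = my_panel_remove,
		.driver = {
			.name = "my-panel",
		},
	};
	module_mipi_dsi_driver(my_panel_driver);
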
+#endif /* __DRM_MIPI_DSI_H__ */
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
new file mode 100644
index 000000000..0de6290df
--- /dev/null
+++ b/include/drm/drm_mm.h
@@ -0,0 +1,322 @@
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors:
+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _DRM_MM_H_
+#define _DRM_MM_H_
+
+/*
+ * Generic range manager structs
+ */
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+#endif
+
+enum drm_mm_search_flags {
+ DRM_MM_SEARCH_DEFAULT = 0,
+ DRM_MM_SEARCH_BEST = 1 << 0,
+ DRM_MM_SEARCH_BELOW = 1 << 1,
+};
+
+enum drm_mm_allocator_flags {
+ DRM_MM_CREATE_DEFAULT = 0,
+ DRM_MM_CREATE_TOP = 1 << 0,
+};
+
+#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
+#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP
+
+struct drm_mm_node {
+ struct list_head node_list;
+ struct list_head hole_stack;
+ unsigned hole_follows : 1;
+ unsigned scanned_block : 1;
+ unsigned scanned_prev_free : 1;
+ unsigned scanned_next_free : 1;
+ unsigned scanned_preceeds_hole : 1;
+ unsigned allocated : 1;
+ unsigned long color;
+ u64 start;
+ u64 size;
+ struct drm_mm *mm;
+};
+
+struct drm_mm {
+ /* List of all memory nodes that immediately precede a free hole. */
+ struct list_head hole_stack;
+ /* head_node.node_list is the list of all memory nodes, ordered
+ * according to the (increasing) start address of the memory node. */
+ struct drm_mm_node head_node;
+ unsigned int scan_check_range : 1;
+ unsigned scan_alignment;
+ unsigned long scan_color;
+ u64 scan_size;
+ u64 scan_hit_start;
+ u64 scan_hit_end;
+ unsigned scanned_blocks;
+ u64 scan_start;
+ u64 scan_end;
+ struct drm_mm_node *prev_scanned_node;
+
+ void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
+ u64 *start, u64 *end);
+};
+
+/**
+ * drm_mm_node_allocated - checks whether a node is allocated
+ * @node: drm_mm_node to check
+ *
+ * Drivers should use this helper for proper encapsulation of drm_mm
+ * internals.
+ *
+ * Returns:
+ * True if the @node is allocated.
+ */
+static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
+{
+ return node->allocated;
+}
+
+/**
+ * drm_mm_initialized - checks whether an allocator is initialized
+ * @mm: drm_mm to check
+ *
+ * Drivers should use this helper for proper encapsulation of drm_mm
+ * internals.
+ *
+ * Returns:
+ * True if the @mm is initialized.
+ */
+static inline bool drm_mm_initialized(struct drm_mm *mm)
+{
+ return mm->hole_stack.next;
+}
+
+static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+{
+ return hole_node->start + hole_node->size;
+}
+
+/**
+ * drm_mm_hole_node_start - computes the start of the hole following @node
+ * @hole_node: drm_mm_node which implicitly tracks the following hole
+ *
+ * This is useful for driver-specific debug dumpers. Otherwise drivers should not
+ * inspect holes themselves. Drivers must check first whether a hole indeed
+ * follows by looking at node->hole_follows.
+ *
+ * Returns:
+ * Start of the subsequent hole.
+ */
+static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+{
+ BUG_ON(!hole_node->hole_follows);
+ return __drm_mm_hole_node_start(hole_node);
+}
+
+static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+ return list_entry(hole_node->node_list.next,
+ struct drm_mm_node, node_list)->start;
+}
+
+/**
+ * drm_mm_hole_node_end - computes the end of the hole following @node
+ * @hole_node: drm_mm_node which implicitly tracks the following hole
+ *
+ * This is useful for driver-specific debug dumpers. Otherwise drivers should not
+ * inspect holes themselves. Drivers must check first whether a hole indeed
+ * follows by looking at node->hole_follows.
+ *
+ * Returns:
+ * End of the subsequent hole.
+ */
+static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+ return __drm_mm_hole_node_end(hole_node);
+}
+
+/**
+ * drm_mm_for_each_node - iterator to walk over all allocated nodes
+ * @entry: drm_mm_node structure to assign to in each iteration step
+ * @mm: drm_mm allocator to walk
+ *
+ * This iterator walks over all nodes in the range allocator. It is implemented
+ * with list_for_each, so it is not safe against removal of elements.
+ */
+#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
+ &(mm)->head_node.node_list, \
+ node_list)
+
+/**
+ * drm_mm_for_each_hole - iterator to walk over all holes
+ * @entry: drm_mm_node used internally to track progress
+ * @mm: drm_mm allocator to walk
+ * @hole_start: ulong variable to assign the hole start to on each iteration
+ * @hole_end: ulong variable to assign the hole end to on each iteration
+ *
+ * This iterator walks over all holes in the range allocator. It is implemented
+ * with list_for_each, so it is not safe against removal of elements. @entry is used
+ * internally and will not reflect a real drm_mm_node for the very first hole.
+ * Hence users of this iterator may not access it.
+ *
+ * Implementation Note:
+ * We need to inline list_for_each_entry in order to be able to set hole_start
+ * and hole_end on each iteration while keeping the macro sane.
+ *
+ * The __drm_mm_for_each_hole version is similar, but with added support for
+ * going backwards.
+ */
+#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
+ for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
+ &entry->hole_stack != &(mm)->hole_stack ? \
+ hole_start = drm_mm_hole_node_start(entry), \
+ hole_end = drm_mm_hole_node_end(entry), \
+ 1 : 0; \
+ entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack))
+
+#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
+ for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
+ &entry->hole_stack != &(mm)->hole_stack ? \
+ hole_start = drm_mm_hole_node_start(entry), \
+ hole_end = drm_mm_hole_node_end(entry), \
+ 1 : 0; \
+ entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))
+
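
A hedged debug-dump sketch using the iterator (hypothetical helper; @entry only serves as the iteration cursor and is never dereferenced as a real node):

	static void my_dump_holes(struct drm_mm *mm)
	{
		struct drm_mm_node *entry;
		u64 hole_start, hole_end;

		drm_mm_for_each_hole(entry, mm, hole_start, hole_end)
			pr_info("hole: 0x%llx-0x%llx\n",
				(unsigned long long)hole_start,
				(unsigned long long)hole_end);
	}
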
+/*
+ * Basic range manager support (drm_mm.c)
+ */
+int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
+
+int drm_mm_insert_node_generic(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ u64 size,
+ unsigned alignment,
+ unsigned long color,
+ enum drm_mm_search_flags sflags,
+ enum drm_mm_allocator_flags aflags);
+/**
+ * drm_mm_insert_node - search for space and insert @node
+ * @mm: drm_mm to allocate from
+ * @node: preallocate node to insert
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @flags: flags to fine-tune the allocation
+ *
+ * This is a simplified version of drm_mm_insert_node_generic() with @color set
+ * to 0.
+ *
+ * The preallocated node must be cleared to 0.
+ *
+ * Returns:
+ * 0 on success, -ENOSPC if there's no suitable hole.
+ */
+static inline int drm_mm_insert_node(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ u64 size,
+ unsigned alignment,
+ enum drm_mm_search_flags flags)
+{
+ return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags,
+ DRM_MM_CREATE_DEFAULT);
+}
+
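
A hedged end-to-end sketch of the basic allocator API; the 16 MiB range, 4 KiB request and my_* names are illustrative assumptions, not defaults.

	static int my_setup_range_manager(void)
	{
		struct drm_mm my_mm;
		struct drm_mm_node my_node = {};	/* node must be cleared to 0 */
		int err;

		drm_mm_init(&my_mm, 0, 16 * 1024 * 1024);

		err = drm_mm_insert_node(&my_mm, &my_node, 4096, 0,
					 DRM_MM_SEARCH_DEFAULT);
		if (!err) {
			/* ... my_node.start now holds the allocated offset ... */
			drm_mm_remove_node(&my_node);
		}

		drm_mm_takedown(&my_mm);
		return err;	/* -ENOSPC when no suitable hole exists */
	}
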
+int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ u64 size,
+ unsigned alignment,
+ unsigned long color,
+ u64 start,
+ u64 end,
+ enum drm_mm_search_flags sflags,
+ enum drm_mm_allocator_flags aflags);
+/**
+ * drm_mm_insert_node_in_range - ranged search for space and insert @node
+ * @mm: drm_mm to allocate from
+ * @node: preallocate node to insert
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @start: start of the allowed range for this node
+ * @end: end of the allowed range for this node
+ * @flags: flags to fine-tune the allocation
+ *
+ * This is a simplified version of drm_mm_insert_node_in_range_generic() with
+ * @color set to 0.
+ *
+ * The preallocated node must be cleared to 0.
+ *
+ * Returns:
+ * 0 on success, -ENOSPC if there's no suitable hole.
+ */
+static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ u64 size,
+ unsigned alignment,
+ u64 start,
+ u64 end,
+ enum drm_mm_search_flags flags)
+{
+ return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
+ 0, start, end, flags,
+ DRM_MM_CREATE_DEFAULT);
+}
+
+void drm_mm_remove_node(struct drm_mm_node *node);
+void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
+void drm_mm_init(struct drm_mm *mm,
+ u64 start,
+ u64 size);
+void drm_mm_takedown(struct drm_mm *mm);
+bool drm_mm_clean(struct drm_mm *mm);
+
+void drm_mm_init_scan(struct drm_mm *mm,
+ u64 size,
+ unsigned alignment,
+ unsigned long color);
+void drm_mm_init_scan_with_range(struct drm_mm *mm,
+ u64 size,
+ unsigned alignment,
+ unsigned long color,
+ u64 start,
+ u64 end);
+bool drm_mm_scan_add_block(struct drm_mm_node *node);
+bool drm_mm_scan_remove_block(struct drm_mm_node *node);
+
+void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
+#ifdef CONFIG_DEBUG_FS
+int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
+#endif
+
+#endif
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
new file mode 100644
index 000000000..0616188c7
--- /dev/null
+++ b/include/drm/drm_modes.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright © 2006 Keith Packard
+ * Copyright © 2007-2008 Dave Airlie
+ * Copyright © 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright © 2014 Intel Corporation
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __DRM_MODES_H__
+#define __DRM_MODES_H__
+
+/*
+ * Note on terminology: here, for brevity and convenience, we refer to connector
+ * control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS,
+ * DVI, etc. And 'screen' refers to the whole of the visible display, which
+ * may span multiple monitors (and therefore multiple CRTC and connector
+ * structures).
+ */
+
+enum drm_mode_status {
+ MODE_OK = 0, /* Mode OK */
+ MODE_HSYNC, /* hsync out of range */
+ MODE_VSYNC, /* vsync out of range */
+ MODE_H_ILLEGAL, /* mode has illegal horizontal timings */
+ MODE_V_ILLEGAL, /* mode has illegal vertical timings */
+ MODE_BAD_WIDTH, /* requires an unsupported linepitch */
+ MODE_NOMODE, /* no mode with a matching name */
+ MODE_NO_INTERLACE, /* interlaced mode not supported */
+ MODE_NO_DBLESCAN, /* doublescan mode not supported */
+ MODE_NO_VSCAN, /* multiscan mode not supported */
+ MODE_MEM, /* insufficient video memory */
+ MODE_VIRTUAL_X, /* mode width too large for specified virtual size */
+ MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */
+ MODE_MEM_VIRT, /* insufficient video memory given virtual size */
+ MODE_NOCLOCK, /* no fixed clock available */
+ MODE_CLOCK_HIGH, /* clock required is too high */
+ MODE_CLOCK_LOW, /* clock required is too low */
+ MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */
+ MODE_BAD_HVALUE, /* horizontal timing was out of range */
+ MODE_BAD_VVALUE, /* vertical timing was out of range */
+ MODE_BAD_VSCAN, /* VScan value out of range */
+ MODE_HSYNC_NARROW, /* horizontal sync too narrow */
+ MODE_HSYNC_WIDE, /* horizontal sync too wide */
+ MODE_HBLANK_NARROW, /* horizontal blanking too narrow */
+ MODE_HBLANK_WIDE, /* horizontal blanking too wide */
+ MODE_VSYNC_NARROW, /* vertical sync too narrow */
+ MODE_VSYNC_WIDE, /* vertical sync too wide */
+ MODE_VBLANK_NARROW, /* vertical blanking too narrow */
+ MODE_VBLANK_WIDE, /* vertical blanking too wide */
+ MODE_PANEL, /* exceeds panel dimensions */
+ MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */
+ MODE_ONE_WIDTH, /* only one width is supported */
+ MODE_ONE_HEIGHT, /* only one height is supported */
+ MODE_ONE_SIZE, /* only one resolution is supported */
+ MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */
+ MODE_NO_STEREO, /* stereo modes not supported */
+ MODE_UNVERIFIED = -3, /* mode needs to be reverified */
+ MODE_BAD = -2, /* unspecified reason */
+ MODE_ERROR = -1 /* error condition */
+};
+
+#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \
+ DRM_MODE_TYPE_CRTC_C)
+
+#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \
+ .name = nm, .status = 0, .type = (t), .clock = (c), \
+ .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
+ .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
+ .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
+ .vscan = (vs), .flags = (f), \
+ .base.type = DRM_MODE_OBJECT_MODE
+
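
For illustration, a mode table entry built with this initializer; the values follow the common 640x480@60Hz DMT timing and should be treated as an example, not a normative definition.

	static const struct drm_display_mode example_vga_mode = {
		DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
			 752, 800, 0, 480, 490, 492, 525, 0,
			 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
	};
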
+#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */
+#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */
+#define CRTC_NO_DBLSCAN (1 << 2) /* don't adjust doublescan */
+#define CRTC_NO_VSCAN (1 << 3) /* don't adjust vscan */
+#define CRTC_STEREO_DOUBLE_ONLY (CRTC_STEREO_DOUBLE | CRTC_NO_DBLSCAN | CRTC_NO_VSCAN)
+
+#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
+
+struct drm_display_mode {
+ /* Header */
+ struct list_head head;
+ struct drm_mode_object base;
+
+ char name[DRM_DISPLAY_MODE_LEN];
+
+ enum drm_mode_status status;
+ unsigned int type;
+
+ /* Proposed mode values */
+ int clock; /* in kHz */
+ int hdisplay;
+ int hsync_start;
+ int hsync_end;
+ int htotal;
+ int hskew;
+ int vdisplay;
+ int vsync_start;
+ int vsync_end;
+ int vtotal;
+ int vscan;
+ unsigned int flags;
+
+ /* Addressable image size (may be 0 for projectors, etc.) */
+ int width_mm;
+ int height_mm;
+
+ /* Actual mode we give to hw */
+ int crtc_clock; /* in kHz */
+ int crtc_hdisplay;
+ int crtc_hblank_start;
+ int crtc_hblank_end;
+ int crtc_hsync_start;
+ int crtc_hsync_end;
+ int crtc_htotal;
+ int crtc_hskew;
+ int crtc_vdisplay;
+ int crtc_vblank_start;
+ int crtc_vblank_end;
+ int crtc_vsync_start;
+ int crtc_vsync_end;
+ int crtc_vtotal;
+
+ /* Driver private mode info */
+ int *private;
+ int private_flags;
+
+ int vrefresh; /* in Hz */
+ int hsync; /* in kHz */
+ enum hdmi_picture_aspect picture_aspect_ratio;
+};
+
+/* mode specified on the command line */
+struct drm_cmdline_mode {
+ bool specified;
+ bool refresh_specified;
+ bool bpp_specified;
+ int xres, yres;
+ int bpp;
+ int refresh;
+ bool rb;
+ bool interlace;
+ bool cvt;
+ bool margins;
+ enum drm_connector_force force;
+};
+
+/**
+ * drm_mode_is_stereo - check for stereo mode flags
+ * @mode: drm_display_mode to check
+ *
+ * Returns:
+ * True if the mode is one of the stereo modes (like side-by-side), false if
+ * not.
+ */
+static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode)
+{
+ return mode->flags & DRM_MODE_FLAG_3D_MASK;
+}
+
+struct drm_connector;
+struct drm_cmdline_mode;
+
+struct drm_display_mode *drm_mode_create(struct drm_device *dev);
+void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
+void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
+void drm_mode_debug_printmodeline(const struct drm_display_mode *mode);
+
+struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
+ int hdisplay, int vdisplay, int vrefresh,
+ bool reduced, bool interlaced,
+ bool margins);
+struct drm_display_mode *drm_gtf_mode(struct drm_device *dev,
+ int hdisplay, int vdisplay, int vrefresh,
+ bool interlaced, int margins);
+struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
+ int hdisplay, int vdisplay,
+ int vrefresh, bool interlaced,
+ int margins,
+ int GTF_M, int GTF_2C,
+ int GTF_K, int GTF_2J);
+void drm_display_mode_from_videomode(const struct videomode *vm,
+ struct drm_display_mode *dmode);
+void drm_display_mode_to_videomode(const struct drm_display_mode *dmode,
+ struct videomode *vm);
+int of_get_drm_display_mode(struct device_node *np,
+ struct drm_display_mode *dmode,
+ int index);
+
+void drm_mode_set_name(struct drm_display_mode *mode);
+int drm_mode_hsync(const struct drm_display_mode *mode);
+int drm_mode_vrefresh(const struct drm_display_mode *mode);
+
+void drm_mode_set_crtcinfo(struct drm_display_mode *p,
+ int adjust_flags);
+void drm_mode_copy(struct drm_display_mode *dst,
+ const struct drm_display_mode *src);
+struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
+ const struct drm_display_mode *mode);
+bool drm_mode_equal(const struct drm_display_mode *mode1,
+ const struct drm_display_mode *mode2);
+bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
+ const struct drm_display_mode *mode2);
+
+/* for use by the crtc helper probe functions */
+enum drm_mode_status drm_mode_validate_basic(const struct drm_display_mode *mode);
+enum drm_mode_status drm_mode_validate_size(const struct drm_display_mode *mode,
+ int maxX, int maxY);
+void drm_mode_prune_invalid(struct drm_device *dev,
+ struct list_head *mode_list, bool verbose);
+void drm_mode_sort(struct list_head *mode_list);
+void drm_mode_connector_list_update(struct drm_connector *connector, bool merge_type_bits);
+
+/* parsing cmdline modes */
+bool
+drm_mode_parse_command_line_for_connector(const char *mode_option,
+ struct drm_connector *connector,
+ struct drm_cmdline_mode *mode);
+struct drm_display_mode *
+drm_mode_create_from_cmdline_mode(struct drm_device *dev,
+ struct drm_cmdline_mode *cmd);
+
+#endif /* __DRM_MODES_H__ */
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
new file mode 100644
index 000000000..70595ff56
--- /dev/null
+++ b/include/drm/drm_modeset_lock.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DRM_MODESET_LOCK_H_
+#define DRM_MODESET_LOCK_H_
+
+#include <linux/ww_mutex.h>
+
+struct drm_modeset_lock;
+
+/**
+ * struct drm_modeset_acquire_ctx - locking context (see ww_acquire_ctx)
+ * @ww_ctx: base acquire ctx
+ * @contended: used internally for -EDEADLK handling
+ * @locked: list of held locks
+ * @trylock_only: trylock mode used in atomic contexts/panic notifiers
+ *
+ * Each thread competing for a set of locks must use one acquire
+ * ctx. If any lock function returns -EDEADLK, the caller must back off
+ * and retry.
+ */
+struct drm_modeset_acquire_ctx {
+
+ struct ww_acquire_ctx ww_ctx;
+
+ /**
+ * Contended lock: if a lock is contended you should only call
+ * drm_modeset_backoff(), which drops all currently held locks and
+ * slow-locks the contended lock.
+ */
+ struct drm_modeset_lock *contended;
+
+ /**
+ * list of held locks (drm_modeset_lock)
+ */
+ struct list_head locked;
+
+ /**
+ * Trylock mode, use only for panic handlers!
+ */
+ bool trylock_only;
+};
+
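
A hedged sketch of the backoff-and-retry pattern described above; it assumes some drm_modeset_lock (for example a CRTC's lock) is passed in as "lock", and the wrapper name is made up for illustration.

	static int my_touch_modeset_state(struct drm_modeset_lock *lock)
	{
		struct drm_modeset_acquire_ctx ctx;
		int ret;

		drm_modeset_acquire_init(&ctx, 0);
	retry:
		ret = drm_modeset_lock(lock, &ctx);
		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			goto retry;
		}
		if (!ret) {
			/* ... modify the protected state while the lock is held ... */
			drm_modeset_drop_locks(&ctx);
		}

		drm_modeset_acquire_fini(&ctx);
		return ret;
	}
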
+/**
+ * struct drm_modeset_lock - used for locking modeset resources.
+ * @mutex: resource locking
+ * @head: used to hold its place on state->locked list when
+ * part of an atomic update
+ *
+ * Used for locking CRTCs and other modeset resources.
+ */
+struct drm_modeset_lock {
+ /**
+ * modeset lock
+ */
+ struct ww_mutex mutex;
+
+ /**
+ * Resources that are locked as part of an atomic update are added
+ * to a list (so we know what to unlock at the end).
+ */
+ struct list_head head;
+};
+
+extern struct ww_class crtc_ww_class;
+
+void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
+ uint32_t flags);
+void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx);
+void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx);
+void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx);
+int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx);
+
+/**
+ * drm_modeset_lock_init - initialize lock
+ * @lock: lock to init
+ */
+static inline void drm_modeset_lock_init(struct drm_modeset_lock *lock)
+{
+ ww_mutex_init(&lock->mutex, &crtc_ww_class);
+ INIT_LIST_HEAD(&lock->head);
+}
+
+/**
+ * drm_modeset_lock_fini - cleanup lock
+ * @lock: lock to cleanup
+ */
+static inline void drm_modeset_lock_fini(struct drm_modeset_lock *lock)
+{
+ WARN_ON(!list_empty(&lock->head));
+}
+
+/**
+ * drm_modeset_is_locked - equivalent to mutex_is_locked()
+ * @lock: lock to check
+ */
+static inline bool drm_modeset_is_locked(struct drm_modeset_lock *lock)
+{
+ return ww_mutex_is_locked(&lock->mutex);
+}
+
+int drm_modeset_lock(struct drm_modeset_lock *lock,
+ struct drm_modeset_acquire_ctx *ctx);
+int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
+ struct drm_modeset_acquire_ctx *ctx);
+void drm_modeset_unlock(struct drm_modeset_lock *lock);
+
+struct drm_device;
+struct drm_crtc;
+struct drm_plane;
+
+void drm_modeset_lock_all(struct drm_device *dev);
+int __drm_modeset_lock_all(struct drm_device *dev, bool trylock);
+void drm_modeset_unlock_all(struct drm_device *dev);
+void drm_modeset_lock_crtc(struct drm_crtc *crtc,
+ struct drm_plane *plane);
+void drm_modeset_unlock_crtc(struct drm_crtc *crtc);
+void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
+struct drm_modeset_acquire_ctx *
+drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc);
+
+int drm_modeset_lock_all_crtcs(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx);
+
+#endif /* DRM_MODESET_LOCK_H_ */
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
new file mode 100644
index 000000000..2441f7112
--- /dev/null
+++ b/include/drm/drm_of.h
@@ -0,0 +1,18 @@
+#ifndef __DRM_OF_H__
+#define __DRM_OF_H__
+
+struct drm_device;
+struct device_node;
+
+#ifdef CONFIG_OF
+extern uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
+ struct device_node *port);
+#else
+static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
+ struct device_node *port)
+{
+ return 0;
+}
+#endif
+
+#endif /* __DRM_OF_H__ */
diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h
new file mode 100644
index 000000000..86ab99bc0
--- /dev/null
+++ b/include/drm/drm_os_linux.h
@@ -0,0 +1,65 @@
+/**
+ * \file drm_os_linux.h
+ * OS abstraction macros.
+ */
+
+#include <linux/interrupt.h> /* For task queue support */
+#include <linux/delay.h>
+
+#ifndef readq
+static inline u64 readq(void __iomem *reg)
+{
+ return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
+}
+
+static inline void writeq(u64 val, void __iomem *reg)
+{
+ writel(val & 0xffffffff, reg);
+ writel(val >> 32, reg + 0x4UL);
+}
+#endif
+
+/** Current process ID */
+#define DRM_CURRENTPID task_pid_nr(current)
+#define DRM_UDELAY(d) udelay(d)
+/** Read a byte from a MMIO region */
+#define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset))
+/** Read a word from a MMIO region */
+#define DRM_READ16(map, offset) readw(((void __iomem *)(map)->handle) + (offset))
+/** Read a dword from a MMIO region */
+#define DRM_READ32(map, offset) readl(((void __iomem *)(map)->handle) + (offset))
+/** Write a byte into a MMIO region */
+#define DRM_WRITE8(map, offset, val) writeb(val, ((void __iomem *)(map)->handle) + (offset))
+/** Write a word into a MMIO region */
+#define DRM_WRITE16(map, offset, val) writew(val, ((void __iomem *)(map)->handle) + (offset))
+/** Write a dword into a MMIO region */
+#define DRM_WRITE32(map, offset, val) writel(val, ((void __iomem *)(map)->handle) + (offset))
+
+/** Read a qword from a MMIO region - be careful using these unless you really understand them */
+#define DRM_READ64(map, offset) readq(((void __iomem *)(map)->handle) + (offset))
+/** Write a qword into a MMIO region */
+#define DRM_WRITE64(map, offset, val) writeq(val, ((void __iomem *)(map)->handle) + (offset))
+
+#define DRM_WAIT_ON( ret, queue, timeout, condition ) \
+do { \
+ DECLARE_WAITQUEUE(entry, current); \
+ unsigned long end = jiffies + (timeout); \
+ add_wait_queue(&(queue), &entry); \
+ \
+ for (;;) { \
+ __set_current_state(TASK_INTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ if (time_after_eq(jiffies, end)) { \
+ ret = -EBUSY; \
+ break; \
+ } \
+ schedule_timeout((HZ/100 > 1) ? HZ/100 : 1); \
+ if (signal_pending(current)) { \
+ ret = -EINTR; \
+ break; \
+ } \
+ } \
+ __set_current_state(TASK_RUNNING); \
+ remove_wait_queue(&(queue), &entry); \
+} while (0)
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
new file mode 100644
index 000000000..13ff44b28
--- /dev/null
+++ b/include/drm/drm_panel.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2013, NVIDIA Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __DRM_PANEL_H__
+#define __DRM_PANEL_H__
+
+#include <linux/list.h>
+
+struct drm_connector;
+struct drm_device;
+struct drm_panel;
+struct display_timing;
+
+/**
+ * struct drm_panel_funcs - perform operations on a given panel
+ * @disable: disable panel (turn off back light, etc.)
+ * @unprepare: turn off panel
+ * @prepare: turn on panel and perform set up
+ * @enable: enable panel (turn on back light, etc.)
+ * @get_modes: add modes to the connector that the panel is attached to and
+ * return the number of modes added
+ * @get_timings: copy display timings into the provided array and return
+ * the number of display timings available
+ *
+ * The .prepare() function is typically called before the display controller
+ * starts to transmit video data. Panel drivers can use this to turn the panel
+ * on and wait for it to become ready. If additional configuration is required
+ * (via a control bus such as I2C, SPI or DSI for example) this is a good time
+ * to do that.
+ *
+ * After the display controller has started transmitting video data, it's safe
+ * to call the .enable() function. This will typically enable the backlight to
+ * make the image on screen visible. Some panels require a certain amount of
+ * time or frames before the image is displayed. This function is responsible
+ * for taking this into account before enabling the backlight to avoid visual
+ * glitches.
+ *
+ * Before stopping video transmission from the display controller it can be
+ * necessary to turn off the panel to avoid visual glitches. This is done in
+ * the .disable() function. Analogously to .enable() this typically involves
+ * turning off the backlight and waiting for some time to make sure no image
+ * is visible on the panel. It is then safe for the display controller to
+ * cease transmission of video data.
+ *
+ * To save power when no video data is transmitted, a driver can power down
+ * the panel. This is the job of the .unprepare() function.
+ */
+struct drm_panel_funcs {
+ int (*disable)(struct drm_panel *panel);
+ int (*unprepare)(struct drm_panel *panel);
+ int (*prepare)(struct drm_panel *panel);
+ int (*enable)(struct drm_panel *panel);
+ int (*get_modes)(struct drm_panel *panel);
+ int (*get_timings)(struct drm_panel *panel, unsigned int num_timings,
+ struct display_timing *timings);
+};
+
+struct drm_panel {
+ struct drm_device *drm;
+ struct drm_connector *connector;
+ struct device *dev;
+
+ const struct drm_panel_funcs *funcs;
+
+ struct list_head list;
+};
+
+static inline int drm_panel_unprepare(struct drm_panel *panel)
+{
+ if (panel && panel->funcs && panel->funcs->unprepare)
+ return panel->funcs->unprepare(panel);
+
+ return panel ? -ENOSYS : -EINVAL;
+}
+
+static inline int drm_panel_disable(struct drm_panel *panel)
+{
+ if (panel && panel->funcs && panel->funcs->disable)
+ return panel->funcs->disable(panel);
+
+ return panel ? -ENOSYS : -EINVAL;
+}
+
+static inline int drm_panel_prepare(struct drm_panel *panel)
+{
+ if (panel && panel->funcs && panel->funcs->prepare)
+ return panel->funcs->prepare(panel);
+
+ return panel ? -ENOSYS : -EINVAL;
+}
+
+static inline int drm_panel_enable(struct drm_panel *panel)
+{
+ if (panel && panel->funcs && panel->funcs->enable)
+ return panel->funcs->enable(panel);
+
+ return panel ? -ENOSYS : -EINVAL;
+}
+
+static inline int drm_panel_get_modes(struct drm_panel *panel)
+{
+ if (panel && panel->funcs && panel->funcs->get_modes)
+ return panel->funcs->get_modes(panel);
+
+ return panel ? -ENOSYS : -EINVAL;
+}
+
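
A hedged sketch of the ordering a display controller driver typically follows with the wrappers above; video-stream control is driver-specific and only indicated by comments, and the my_pipe_* names are assumptions.

	static void my_pipe_enable(struct drm_panel *panel)
	{
		drm_panel_prepare(panel);	/* before video transmission starts */
		/* ... start scanout / video stream ... */
		drm_panel_enable(panel);	/* backlight on once the image is stable */
	}

	static void my_pipe_disable(struct drm_panel *panel)
	{
		drm_panel_disable(panel);	/* backlight off, video still running */
		/* ... stop scanout / video stream ... */
		drm_panel_unprepare(panel);	/* panel fully powered down */
	}
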
+void drm_panel_init(struct drm_panel *panel);
+
+int drm_panel_add(struct drm_panel *panel);
+void drm_panel_remove(struct drm_panel *panel);
+
+int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector);
+int drm_panel_detach(struct drm_panel *panel);
+
+#ifdef CONFIG_OF
+struct drm_panel *of_drm_find_panel(struct device_node *np);
+#else
+static inline struct drm_panel *of_drm_find_panel(struct device_node *np)
+{
+ return NULL;
+}
+#endif
+
+#endif
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
new file mode 100644
index 000000000..45c39a37f
--- /dev/null
+++ b/include/drm/drm_pciids.h
@@ -0,0 +1,812 @@
+#define radeon_PCI_IDS \
+ {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \
+ {0x1002, 0x4137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
+ {0x1002, 0x4144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ {0x1002, 0x4145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ {0x1002, 0x4146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ {0x1002, 0x4147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ {0x1002, 0x4148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+ {0x1002, 0x4149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+ {0x1002, 0x414A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+ {0x1002, 0x414B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+ {0x1002, 0x4150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+ {0x1002, 0x4151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+ {0x1002, 0x4152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+ {0x1002, 0x4153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+ {0x1002, 0x4154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+ {0x1002, 0x4155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+ {0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+ {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
+ {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
+ {0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
+ {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250}, \
+ {0x1002, 0x4A48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4A4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4A4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4A4D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4A4E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4A4F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4A50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4A54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4B48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4B49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4B4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4B4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4B4C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4C5A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ {0x1002, 0x4E47, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+ {0x1002, 0x4E48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+ {0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+ {0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+ {0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+ {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4E52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4E53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x4E56, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
+ {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
+ {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
+ {0x1002, 0x5147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|RADEON_SINGLE_CRTC}, \
+ {0x1002, 0x5148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
+ {0x1002, 0x514C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
+ {0x1002, 0x514D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
+ {0x1002, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \
+ {0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \
+ {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
+ {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
+ {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_SINGLE_CRTC}, \
+ {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
+ {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+ {0x1002, 0x5955, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+ {0x1002, 0x5974, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+ {0x1002, 0x5975, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+ {0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
+ {0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
+ {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
+ {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
+ {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
+ {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_SINGLE_CRTC}, \
+ {0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \
+ {0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+ {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \
+ {0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+ {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5b64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5e4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x665f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6703, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6704, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6705, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6706, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6707, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6708, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6709, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6718, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6719, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x671c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x671d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x671f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6721, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6722, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6723, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6724, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6725, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6726, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6727, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6728, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6729, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6738, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6739, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x673e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6740, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6741, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6743, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6745, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6746, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x674A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x675B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x675D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x675F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6762, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6763, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6764, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6765, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6766, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6771, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x677B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x67A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x67A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x67A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x67A8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x67A9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x67AA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x67B0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x67B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x67B8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x67B9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x67BA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x67BE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6826, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x683B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x683D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x683F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6842, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6843, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6849, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x684C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x688A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x688C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x688D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6898, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6899, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x689b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x689c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x689d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x689e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68a0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68a1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68a8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68a9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68b8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68b9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68ba, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68be, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68bf, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68c7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68c8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68c9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68d9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68da, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68de, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68e0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68e1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68e4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68e5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68e8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68e9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68f1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68fa, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x710A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x710B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x710C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x710E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x710F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7140, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7141, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7142, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7143, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x714A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x714B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x714C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x714D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x714E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x714F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x715E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x715F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7183, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7186, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7187, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7188, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x718A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x718B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x718C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x718D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x718F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7193, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7196, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x719B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x719F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71D2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71D4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71D5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71D6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7244, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7248, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x724A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x724B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x724C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x724D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x724E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x724F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7280, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7283, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7284, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x728B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x728C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7290, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7291, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7293, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7297, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
+ {0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
+ {0x1002, 0x793f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7941, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7942, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x796c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
+ {0x1002, 0x796d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
+ {0x1002, 0x796e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
+ {0x1002, 0x796f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
+ {0x1002, 0x9400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9402, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9403, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x940A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x940B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x940F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94B3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94B4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94B5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94B9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9440, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9442, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9443, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9444, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x944A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x944B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x944C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x944E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9450, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9456, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x945A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x945B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x945E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x946A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x946B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x947A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x947B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9480, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9487, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9488, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9489, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x948A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x948F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9490, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9491, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9495, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9498, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x949C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x949E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x949F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9501, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9504, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9505, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9506, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9507, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9508, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9509, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x950F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9515, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9517, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9519, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9540, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9542, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x954E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x954F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9553, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9555, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9557, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x955f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9580, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9581, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9583, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9586, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9587, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9588, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9589, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x958A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x958B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x958C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x958D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x958E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x958F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9590, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9593, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9595, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9596, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9597, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9598, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9599, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x959B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x95C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x95C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x95C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x95C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x95C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x95C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x95C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x95CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x95CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x95CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x95CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9612, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9614, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9615, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9616, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9642, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9643, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9644, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+ {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+ {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+ {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x964e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+ {0x1002, 0x964f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+ {0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9711, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9804, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9904, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9905, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9906, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9908, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x990B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x990C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x990D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x990E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9918, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9919, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9990, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9991, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9996, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9998, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9999, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x999A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x999B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x999C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x999D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0, 0, 0}
+
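For context, each of these *_PCI_IDS macros is meant to be expanded inside a driver's PCI device table. The sketch below shows the usual pattern, under the assumption that the driver has already defined the CHIP_*/RADEON_* flag values carried in the driver_data field; the variable name pciidlist is illustrative, not taken from this header.

	/* Assumed driver-side usage sketch (not part of this header): the macro
	 * expands to a comma-separated list of struct pci_device_id initializers,
	 * ending with the {0, 0, 0} sentinel entry above. */
	#include <linux/module.h>
	#include <linux/pci.h>
	#include <drm/drm_pciids.h>

	static const struct pci_device_id pciidlist[] = {
		radeon_PCI_IDS
	};
	MODULE_DEVICE_TABLE(pci, pciidlist);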
+#define r128_PCI_IDS \
+ {0x1002, 0x4c45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4c46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4d46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5041, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5044, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5045, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5046, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5047, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5048, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5049, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x504A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x504B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x504C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x504D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x504E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x504F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5052, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x524b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x524c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x534d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x544C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x5452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0, 0, 0}
+
+#define mga_PCI_IDS \
+ {0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
+ {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
+ {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \
+ {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \
+ {0, 0, 0}
+
+#define sisdrv_PCI_IDS \
+ {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1039, 0x6300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1039, 0x6330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
+ {0x1039, 0x6351, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x18CA, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
+ {0x18CA, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
+ {0, 0, 0}
+
+#define tdfx_PCI_IDS \
+ {0x121a, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x121a, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x121a, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x121a, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x121a, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x121a, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0, 0, 0}
+
+#define viadrv_PCI_IDS \
+ {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
+ {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \
+ {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
+ {0, 0, 0}
+
+#define i810_PCI_IDS \
+ {0x8086, 0x7121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0, 0, 0}
+
+#define savage_PCI_IDS \
+ {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
+ {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
+ {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
+ {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
+ {0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+ {0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+ {0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+ {0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+ {0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+ {0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+ {0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+ {0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+ {0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+ {0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+ {0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+ {0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+ {0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+ {0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
+ {0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
+ {0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
+ {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
+ {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
+ {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
+ {0, 0, 0}
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
new file mode 100644
index 000000000..96e16283a
--- /dev/null
+++ b/include/drm/drm_plane_helper.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2011-2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef DRM_PLANE_HELPER_H
+#define DRM_PLANE_HELPER_H
+
+#include <drm/drm_rect.h>
+#include <drm/drm_crtc.h>
+
+/*
+ * Drivers that don't allow primary plane scaling may pass this macro in place
+ * of the min/max scale parameters of the update checker function.
+ *
+ * Due to src being in 16.16 fixed point and dest being in integer pixels,
+ * 1<<16 represents no scaling.
+ */
+#define DRM_PLANE_HELPER_NO_SCALING (1<<16)
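
A minimal sketch (not part of this header) of how a driver that cannot scale its primary plane might pass this macro to drm_plane_helper_check_update(), declared further below; example_primary_check() and its arguments are placeholders supplied by the caller.

static int example_primary_check(struct drm_plane *plane, struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_rect *src, struct drm_rect *dest,
				 const struct drm_rect *clip)
{
	bool visible;

	/* Reject any update that would require scaling. */
	return drm_plane_helper_check_update(plane, crtc, fb, src, dest, clip,
					     DRM_PLANE_HELPER_NO_SCALING,
					     DRM_PLANE_HELPER_NO_SCALING,
					     false, false, &visible);
}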
+
+/**
+ * DOC: plane helpers
+ *
+ * Helper functions to assist with creation and handling of CRTC primary
+ * planes.
+ */
+
+extern int drm_crtc_init(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ const struct drm_crtc_funcs *funcs);
+
+/**
+ * drm_plane_helper_funcs - helper operations for planes
+ * @prepare_fb: prepare a framebuffer for use by the plane
+ * @cleanup_fb: cleanup a framebuffer when it's no longer used by the plane
+ * @atomic_check: check that a given atomic state is valid and can be applied
+ * @atomic_update: apply an atomic state to the plane (mandatory)
+ * @atomic_disable: disable the plane
+ *
+ * The helper operations are called by the mid-layer CRTC helper.
+ */
+struct drm_plane_helper_funcs {
+ int (*prepare_fb)(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *new_state);
+ void (*cleanup_fb)(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
+ const struct drm_plane_state *old_state);
+
+ int (*atomic_check)(struct drm_plane *plane,
+ struct drm_plane_state *state);
+ void (*atomic_update)(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
+ void (*atomic_disable)(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
+};
+
+static inline void drm_plane_helper_add(struct drm_plane *plane,
+ const struct drm_plane_helper_funcs *funcs)
+{
+ plane->helper_private = funcs;
+}
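
A hedged sketch of how a driver would typically wire up this vtable at plane-init time; the example_* names are hypothetical and only .atomic_update is filled in here.

/* Hypothetical driver callback: program the hardware from plane->state. */
static void example_plane_atomic_update(struct drm_plane *plane,
					struct drm_plane_state *old_state)
{
	/* hardware programming goes here */
}

static const struct drm_plane_helper_funcs example_plane_helper_funcs = {
	.atomic_update = example_plane_atomic_update,
};

static void example_plane_init(struct drm_plane *plane)
{
	drm_plane_helper_add(plane, &example_plane_helper_funcs);
}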
+
+extern int drm_plane_helper_check_update(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_rect *src,
+ struct drm_rect *dest,
+ const struct drm_rect *clip,
+ int min_scale,
+ int max_scale,
+ bool can_position,
+ bool can_update_disabled,
+ bool *visible);
+extern int drm_primary_helper_update(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+extern int drm_primary_helper_disable(struct drm_plane *plane);
+extern void drm_primary_helper_destroy(struct drm_plane *plane);
+extern const struct drm_plane_funcs drm_primary_helper_funcs;
+
+int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+int drm_plane_helper_disable(struct drm_plane *plane);
+
+/* For use by drm_crtc_helper.c */
+int drm_plane_helper_commit(struct drm_plane *plane,
+ struct drm_plane_state *plane_state,
+ struct drm_framebuffer *old_fb);
+#endif
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
new file mode 100644
index 000000000..26bb55e9e
--- /dev/null
+++ b/include/drm/drm_rect.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2011-2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef DRM_RECT_H
+#define DRM_RECT_H
+
+/**
+ * DOC: rect utils
+ *
+ * Utility functions to help manage rectangular areas for
+ * clipping, scaling, etc. calculations.
+ */
+
+/**
+ * struct drm_rect - two dimensional rectangle
+ * @x1: horizontal starting coordinate (inclusive)
+ * @x2: horizontal ending coordinate (exclusive)
+ * @y1: vertical starting coordinate (inclusive)
+ * @y2: vertical ending coordinate (exclusive)
+ */
+struct drm_rect {
+ int x1, y1, x2, y2;
+};
+
+/**
+ * drm_rect_adjust_size - adjust the size of the rectangle
+ * @r: rectangle to be adjusted
+ * @dw: horizontal adjustment
+ * @dh: vertical adjustment
+ *
+ * Change the size of rectangle @r by @dw in the horizontal direction,
+ * and by @dh in the vertical direction, while keeping the center
+ * of @r stationary.
+ *
+ * Positive @dw and @dh increase the size, negative values decrease it.
+ */
+static inline void drm_rect_adjust_size(struct drm_rect *r, int dw, int dh)
+{
+ r->x1 -= dw >> 1;
+ r->y1 -= dh >> 1;
+ r->x2 += (dw + 1) >> 1;
+ r->y2 += (dh + 1) >> 1;
+}
+
+/**
+ * drm_rect_translate - translate the rectangle
+ * @r: rectangle to be translated
+ * @dx: horizontal translation
+ * @dy: vertical translation
+ *
+ * Move rectangle @r by @dx in the horizontal direction,
+ * and by @dy in the vertical direction.
+ */
+static inline void drm_rect_translate(struct drm_rect *r, int dx, int dy)
+{
+ r->x1 += dx;
+ r->y1 += dy;
+ r->x2 += dx;
+ r->y2 += dy;
+}
+
+/**
+ * drm_rect_downscale - downscale a rectangle
+ * @r: rectangle to be downscaled
+ * @horz: horizontal downscale factor
+ * @vert: vertical downscale factor
+ *
+ * Divide the coordinates of rectangle @r by @horz and @vert.
+ */
+static inline void drm_rect_downscale(struct drm_rect *r, int horz, int vert)
+{
+ r->x1 /= horz;
+ r->y1 /= vert;
+ r->x2 /= horz;
+ r->y2 /= vert;
+}
+
+/**
+ * drm_rect_width - determine the rectangle width
+ * @r: rectangle whose width is returned
+ *
+ * RETURNS:
+ * The width of the rectangle.
+ */
+static inline int drm_rect_width(const struct drm_rect *r)
+{
+ return r->x2 - r->x1;
+}
+
+/**
+ * drm_rect_height - determine the rectangle height
+ * @r: rectangle whose height is returned
+ *
+ * RETURNS:
+ * The height of the rectangle.
+ */
+static inline int drm_rect_height(const struct drm_rect *r)
+{
+ return r->y2 - r->y1;
+}
+
+/**
+ * drm_rect_visible - determine if the rectangle is visible
+ * @r: rectangle whose visibility is returned
+ *
+ * RETURNS:
+ * %true if the rectangle is visible, %false otherwise.
+ */
+static inline bool drm_rect_visible(const struct drm_rect *r)
+{
+ return drm_rect_width(r) > 0 && drm_rect_height(r) > 0;
+}
+
+/**
+ * drm_rect_equals - determine if two rectangles are equal
+ * @r1: first rectangle
+ * @r2: second rectangle
+ *
+ * RETURNS:
+ * %true if the rectangles are equal, %false otherwise.
+ */
+static inline bool drm_rect_equals(const struct drm_rect *r1,
+ const struct drm_rect *r2)
+{
+ return r1->x1 == r2->x1 && r1->x2 == r2->x2 &&
+ r1->y1 == r2->y1 && r1->y2 == r2->y2;
+}
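
A small illustrative sketch (not from the kernel) exercising the inline helpers above; the numbers are arbitrary and only demonstrate the [x1, x2) / [y1, y2) convention.

static void example_rect_usage(void)
{
	struct drm_rect r = { .x1 = 0, .y1 = 0, .x2 = 1920, .y2 = 1080 };
	int w = drm_rect_width(&r);	/* 1920 */
	int h = drm_rect_height(&r);	/* 1080 */

	drm_rect_translate(&r, 100, 50);	/* now spans 100..2020 x 50..1130 */

	if (drm_rect_visible(&r))
		pr_debug("rect %dx%d is visible\n", w, h);
}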
+
+bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip);
+bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
+ const struct drm_rect *clip,
+ int hscale, int vscale);
+int drm_rect_calc_hscale(const struct drm_rect *src,
+ const struct drm_rect *dst,
+ int min_hscale, int max_hscale);
+int drm_rect_calc_vscale(const struct drm_rect *src,
+ const struct drm_rect *dst,
+ int min_vscale, int max_vscale);
+int drm_rect_calc_hscale_relaxed(struct drm_rect *src,
+ struct drm_rect *dst,
+ int min_hscale, int max_hscale);
+int drm_rect_calc_vscale_relaxed(struct drm_rect *src,
+ struct drm_rect *dst,
+ int min_vscale, int max_vscale);
+void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point);
+void drm_rect_rotate(struct drm_rect *r,
+ int width, int height,
+ unsigned int rotation);
+void drm_rect_rotate_inv(struct drm_rect *r,
+ int width, int height,
+ unsigned int rotation);
+
+#endif
diff --git a/include/drm/drm_sysfs.h b/include/drm/drm_sysfs.h
new file mode 100644
index 000000000..1d8e033fd
--- /dev/null
+++ b/include/drm/drm_sysfs.h
@@ -0,0 +1,12 @@
+#ifndef _DRM_SYSFS_H_
+#define _DRM_SYSFS_H_
+
+/**
+ * This minimalistic include file is intended for users (read: TTM) who
+ * don't want to include the full drmP.h file.
+ */
+
+extern int drm_class_device_register(struct device *dev);
+extern void drm_class_device_unregister(struct device *dev);
+
+#endif
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
new file mode 100644
index 000000000..8cd402c73
--- /dev/null
+++ b/include/drm/drm_vma_manager.h
@@ -0,0 +1,257 @@
+#ifndef __DRM_VMA_MANAGER_H__
+#define __DRM_VMA_MANAGER_H__
+
+/*
+ * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drm_mm.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+struct drm_vma_offset_file {
+ struct rb_node vm_rb;
+ struct file *vm_filp;
+ unsigned long vm_count;
+};
+
+struct drm_vma_offset_node {
+ rwlock_t vm_lock;
+ struct drm_mm_node vm_node;
+ struct rb_node vm_rb;
+ struct rb_root vm_files;
+};
+
+struct drm_vma_offset_manager {
+ rwlock_t vm_lock;
+ struct rb_root vm_addr_space_rb;
+ struct drm_mm vm_addr_space_mm;
+};
+
+void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
+ unsigned long page_offset, unsigned long size);
+void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr);
+
+struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
+ unsigned long start,
+ unsigned long pages);
+struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
+ unsigned long start,
+ unsigned long pages);
+int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
+ struct drm_vma_offset_node *node, unsigned long pages);
+void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
+ struct drm_vma_offset_node *node);
+
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp);
+void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp);
+bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
+ struct file *filp);
+
+/**
+ * drm_vma_offset_exact_lookup() - Look up node by exact address
+ * @mgr: Manager object
+ * @start: Start address (page-based, not byte-based)
+ * @pages: Size of object (page-based)
+ *
+ * Same as drm_vma_offset_lookup() but does not allow any offset into the node.
+ * It only returns the exact object with the given start address.
+ *
+ * RETURNS:
+ * Node at exact start address @start.
+ */
+static inline struct drm_vma_offset_node *
+drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
+ unsigned long start,
+ unsigned long pages)
+{
+ struct drm_vma_offset_node *node;
+
+ node = drm_vma_offset_lookup(mgr, start, pages);
+ return (node && node->vm_node.start == start) ? node : NULL;
+}
+
+/**
+ * drm_vma_offset_lock_lookup() - Lock lookup for extended private use
+ * @mgr: Manager object
+ *
+ * Lock VMA manager for extended lookups. Only *_locked() VMA function calls
+ * are allowed while holding this lock. All other contexts are blocked from VMA
+ * until the lock is released via drm_vma_offset_unlock_lookup().
+ *
+ * Use this if you need to take a reference to the objects returned by
+ * drm_vma_offset_lookup_locked() before releasing this lock again.
+ *
+ * This lock must not be used for anything other than extended lookups. You must
+ * not call any other VMA helpers while holding this lock.
+ *
+ * Note: You're in atomic-context while holding this lock!
+ *
+ * Example:
+ * drm_vma_offset_lock_lookup(mgr);
+ * node = drm_vma_offset_lookup_locked(mgr);
+ * if (node)
+ * kref_get_unless_zero(container_of(node, sth, entr));
+ * drm_vma_offset_unlock_lookup(mgr);
+ */
+static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
+{
+ read_lock(&mgr->vm_lock);
+}
+
+/**
+ * drm_vma_offset_unlock_lookup() - Unlock lookup for extended private use
+ * @mgr: Manager object
+ *
+ * Release lookup-lock. See drm_vma_offset_lock_lookup() for more information.
+ */
+static inline void drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *mgr)
+{
+ read_unlock(&mgr->vm_lock);
+}
+
+/**
+ * drm_vma_node_reset() - Initialize or reset node object
+ * @node: Node to initialize or reset
+ *
+ * Reset a node to its initial state. This must be called before using it with
+ * any VMA offset manager.
+ *
+ * This must not be called on an already allocated node, or you will leak
+ * memory.
+ */
+static inline void drm_vma_node_reset(struct drm_vma_offset_node *node)
+{
+ memset(node, 0, sizeof(*node));
+ node->vm_files = RB_ROOT;
+ rwlock_init(&node->vm_lock);
+}
+
+/**
+ * drm_vma_node_start() - Return start address for page-based addressing
+ * @node: Node to inspect
+ *
+ * Return the start address of the given node. This can be used as offset into
+ * the linear VM space that is provided by the VMA offset manager. Note that
+ * this can only be used for page-based addressing. If you need a proper offset
+ * for user-space mappings, you must apply "<< PAGE_SHIFT" or use the
+ * drm_vma_node_offset_addr() helper instead.
+ *
+ * RETURNS:
+ * Start address of @node for page-based addressing. 0 if the node does not
+ * have an offset allocated.
+ */
+static inline unsigned long drm_vma_node_start(struct drm_vma_offset_node *node)
+{
+ return node->vm_node.start;
+}
+
+/**
+ * drm_vma_node_size() - Return size (page-based)
+ * @node: Node to inspect
+ *
+ * Return the size as number of pages for the given node. This is the same size
+ * that was passed to drm_vma_offset_add(). If no offset is allocated for the
+ * node, this is 0.
+ *
+ * RETURNS:
+ * Size of @node as number of pages. 0 if the node does not have an offset
+ * allocated.
+ */
+static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node)
+{
+ return node->vm_node.size;
+}
+
+/**
+ * drm_vma_node_has_offset() - Check whether node is added to offset manager
+ * @node: Node to be checked
+ *
+ * RETURNS:
+ * true iff the node was previously allocated an offset and added to
+ * a VMA offset manager.
+ */
+static inline bool drm_vma_node_has_offset(struct drm_vma_offset_node *node)
+{
+ return drm_mm_node_allocated(&node->vm_node);
+}
+
+/**
+ * drm_vma_node_offset_addr() - Return sanitized offset for user-space mmaps
+ * @node: Linked offset node
+ *
+ * Same as drm_vma_node_start() but returns the address as a valid offset that
+ * can be used for user-space mappings during mmap().
+ * This must not be called on unlinked nodes.
+ *
+ * RETURNS:
+ * Offset of @node for byte-based addressing. 0 if the node does not have an
+ * object allocated.
+ */
+static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
+{
+ return ((__u64)node->vm_node.start) << PAGE_SHIFT;
+}
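
A hedged sketch of the usual consumer of this helper: a driver path that reports the fake mmap offset of an object back to user space. The function and parameter names are hypothetical.

static int example_report_mmap_offset(struct drm_vma_offset_node *node,
				      __u64 *offset_out)
{
	if (!drm_vma_node_has_offset(node))
		return -EINVAL;

	/* Byte-based offset, suitable as the offset argument of mmap(). */
	*offset_out = drm_vma_node_offset_addr(node);
	return 0;
}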
+
+/**
+ * drm_vma_node_unmap() - Unmap offset node
+ * @node: Offset node
+ * @file_mapping: Address space to unmap @node from
+ *
+ * Unmap all userspace mappings for a given offset node. The mappings must be
+ * associated with the @file_mapping address-space. If no offset exists
+ * nothing is done.
+ *
+ * This call is unlocked. The caller must guarantee that drm_vma_offset_remove()
+ * is not called on this node concurrently.
+ */
+static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
+ struct address_space *file_mapping)
+{
+ if (drm_vma_node_has_offset(node))
+ unmap_mapping_range(file_mapping,
+ drm_vma_node_offset_addr(node),
+ drm_vma_node_size(node) << PAGE_SHIFT, 1);
+}
+
+/**
+ * drm_vma_node_verify_access() - Access verification helper for TTM
+ * @node: Offset node
+ * @filp: Open-file
+ *
+ * This checks whether @filp is granted access to @node. It is the same as
+ * drm_vma_node_is_allowed() but suitable as drop-in helper for TTM
+ * verify_access() callbacks.
+ *
+ * RETURNS:
+ * 0 if access is granted, -EACCES otherwise.
+ */
+static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node,
+ struct file *filp)
+{
+ return drm_vma_node_is_allowed(node, filp) ? 0 : -EACCES;
+}
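
For illustration only, a TTM driver's verify_access() callback could be little more than a wrapper around the helper above, using the vma_node embedded in struct ttm_buffer_object (defined in ttm_bo_api.h of this same import); the example_ name is a placeholder.

static int example_bo_verify_access(struct ttm_buffer_object *bo,
				    struct file *filp)
{
	return drm_vma_node_verify_access(&bo->vma_node, filp);
}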
+
+#endif /* __DRM_VMA_MANAGER_H__ */
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
new file mode 100644
index 000000000..cb65fa14a
--- /dev/null
+++ b/include/drm/exynos_drm.h
@@ -0,0 +1,101 @@
+/* exynos_drm.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef _EXYNOS_DRM_H_
+#define _EXYNOS_DRM_H_
+
+#include <uapi/drm/exynos_drm.h>
+#include <video/videomode.h>
+
+/**
+ * A structure for LCD panel information.
+ *
+ * @vm: default video mode used for initialization
+ * @width_mm: physical width of the LCD in millimeters
+ * @height_mm: physical height of the LCD in millimeters
+ */
+struct exynos_drm_panel_info {
+ struct videomode vm;
+ u32 width_mm;
+ u32 height_mm;
+};
+
+/**
+ * Platform-specific structure for DRM-based FIMD.
+ *
+ * @panel: default panel info used for initialization
+ * @vidcon0: default value for the FIMD VIDCON0 control register
+ * @vidcon1: default value for the FIMD VIDCON1 control register
+ * @default_win: default window layer number to be used for the UI
+ * @bpp: default bits per pixel
+ */
+struct exynos_drm_fimd_pdata {
+ struct exynos_drm_panel_info panel;
+ u32 vidcon0;
+ u32 vidcon1;
+ unsigned int default_win;
+ unsigned int bpp;
+};
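
A hedged board-file sketch showing how this platform data might be filled in; the panel timings, physical sizes and the example_ name are invented for illustration only.

static struct exynos_drm_fimd_pdata example_fimd_pdata = {
	.panel = {
		.vm = {
			.pixelclock = 47500000,
			.hactive = 1024, .vactive = 600,
			.hfront_porch = 64, .hback_porch = 16, .hsync_len = 48,
			.vfront_porch = 64, .vback_porch = 64, .vsync_len = 3,
		},
		.width_mm = 223,
		.height_mm = 125,
	},
	.default_win = 0,
	.bpp = 32,
};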
+
+/**
+ * Platform-specific structure for DRM-based HDMI.
+ *
+ * @hdmi_dev: device pointer to the HDMI driver
+ * @mixer_dev: device pointer to the mixer driver
+ *
+ * This structure is used by the common HDMI driver; each device pointer
+ * is used to reach the corresponding device driver (HDMI or mixer).
+ */
+struct exynos_drm_common_hdmi_pd {
+ struct device *hdmi_dev;
+ struct device *mixer_dev;
+};
+
+/**
+ * Platform-specific structure for the DRM-based HDMI core.
+ *
+ * @is_v13: set if the HDMI block is version 1.3
+ * @cfg_hpd: function pointer to configure the HDMI hotplug-detection pin
+ * @get_hpd: function pointer to read the HDMI hotplug-detection pin
+ */
+struct exynos_drm_hdmi_pdata {
+ bool is_v13;
+ void (*cfg_hpd)(bool external);
+ int (*get_hpd)(void);
+};
+
+/**
+ * Platform-specific structure for DRM-based IPP.
+ *
+ * @inv_pclk: if set to 1, invert the pixel clock
+ * @inv_vsync: if set to 1, invert the vsync signal for writeback
+ * @inv_href: if set to 1, invert the href signal
+ * @inv_hsync: if set to 1, invert the hsync signal for writeback
+ */
+struct exynos_drm_ipp_pol {
+ unsigned int inv_pclk;
+ unsigned int inv_vsync;
+ unsigned int inv_href;
+ unsigned int inv_hsync;
+};
+
+/**
+ * Platform-specific structure for DRM-based FIMC.
+ *
+ * @pol: current hardware block polarity settings.
+ * @clk_rate: current hardware clock rate.
+ */
+struct exynos_drm_fimc_pdata {
+ struct exynos_drm_ipp_pol pol;
+ int clk_rate;
+};
+
+#endif /* _EXYNOS_DRM_H_ */
diff --git a/include/drm/gma_drm.h b/include/drm/gma_drm.h
new file mode 100644
index 000000000..87ac5e6ca
--- /dev/null
+++ b/include/drm/gma_drm.h
@@ -0,0 +1,25 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#ifndef _GMA_DRM_H_
+#define _GMA_DRM_H_
+
+#endif
diff --git a/include/drm/i2c/ch7006.h b/include/drm/i2c/ch7006.h
new file mode 100644
index 000000000..8390b437a
--- /dev/null
+++ b/include/drm/i2c/ch7006.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __DRM_I2C_CH7006_H__
+#define __DRM_I2C_CH7006_H__
+
+/**
+ * struct ch7006_encoder_params
+ *
+ * Describes how the ch7006 is wired up with the GPU. It should be
+ * used as the @params parameter of its @set_config method.
+ *
+ * See "http://www.chrontel.com/pdf/7006.pdf" for their precise
+ * meaning.
+ */
+struct ch7006_encoder_params {
+ enum {
+ CH7006_FORMAT_RGB16 = 0,
+ CH7006_FORMAT_YCrCb24m16,
+ CH7006_FORMAT_RGB24m16,
+ CH7006_FORMAT_RGB15,
+ CH7006_FORMAT_RGB24m12C,
+ CH7006_FORMAT_RGB24m12I,
+ CH7006_FORMAT_RGB24m8,
+ CH7006_FORMAT_RGB16m8,
+ CH7006_FORMAT_RGB15m8,
+ CH7006_FORMAT_YCrCb24m8,
+ } input_format;
+
+ enum {
+ CH7006_CLOCK_SLAVE = 0,
+ CH7006_CLOCK_MASTER,
+ } clock_mode;
+
+ enum {
+ CH7006_CLOCK_EDGE_NEG = 0,
+ CH7006_CLOCK_EDGE_POS,
+ } clock_edge;
+
+ int xcm, pcm;
+
+ enum {
+ CH7006_SYNC_SLAVE = 0,
+ CH7006_SYNC_MASTER,
+ } sync_direction;
+
+ enum {
+ CH7006_SYNC_SEPARATED = 0,
+ CH7006_SYNC_EMBEDDED,
+ } sync_encoding;
+
+ enum {
+ CH7006_POUT_1_8V = 0,
+ CH7006_POUT_3_3V,
+ } pout_level;
+
+ enum {
+ CH7006_ACTIVE_HSYNC = 0,
+ CH7006_ACTIVE_DSTART,
+ } active_detect;
+};
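
A hedged sketch of a possible parameter block a driver might hand to the encoder's set_config() method; all values here are arbitrary and board-specific.

static const struct ch7006_encoder_params example_ch7006_params = {
	.input_format	= CH7006_FORMAT_RGB24m16,
	.clock_mode	= CH7006_CLOCK_SLAVE,
	.clock_edge	= CH7006_CLOCK_EDGE_NEG,
	.sync_direction	= CH7006_SYNC_SLAVE,
	.sync_encoding	= CH7006_SYNC_SEPARATED,
	.pout_level	= CH7006_POUT_3_3V,
	.active_detect	= CH7006_ACTIVE_HSYNC,
};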
+
+#endif
diff --git a/include/drm/i2c/sil164.h b/include/drm/i2c/sil164.h
new file mode 100644
index 000000000..205e27384
--- /dev/null
+++ b/include/drm/i2c/sil164.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __DRM_I2C_SIL164_H__
+#define __DRM_I2C_SIL164_H__
+
+/**
+ * struct sil164_encoder_params
+ *
+ * Describes how the sil164 is connected to the GPU. It should be used
+ * as the @params parameter of its @set_config method.
+ *
+ * See "http://www.siliconimage.com/docs/SiI-DS-0021-E-164.pdf".
+ */
+struct sil164_encoder_params {
+ enum {
+ SIL164_INPUT_EDGE_FALLING = 0,
+ SIL164_INPUT_EDGE_RISING
+ } input_edge;
+
+ enum {
+ SIL164_INPUT_WIDTH_12BIT = 0,
+ SIL164_INPUT_WIDTH_24BIT
+ } input_width;
+
+ enum {
+ SIL164_INPUT_SINGLE_EDGE = 0,
+ SIL164_INPUT_DUAL_EDGE
+ } input_dual;
+
+ enum {
+ SIL164_PLL_FILTER_ON = 0,
+ SIL164_PLL_FILTER_OFF,
+ } pll_filter;
+
+	int input_skew;		/**< Allowed range [-4, 3], use 0 for no de-skew. */
+	int duallink_skew;	/**< Allowed range [-4, 3]. */
+};
+
+#endif
diff --git a/include/drm/i2c/tda998x.h b/include/drm/i2c/tda998x.h
new file mode 100644
index 000000000..3e419d92c
--- /dev/null
+++ b/include/drm/i2c/tda998x.h
@@ -0,0 +1,30 @@
+#ifndef __DRM_I2C_TDA998X_H__
+#define __DRM_I2C_TDA998X_H__
+
+struct tda998x_encoder_params {
+ u8 swap_b:3;
+ u8 mirr_b:1;
+ u8 swap_a:3;
+ u8 mirr_a:1;
+ u8 swap_d:3;
+ u8 mirr_d:1;
+ u8 swap_c:3;
+ u8 mirr_c:1;
+ u8 swap_f:3;
+ u8 mirr_f:1;
+ u8 swap_e:3;
+ u8 mirr_e:1;
+
+ u8 audio_cfg;
+ u8 audio_clk_cfg;
+ u8 audio_frame[6];
+
+ enum {
+ AFMT_SPDIF,
+ AFMT_I2S
+ } audio_format;
+
+ unsigned audio_sample_rate;
+};
+
+#endif
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
new file mode 100644
index 000000000..3e2f22e5b
--- /dev/null
+++ b/include/drm/i915_component.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _I915_COMPONENT_H_
+#define _I915_COMPONENT_H_
+
+struct i915_audio_component {
+ struct device *dev;
+
+ const struct i915_audio_component_ops {
+ struct module *owner;
+ void (*get_power)(struct device *);
+ void (*put_power)(struct device *);
+ int (*get_cdclk_freq)(struct device *);
+ } *ops;
+};
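
A minimal sketch of how the audio-driver side might call through the component ops; example_get_cdclk() is a hypothetical helper, not part of this interface.

static int example_get_cdclk(struct i915_audio_component *acomp)
{
	if (!acomp || !acomp->ops || !acomp->ops->get_cdclk_freq)
		return -ENODEV;

	return acomp->ops->get_cdclk_freq(acomp->dev);
}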
+
+#endif /* _I915_COMPONENT_H_ */
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
new file mode 100644
index 000000000..595f85c39
--- /dev/null
+++ b/include/drm/i915_drm.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _I915_DRM_H_
+#define _I915_DRM_H_
+
+#include <drm/i915_pciids.h>
+#include <uapi/drm/i915_drm.h>
+
+/* For use by IPS driver */
+extern unsigned long i915_read_mch_val(void);
+extern bool i915_gpu_raise(void);
+extern bool i915_gpu_lower(void);
+extern bool i915_gpu_busy(void);
+extern bool i915_gpu_turbo_disable(void);
+
+/*
+ * The Bridge device's PCI config space has information about the
+ * fb aperture size and the amount of pre-reserved memory.
+ * This is all handled in the intel-gtt.ko module. i915.ko only
+ * cares about the VGA bit for the VGA arbiter.
+ */
+#define INTEL_GMCH_CTRL 0x52
+#define INTEL_GMCH_VGA_DISABLE (1 << 1)
+#define SNB_GMCH_CTRL 0x50
+#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
+#define SNB_GMCH_GGMS_MASK 0x3
+#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
+#define SNB_GMCH_GMS_MASK 0x1f
+#define BDW_GMCH_GGMS_SHIFT 6
+#define BDW_GMCH_GGMS_MASK 0x3
+#define BDW_GMCH_GMS_SHIFT 8
+#define BDW_GMCH_GMS_MASK 0xff
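
For illustration, assuming a Sandybridge bridge device, the GTT graphics memory size field could be extracted from the GMCH control word roughly like this; example_snb_ggms() is a hypothetical helper.

static unsigned int example_snb_ggms(struct pci_dev *bridge)
{
	u16 ctrl;

	pci_read_config_word(bridge, SNB_GMCH_CTRL, &ctrl);
	return (ctrl >> SNB_GMCH_GGMS_SHIFT) & SNB_GMCH_GGMS_MASK;
}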
+
+#define I830_GMCH_CTRL 0x52
+
+#define I830_GMCH_GMS_MASK 0x70
+#define I830_GMCH_GMS_LOCAL 0x10
+#define I830_GMCH_GMS_STOLEN_512 0x20
+#define I830_GMCH_GMS_STOLEN_1024 0x30
+#define I830_GMCH_GMS_STOLEN_8192 0x40
+
+#define I855_GMCH_GMS_MASK 0xF0
+#define I855_GMCH_GMS_STOLEN_0M 0x0
+#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
+#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
+#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
+#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
+#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
+#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
+#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
+#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
+#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
+#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
+#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
+#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
+#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
+
+#define I830_DRB3 0x63
+#define I85X_DRB3 0x43
+#define I865_TOUD 0xc4
+
+#define I830_ESMRAMC 0x91
+#define I845_ESMRAMC 0x9e
+#define I85X_ESMRAMC 0x61
+#define TSEG_ENABLE (1 << 0)
+#define I830_TSEG_SIZE_512K (0 << 1)
+#define I830_TSEG_SIZE_1M (1 << 1)
+#define I845_TSEG_SIZE_MASK (3 << 1)
+#define I845_TSEG_SIZE_512K (2 << 1)
+#define I845_TSEG_SIZE_1M (3 << 1)
+
+#endif /* _I915_DRM_H_ */
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
new file mode 100644
index 000000000..613372375
--- /dev/null
+++ b/include/drm/i915_pciids.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright 2013 Intel Corporation
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _I915_PCIIDS_H
+#define _I915_PCIIDS_H
+
+/*
+ * A pci_device_id struct {
+ * __u32 vendor, device;
+ * __u32 subvendor, subdevice;
+ * __u32 class, class_mask;
+ * kernel_ulong_t driver_data;
+ * };
+ * Don't use C99 here because "class" is reserved and we want to
+ * give userspace flexibility.
+ */
+#define INTEL_VGA_DEVICE(id, info) { \
+ 0x8086, id, \
+ ~0, ~0, \
+ 0x030000, 0xff0000, \
+ (unsigned long) info }
+
+#define INTEL_QUANTA_VGA_DEVICE(info) { \
+ 0x8086, 0x16a, \
+ 0x152d, 0x8990, \
+ 0x030000, 0xff0000, \
+ (unsigned long) info }
+
+#define INTEL_I830_IDS(info) \
+ INTEL_VGA_DEVICE(0x3577, info)
+
+#define INTEL_I845G_IDS(info) \
+ INTEL_VGA_DEVICE(0x2562, info)
+
+#define INTEL_I85X_IDS(info) \
+ INTEL_VGA_DEVICE(0x3582, info), /* I855_GM */ \
+ INTEL_VGA_DEVICE(0x358e, info)
+
+#define INTEL_I865G_IDS(info) \
+ INTEL_VGA_DEVICE(0x2572, info) /* I865_G */
+
+#define INTEL_I915G_IDS(info) \
+ INTEL_VGA_DEVICE(0x2582, info), /* I915_G */ \
+ INTEL_VGA_DEVICE(0x258a, info) /* E7221_G */
+
+#define INTEL_I915GM_IDS(info) \
+ INTEL_VGA_DEVICE(0x2592, info) /* I915_GM */
+
+#define INTEL_I945G_IDS(info) \
+ INTEL_VGA_DEVICE(0x2772, info) /* I945_G */
+
+#define INTEL_I945GM_IDS(info) \
+ INTEL_VGA_DEVICE(0x27a2, info), /* I945_GM */ \
+ INTEL_VGA_DEVICE(0x27ae, info) /* I945_GME */
+
+#define INTEL_I965G_IDS(info) \
+ INTEL_VGA_DEVICE(0x2972, info), /* I946_GZ */ \
+ INTEL_VGA_DEVICE(0x2982, info), /* G35_G */ \
+ INTEL_VGA_DEVICE(0x2992, info), /* I965_Q */ \
+ INTEL_VGA_DEVICE(0x29a2, info) /* I965_G */
+
+#define INTEL_G33_IDS(info) \
+ INTEL_VGA_DEVICE(0x29b2, info), /* Q35_G */ \
+ INTEL_VGA_DEVICE(0x29c2, info), /* G33_G */ \
+ INTEL_VGA_DEVICE(0x29d2, info) /* Q33_G */
+
+#define INTEL_I965GM_IDS(info) \
+ INTEL_VGA_DEVICE(0x2a02, info), /* I965_GM */ \
+ INTEL_VGA_DEVICE(0x2a12, info) /* I965_GME */
+
+#define INTEL_GM45_IDS(info) \
+ INTEL_VGA_DEVICE(0x2a42, info) /* GM45_G */
+
+#define INTEL_G45_IDS(info) \
+ INTEL_VGA_DEVICE(0x2e02, info), /* IGD_E_G */ \
+ INTEL_VGA_DEVICE(0x2e12, info), /* Q45_G */ \
+ INTEL_VGA_DEVICE(0x2e22, info), /* G45_G */ \
+ INTEL_VGA_DEVICE(0x2e32, info), /* G41_G */ \
+ INTEL_VGA_DEVICE(0x2e42, info), /* B43_G */ \
+ INTEL_VGA_DEVICE(0x2e92, info) /* B43_G.1 */
+
+#define INTEL_PINEVIEW_IDS(info) \
+ INTEL_VGA_DEVICE(0xa001, info), \
+ INTEL_VGA_DEVICE(0xa011, info)
+
+#define INTEL_IRONLAKE_D_IDS(info) \
+ INTEL_VGA_DEVICE(0x0042, info)
+
+#define INTEL_IRONLAKE_M_IDS(info) \
+ INTEL_VGA_DEVICE(0x0046, info)
+
+#define INTEL_SNB_D_IDS(info) \
+ INTEL_VGA_DEVICE(0x0102, info), \
+ INTEL_VGA_DEVICE(0x0112, info), \
+ INTEL_VGA_DEVICE(0x0122, info), \
+ INTEL_VGA_DEVICE(0x010A, info)
+
+#define INTEL_SNB_M_IDS(info) \
+ INTEL_VGA_DEVICE(0x0106, info), \
+ INTEL_VGA_DEVICE(0x0116, info), \
+ INTEL_VGA_DEVICE(0x0126, info)
+
+#define INTEL_IVB_M_IDS(info) \
+ INTEL_VGA_DEVICE(0x0156, info), /* GT1 mobile */ \
+ INTEL_VGA_DEVICE(0x0166, info) /* GT2 mobile */
+
+#define INTEL_IVB_D_IDS(info) \
+ INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \
+ INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ \
+ INTEL_VGA_DEVICE(0x015a, info), /* GT1 server */ \
+ INTEL_VGA_DEVICE(0x016a, info) /* GT2 server */
+
+#define INTEL_IVB_Q_IDS(info) \
+ INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */
+
+#define INTEL_HSW_D_IDS(info) \
+ INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \
+ INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
+ INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
+ INTEL_VGA_DEVICE(0x040a, info), /* GT1 server */ \
+ INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \
+ INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \
+ INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \
+ INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \
+ INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \
+ INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \
+ INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \
+ INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \
+ INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \
+ INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \
+ INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \
+ INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \
+ INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \
+ INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \
+ INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \
+ INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \
+ INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \
+ INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \
+ INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \
+ INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \
+ INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \
+ INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \
+ INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \
+ INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \
+ INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \
+ INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \
+ INTEL_VGA_DEVICE(0x0A0B, info), /* ULT GT1 reserved */ \
+ INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \
+ INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \
+ INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \
+ INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \
+ INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \
+ INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \
+ INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \
+ INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \
+ INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \
+ INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \
+ INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
+ INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \
+ INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \
+ INTEL_VGA_DEVICE(0x0D2E, info) /* CRW GT3 reserved */ \
+
+#define INTEL_HSW_M_IDS(info) \
+ INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
+ INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
+ INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \
+ INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
+ INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \
+ INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
+ INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
+ INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \
+ INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
+ INTEL_VGA_DEVICE(0x0A0E, info), /* ULX GT1 mobile */ \
+ INTEL_VGA_DEVICE(0x0A1E, info), /* ULX GT2 mobile */ \
+ INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 reserved */ \
+ INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \
+ INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
+ INTEL_VGA_DEVICE(0x0D26, info) /* CRW GT3 mobile */
+
+#define INTEL_VLV_M_IDS(info) \
+ INTEL_VGA_DEVICE(0x0f30, info), \
+ INTEL_VGA_DEVICE(0x0f31, info), \
+ INTEL_VGA_DEVICE(0x0f32, info), \
+ INTEL_VGA_DEVICE(0x0f33, info), \
+ INTEL_VGA_DEVICE(0x0157, info)
+
+#define INTEL_VLV_D_IDS(info) \
+ INTEL_VGA_DEVICE(0x0155, info)
+
+#define INTEL_BDW_GT12M_IDS(info) \
+ INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \
+ INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \
+ INTEL_VGA_DEVICE(0x160B, info), /* GT1 Iris */ \
+ INTEL_VGA_DEVICE(0x160E, info), /* GT1 ULX */ \
+ INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \
+ INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \
+ INTEL_VGA_DEVICE(0x161B, info), /* GT2 ULT */ \
+ INTEL_VGA_DEVICE(0x161E, info) /* GT2 ULX */
+
+#define INTEL_BDW_GT12D_IDS(info) \
+ INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \
+ INTEL_VGA_DEVICE(0x160D, info), /* GT1 Workstation */ \
+ INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \
+ INTEL_VGA_DEVICE(0x161D, info) /* GT2 Workstation */
+
+#define INTEL_BDW_GT3M_IDS(info) \
+ INTEL_VGA_DEVICE(0x1622, info), /* ULT */ \
+ INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \
+ INTEL_VGA_DEVICE(0x162B, info), /* Iris */ \
+ INTEL_VGA_DEVICE(0x162E, info) /* ULX */
+
+#define INTEL_BDW_GT3D_IDS(info) \
+ INTEL_VGA_DEVICE(0x162A, info), /* Server */ \
+ INTEL_VGA_DEVICE(0x162D, info) /* Workstation */
+
+#define INTEL_BDW_RSVDM_IDS(info) \
+ INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \
+ INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \
+ INTEL_VGA_DEVICE(0x163B, info), /* Iris */ \
+ INTEL_VGA_DEVICE(0x163E, info) /* ULX */
+
+#define INTEL_BDW_RSVDD_IDS(info) \
+ INTEL_VGA_DEVICE(0x163A, info), /* Server */ \
+ INTEL_VGA_DEVICE(0x163D, info) /* Workstation */
+
+#define INTEL_BDW_M_IDS(info) \
+ INTEL_BDW_GT12M_IDS(info), \
+ INTEL_BDW_GT3M_IDS(info), \
+ INTEL_BDW_RSVDM_IDS(info)
+
+#define INTEL_BDW_D_IDS(info) \
+ INTEL_BDW_GT12D_IDS(info), \
+ INTEL_BDW_GT3D_IDS(info), \
+ INTEL_BDW_RSVDD_IDS(info)
+
+#define INTEL_CHV_IDS(info) \
+ INTEL_VGA_DEVICE(0x22b0, info), \
+ INTEL_VGA_DEVICE(0x22b1, info), \
+ INTEL_VGA_DEVICE(0x22b2, info), \
+ INTEL_VGA_DEVICE(0x22b3, info)
+
+#define INTEL_SKL_GT1_IDS(info) \
+ INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \
+ INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \
+ INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
+ INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
+ INTEL_VGA_DEVICE(0x190A, info) /* SRV GT1 */
+
+#define INTEL_SKL_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
+ INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \
+ INTEL_VGA_DEVICE(0x191E, info), /* ULX GT2 */ \
+ INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \
+ INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
+ INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \
+ INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
+
+#define INTEL_SKL_GT3_IDS(info) \
+ INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
+ INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
+ INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */ \
+
+#define INTEL_SKL_IDS(info) \
+ INTEL_SKL_GT1_IDS(info), \
+ INTEL_SKL_GT2_IDS(info), \
+ INTEL_SKL_GT3_IDS(info)
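
A hedged sketch of how a driver builds its pci_device_id table from the macros above; the driver_data type and table name are placeholders, only the macro usage is the point.

struct example_device_info { int gen; };

static const struct example_device_info example_snb_info = { .gen = 6 };

static const struct pci_device_id example_pciidlist[] = {
	INTEL_SNB_D_IDS(&example_snb_info),
	INTEL_SNB_M_IDS(&example_snb_info),
	{0, 0, 0}
};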
+
+
+#endif /* _I915_PCIIDS_H */
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
new file mode 100644
index 000000000..b08bdade6
--- /dev/null
+++ b/include/drm/intel-gtt.h
@@ -0,0 +1,32 @@
+/* Common header for intel-gtt.ko and i915.ko */
+
+#ifndef _DRM_INTEL_GTT_H
+#define _DRM_INTEL_GTT_H
+
+void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
+ phys_addr_t *mappable_base, unsigned long *mappable_end);
+
+int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
+ struct agp_bridge_data *bridge);
+void intel_gmch_remove(void);
+
+bool intel_enable_gtt(void);
+
+void intel_gtt_chipset_flush(void);
+void intel_gtt_insert_sg_entries(struct sg_table *st,
+ unsigned int pg_start,
+ unsigned int flags);
+void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
+
+/* Special gtt memory types */
+#define AGP_DCACHE_MEMORY 1
+#define AGP_PHYS_MEMORY 2
+
+/* flag for GFDT type */
+#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
+
+#ifdef CONFIG_INTEL_IOMMU
+extern int intel_iommu_gfx_mapped;
+#endif
+
+#endif
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
new file mode 100644
index 000000000..c768ddfbe
--- /dev/null
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -0,0 +1,700 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#ifndef _TTM_BO_API_H_
+#define _TTM_BO_API_H_
+
+#include <drm/drm_hashtab.h>
+#include <drm/drm_vma_manager.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/mm.h>
+#include <linux/bitmap.h>
+#include <linux/reservation.h>
+
+struct ttm_bo_device;
+
+struct drm_mm_node;
+
+/**
+ * struct ttm_place
+ *
+ * @fpfn: first valid page frame number to put the object
+ * @lpfn: last valid page frame number to put the object
+ * @flags: memory domain and caching flags for the object
+ *
+ * Structure indicating a possible place to put an object.
+ */
+struct ttm_place {
+ unsigned fpfn;
+ unsigned lpfn;
+ uint32_t flags;
+};
+
+/**
+ * struct ttm_placement
+ *
+ * @num_placement: number of preferred placements
+ * @placement: preferred placements
+ * @num_busy_placement: number of preferred placements when the buffer needs to be evicted
+ * @busy_placement: preferred placements when the buffer needs to be evicted
+ *
+ * Structure indicating the placement you request for an object.
+ */
+struct ttm_placement {
+ unsigned num_placement;
+ const struct ttm_place *placement;
+ unsigned num_busy_placement;
+ const struct ttm_place *busy_placement;
+};
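
An illustrative placement, assuming the TTM_PL_FLAG_* definitions from ttm_placement.h (also part of this import): prefer write-combined VRAM, and fall back to cached GTT when eviction pressure forces a move.

static const struct ttm_place example_places[] = {
	{ .fpfn = 0, .lpfn = 0, .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC },
	{ .fpfn = 0, .lpfn = 0, .flags = TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED },
};

static const struct ttm_placement example_placement = {
	.num_placement		= 1,
	.placement		= &example_places[0],
	.num_busy_placement	= 1,
	.busy_placement		= &example_places[1],
};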
+
+/**
+ * struct ttm_bus_placement
+ *
+ * @addr: mapped virtual address
+ * @base: bus base address
+ * @is_iomem: is this I/O memory?
+ * @size: size in bytes
+ * @offset: offset from the base address
+ * @io_reserved_vm: The VM system has a refcount in @io_reserved_count
+ * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve
+ *
+ * Structure indicating the bus placement of an object.
+ */
+struct ttm_bus_placement {
+ void *addr;
+ unsigned long base;
+ unsigned long size;
+ unsigned long offset;
+ bool is_iomem;
+ bool io_reserved_vm;
+ uint64_t io_reserved_count;
+};
+
+
+/**
+ * struct ttm_mem_reg
+ *
+ * @mm_node: Memory manager node.
+ * @size: Requested size of memory region.
+ * @num_pages: Actual size of memory region in pages.
+ * @page_alignment: Page alignment.
+ * @placement: Placement flags.
+ * @bus: Placement on io bus accessible to the CPU
+ *
+ * Structure indicating the placement and space resources used by a
+ * buffer object.
+ */
+
+struct ttm_mem_reg {
+ void *mm_node;
+ unsigned long start;
+ unsigned long size;
+ unsigned long num_pages;
+ uint32_t page_alignment;
+ uint32_t mem_type;
+ uint32_t placement;
+ struct ttm_bus_placement bus;
+};
+
+/**
+ * enum ttm_bo_type
+ *
+ * @ttm_bo_type_device: These are 'normal' buffers that can
+ * be mmapped by user space. Each of these bos occupies a slot in the
+ * device address space that can be used for normal VM operations.
+ *
+ * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
+ * but they cannot be accessed from user-space. For kernel-only use.
+ *
+ * @ttm_bo_type_sg: Buffer made from dmabuf sg table shared with another
+ * driver.
+ */
+
+enum ttm_bo_type {
+ ttm_bo_type_device,
+ ttm_bo_type_kernel,
+ ttm_bo_type_sg
+};
+
+struct ttm_tt;
+
+/**
+ * struct ttm_buffer_object
+ *
+ * @bdev: Pointer to the buffer object device structure.
+ * @type: The bo type.
+ * @destroy: Destruction function. If NULL, kfree is used.
+ * @num_pages: Actual number of pages.
+ * @acc_size: Accounted size for this object.
+ * @kref: Reference count of this buffer object. When this refcount reaches
+ * zero, the object is put on the delayed delete list.
+ * @list_kref: List reference count of this buffer object. This member is
+ * used to avoid destruction while the buffer object is still on a list.
+ * The LRU lists, the delayed delete list, and kref != 0 may each hold
+ * one refcount. When this refcount reaches zero, the object is destroyed.
+ * @mem: structure describing current placement.
+ * @persistent_swap_storage: Usually the swap storage is deleted for buffers
+ * pinned in physical memory. If this behaviour is not desired, this member
+ * holds a pointer to a persistent shmem object.
+ * @ttm: TTM structure holding system pages.
+ * @evicted: Whether the object was evicted without user-space knowing.
+ * @cpu_writes: For synchronization. Number of cpu writers.
+ * @lru: List head for the lru list.
+ * @ddestroy: List head for the delayed destroy list.
+ * @swap: List head for swap LRU list.
+ * @priv_flags: Flags describing buffer object internal state.
+ * @vma_node: Address space manager node.
+ * @offset: The current GPU offset, which can have different meanings
+ * depending on the memory type. For SYSTEM type memory, it should be 0.
+ * @cur_placement: Hint of current placement.
+ * @wu_mutex: Wait unreserved mutex.
+ *
+ * Base class for TTM buffer object, that deals with data placement and CPU
+ * mappings. GPU mappings are really up to the driver, but for simpler GPUs
+ * the driver can usually use the placement offset @offset directly as the
+ * GPU virtual address. For drivers implementing multiple
+ * GPU memory manager contexts, the driver should manage the address space
+ * in these contexts separately and use these objects to get the correct
+ * placement and caching for these GPU maps. This makes it possible to use
+ * these objects for even quite elaborate memory management schemes.
+ * The destroy member and the API visibility of this object make it
+ * possible to derive driver-specific types.
+ */
+
+struct ttm_buffer_object {
+ /**
+ * Members constant at init.
+ */
+
+ struct ttm_bo_global *glob;
+ struct ttm_bo_device *bdev;
+ enum ttm_bo_type type;
+ void (*destroy) (struct ttm_buffer_object *);
+ unsigned long num_pages;
+ size_t acc_size;
+
+ /**
+ * Members not needing protection.
+ */
+
+ struct kref kref;
+ struct kref list_kref;
+
+ /**
+ * Members protected by the bo::resv::reserved lock.
+ */
+
+ struct ttm_mem_reg mem;
+ struct file *persistent_swap_storage;
+ struct ttm_tt *ttm;
+ bool evicted;
+
+ /**
+ * Members protected by the bo::reserved lock only when written to.
+ */
+
+ atomic_t cpu_writers;
+
+ /**
+ * Members protected by the bdev::lru_lock.
+ */
+
+ struct list_head lru;
+ struct list_head ddestroy;
+ struct list_head swap;
+ struct list_head io_reserve_lru;
+
+ /**
+ * Members protected by a bo reservation.
+ */
+
+ unsigned long priv_flags;
+
+ struct drm_vma_offset_node vma_node;
+
+ /**
+ * Special members that are protected by the reserve lock
+ * and the bo::lock when written to. Can be read with
+ * either of these locks held.
+ */
+
+ uint64_t offset; /* GPU address space is independent of CPU word size */
+ uint32_t cur_placement;
+
+ struct sg_table *sg;
+
+ struct reservation_object *resv;
+ struct reservation_object ttm_resv;
+ struct mutex wu_mutex;
+};
+
+/**
+ * struct ttm_bo_kmap_obj
+ *
+ * @virtual: The current kernel virtual address.
+ * @page: The page when kmap'ing a single page.
+ * @bo_kmap_type: Type of bo_kmap.
+ *
+ * Object describing a kernel mapping. Since a TTM bo may be located
+ * in various memory types with various caching policies, the
+ * mapping can either be an ioremap, a vmap, a kmap or part of a
+ * premapped region.
+ */
+
+#define TTM_BO_MAP_IOMEM_MASK 0x80
+struct ttm_bo_kmap_obj {
+ void *virtual;
+ struct page *page;
+ enum {
+ ttm_bo_map_iomap = 1 | TTM_BO_MAP_IOMEM_MASK,
+ ttm_bo_map_vmap = 2,
+ ttm_bo_map_kmap = 3,
+ ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK,
+ } bo_kmap_type;
+ struct ttm_buffer_object *bo;
+};
+
+/**
+ * ttm_bo_reference - reference a struct ttm_buffer_object
+ *
+ * @bo: The buffer object.
+ *
+ * Returns a refcounted pointer to a buffer object.
+ */
+
+static inline struct ttm_buffer_object *
+ttm_bo_reference(struct ttm_buffer_object *bo)
+{
+ kref_get(&bo->kref);
+ return bo;
+}
+
+/**
+ * ttm_bo_wait - wait for buffer idle.
+ *
+ * @bo: The buffer object.
+ * @interruptible: Use interruptible wait.
+ * @no_wait: Return immediately if buffer is busy.
+ *
+ * This function must be called with the bo::mutex held, and makes
+ * sure any previous rendering to the buffer is completed.
+ * Note: It might be necessary to block validations before the
+ * wait by reserving the buffer.
+ * Returns -EBUSY if no_wait is true and the buffer is busy.
+ * Returns -ERESTARTSYS if interrupted by a signal.
+ */
+extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
+ bool interruptible, bool no_wait);
+/**
+ * ttm_bo_validate
+ *
+ * @bo: The buffer object.
+ * @placement: Proposed placement for the buffer object.
+ * @interruptible: Sleep interruptible if sleeping.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ *
+ * Changes placement and caching policy of the buffer object
+ * according to the proposed placement.
+ * Returns
+ * -EINVAL on invalid proposed placement.
+ * -ENOMEM on out-of-memory condition.
+ * -EBUSY if no_wait is true and buffer busy.
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+extern int ttm_bo_validate(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ bool interruptible,
+ bool no_wait_gpu);
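
For illustration only (not part of this header), a minimal sketch of how a driver might migrate an already reserved buffer into VRAM with ttm_bo_validate(); struct ttm_place, struct ttm_placement and the TTM_PL_* flags come from ttm_placement.h:

	/* Hypothetical helper: move an already reserved bo into write-combined VRAM. */
	static int example_pin_to_vram(struct ttm_buffer_object *bo)
	{
		struct ttm_place place = {
			.fpfn = 0,
			.lpfn = 0,
			.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
		};
		struct ttm_placement placement = {
			.num_placement = 1,
			.placement = &place,
			.num_busy_placement = 1,
			.busy_placement = &place,
		};

		/* Sleep interruptibly; don't give up just because the GPU is busy. */
		return ttm_bo_validate(bo, &placement, true, false);
	}
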
+
+/**
+ * ttm_bo_unref
+ *
+ * @bo: The buffer object.
+ *
+ * Unreference and clear a pointer to a buffer object.
+ */
+extern void ttm_bo_unref(struct ttm_buffer_object **bo);
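
As a hedged usage sketch, keeping a buffer alive across deferred work with the reference helpers might look like the fragment below (the deferred consumer is assumed, not shown):

	/* Hypothetical: keep the bo alive while a deferred task still uses it. */
	struct ttm_buffer_object *ref = ttm_bo_reference(bo);

	/* ... hand @ref to a workqueue item or fence callback ... */

	ttm_bo_unref(&ref);	/* drops the reference and clears @ref */
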
+
+
+/**
+ * ttm_bo_list_ref_sub
+ *
+ * @bo: The buffer object.
+ * @count: The number of references with which to decrease @bo::list_kref;
+ * @never_free: The refcount should not reach zero with this operation.
+ *
+ * Release @count lru list references to this buffer object.
+ */
+extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+ bool never_free);
+
+/**
+ * ttm_bo_add_to_lru
+ *
+ * @bo: The buffer object.
+ *
+ * Add this bo to the relevant mem type lru and, if it's backed by
+ * system pages (ttms) to the swap list.
+ * This function must be called with struct ttm_bo_global::lru_lock held, and
+ * is typically called immediately prior to unreserving a bo.
+ */
+extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_del_from_lru
+ *
+ * @bo: The buffer object.
+ *
+ * Remove this bo from all lru lists used to lookup and reserve an object.
+ * This function must be called with struct ttm_bo_global::lru_lock held,
+ * and is usually called just immediately after the bo has been reserved to
+ * avoid recursive reservation from lru lists.
+ */
+extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
+
+
+/**
+ * ttm_bo_lock_delayed_workqueue
+ *
+ * Prevent the delayed workqueue from running.
+ * Returns
+ * True if the workqueue was queued at the time
+ */
+extern int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev);
+
+/**
+ * ttm_bo_unlock_delayed_workqueue
+ *
+ * Allows the delayed workqueue to run.
+ */
+extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
+ int resched);
+
+/**
+ * ttm_bo_synccpu_write_grab
+ *
+ * @bo: The buffer object:
+ * @no_wait: Return immediately if buffer is busy.
+ *
+ * Synchronizes a buffer object for CPU RW access. This means
+ * command submission that affects the buffer will return -EBUSY
+ * until ttm_bo_synccpu_write_release is called.
+ *
+ * Returns
+ * -EBUSY if the buffer is busy and no_wait is true.
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+extern int
+ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
+
+/**
+ * ttm_bo_synccpu_write_release:
+ *
+ * @bo : The buffer object.
+ *
+ * Releases a synccpu lock.
+ */
+extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
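
A hedged sketch of bracketing CPU writes with the synccpu helpers (the CPU mapping itself is assumed to be set up elsewhere, for example with ttm_bo_kmap):

	/* Hypothetical: block command submission while the CPU writes to @bo. */
	static int example_cpu_write(struct ttm_buffer_object *bo)
	{
		int ret;

		ret = ttm_bo_synccpu_write_grab(bo, false);
		if (ret)		/* -EBUSY or -ERESTARTSYS */
			return ret;

		/* ... CPU writes through a previously set up mapping of @bo ... */

		ttm_bo_synccpu_write_release(bo);
		return 0;
	}
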
+
+/**
+ * ttm_bo_acc_size
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @bo_size: size of the buffer object in bytes.
+ * @struct_size: size of the structure holding the buffer object data.
+ *
+ * Returns size to account for a buffer object
+ */
+size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
+ unsigned long bo_size,
+ unsigned struct_size);
+size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
+ unsigned long bo_size,
+ unsigned struct_size);
+
+/**
+ * ttm_bo_init
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @bo: Pointer to a ttm_buffer_object to be initialized.
+ * @size: Requested size of buffer object.
+ * @type: Requested type of buffer object.
+ * @placement: Initial placement.
+ * @page_alignment: Data alignment in pages.
+ * @interruptible: If needing to sleep to wait for GPU resources,
+ * sleep interruptible.
+ * @persistent_swap_storage: Usually the swap storage is deleted for buffers
+ * pinned in physical memory. If this behaviour is not desired, this member
+ * holds a pointer to a persistent shmem object. Typically, this would
+ * point to the shmem object backing a GEM object if TTM is used to back a
+ * GEM user interface.
+ * @acc_size: Accounted size for this object.
+ * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
+ * @destroy: Destroy function. Use NULL for kfree().
+ *
+ * This function initializes a pre-allocated struct ttm_buffer_object.
+ * As this object may be part of a larger structure, this function,
+ * together with the @destroy function,
+ * enables driver-specific objects derived from a ttm_buffer_object.
+ * On successful return, the object kref and list_kref are set to 1.
+ * If a failure occurs, the function will call the @destroy function, or
+ * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
+ * illegal and will likely cause memory corruption.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid placement flags.
+ * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
+ */
+
+extern int ttm_bo_init(struct ttm_bo_device *bdev,
+ struct ttm_buffer_object *bo,
+ unsigned long size,
+ enum ttm_bo_type type,
+ struct ttm_placement *placement,
+ uint32_t page_alignment,
+			bool interruptible,
+ struct file *persistent_swap_storage,
+ size_t acc_size,
+ struct sg_table *sg,
+ struct reservation_object *resv,
+ void (*destroy) (struct ttm_buffer_object *));
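
To make the derived-object pattern described above concrete, here is a hedged sketch of a hypothetical driver wrapper; struct foo_bo, foo_bo_destroy and foo_bo_create are invented names, and <linux/slab.h> is assumed for kzalloc()/kfree():

	/* Hypothetical driver object deriving from ttm_buffer_object. */
	struct foo_bo {
		struct ttm_buffer_object tbo;	/* embedded, never a pointer */
		u32 tiling_flags;
	};

	static void foo_bo_destroy(struct ttm_buffer_object *tbo)
	{
		struct foo_bo *fbo = container_of(tbo, struct foo_bo, tbo);

		kfree(fbo);
	}

	static int foo_bo_create(struct ttm_bo_device *bdev, unsigned long size,
				 struct ttm_placement *placement,
				 struct foo_bo **p_fbo)
	{
		struct foo_bo *fbo;
		size_t acc_size = ttm_bo_acc_size(bdev, size, sizeof(*fbo));
		int ret;

		fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
		if (!fbo)
			return -ENOMEM;

		ret = ttm_bo_init(bdev, &fbo->tbo, size, ttm_bo_type_device,
				  placement, 1 /* page_alignment */, true, NULL,
				  acc_size, NULL, NULL, &foo_bo_destroy);
		if (ret)	/* on failure ttm_bo_init() already called foo_bo_destroy() */
			return ret;

		*p_fbo = fbo;
		return 0;
	}
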
+
+/**
+ * ttm_bo_create
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @size: Requested size of buffer object.
+ * @type: Requested type of buffer object.
+ * @placement: Initial placement.
+ * @page_alignment: Data alignment in pages.
+ * @interruptible: If needing to sleep while waiting for GPU resources,
+ * sleep interruptible.
+ * @persistent_swap_storage: Usually the swap storage is deleted for buffers
+ * pinned in physical memory. If this behaviour is not desired, this member
+ * holds a pointer to a persistent shmem object. Typically, this would
+ * point to the shmem object backing a GEM object if TTM is used to back a
+ * GEM user interface.
+ * @p_bo: On successful completion *p_bo points to the created object.
+ *
+ * This function allocates a ttm_buffer_object, and then calls ttm_bo_init
+ * on that object. The destroy function is set to kfree().
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid placement flags.
+ * -ERESTARTSYS: Interrupted by signal while waiting for resources.
+ */
+
+extern int ttm_bo_create(struct ttm_bo_device *bdev,
+ unsigned long size,
+ enum ttm_bo_type type,
+ struct ttm_placement *placement,
+ uint32_t page_alignment,
+ bool interruptible,
+ struct file *persistent_swap_storage,
+ struct ttm_buffer_object **p_bo);
+
+/**
+ * ttm_bo_init_mm
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @mem_type: The memory type.
+ * @p_size: Size of the managed area in pages.
+ *
+ * Initialize a manager for a given memory type.
+ * Note: if part of driver firstopen, it must be protected from a
+ * potentially racing lastclose.
+ * Returns:
+ * -EINVAL: invalid size or memory type.
+ * -ENOMEM: Not enough memory.
+ * May also return driver-specified errors.
+ */
+
+extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
+ unsigned long p_size);
+/**
+ * ttm_bo_clean_mm
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @mem_type: The memory type.
+ *
+ * Take down a manager for a given memory type after first walking
+ * the LRU list to evict any buffers left alive.
+ *
+ * Normally, this function is part of lastclose() or unload(), and at that
+ * point there shouldn't be any buffers left created by user-space, since
+ * they should've been removed by the file descriptor release() method.
+ * However, before this function is run, make sure to signal all sync objects,
+ * and verify that the delayed delete queue is empty. The driver must also
+ * make sure that there are no NO_EVICT buffers present in this memory type
+ * when the call is made.
+ *
+ * If this function is part of a VT switch, the caller must make sure that
+ * there are no applications currently validating buffers before this
+ * function is called. The caller can do that by first taking the
+ * struct ttm_bo_device::ttm_lock in write mode.
+ *
+ * Returns:
+ * -EINVAL: invalid or uninitialized memory type.
+ * -EBUSY: There are still buffers left in this memory type.
+ */
+
+extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
+
+/**
+ * ttm_bo_evict_mm
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @mem_type: The memory type.
+ *
+ * Evicts all buffers on the lru list of the memory type.
+ * This is normally part of a VT switch or an
+ * out-of-memory-space-due-to-fragmentation handler.
+ * The caller must make sure that there are no other processes
+ * currently validating buffers, and can do that by taking the
+ * struct ttm_bo_device::ttm_lock in write mode.
+ *
+ * Returns:
+ * -EINVAL: Invalid or uninitialized memory type.
+ * -ERESTARTSYS: The call was interrupted by a signal while waiting to
+ * evict a buffer.
+ */
+
+extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
+
+/**
+ * ttm_kmap_obj_virtual
+ *
+ * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
+ * @is_iomem: Pointer to a boolean that on return is true if the
+ * virtual map is io memory, false if normal memory.
+ *
+ * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
+ * If *is_iomem is true on return, the virtual address points to an io memory area
+ * that should strictly be accessed with the iowriteXX() and similar functions.
+ */
+
+static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
+ bool *is_iomem)
+{
+ *is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK);
+ return map->virtual;
+}
+
+/**
+ * ttm_bo_kmap
+ *
+ * @bo: The buffer object.
+ * @start_page: The first page to map.
+ * @num_pages: Number of pages to map.
+ * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
+ *
+ * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
+ * data in the buffer object. The ttm_kmap_obj_virtual function can then be
+ * used to obtain a virtual address to the data.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid range.
+ */
+
+extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
+ unsigned long num_pages, struct ttm_bo_kmap_obj *map);
+
+/**
+ * ttm_bo_kunmap
+ *
+ * @map: Object describing the map to unmap.
+ *
+ * Unmaps a kernel map set up by ttm_bo_kmap.
+ */
+
+extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
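
A hedged sketch combining ttm_bo_kmap(), ttm_kmap_obj_virtual() and ttm_bo_kunmap(); the buffer is assumed to be reserved and idle:

	/* Hypothetical: zero the first page of @bo through a kernel mapping. */
	static int example_clear_first_page(struct ttm_buffer_object *bo)
	{
		struct ttm_bo_kmap_obj map;
		bool is_iomem;
		void *virtual;
		int ret;

		ret = ttm_bo_kmap(bo, 0, 1, &map);
		if (ret)
			return ret;

		virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
		if (is_iomem)
			memset_io((void __iomem *)virtual, 0, PAGE_SIZE);
		else
			memset(virtual, 0, PAGE_SIZE);

		ttm_bo_kunmap(&map);
		return 0;
	}
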
+
+/**
+ * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
+ *
+ * @vma: vma as input from the fbdev mmap method.
+ * @bo: The bo backing the address space. The address space will
+ * have the same size as the bo, and start at offset 0.
+ *
+ * This function is intended to be called by the fbdev mmap method
+ * if the fbdev address space is to be backed by a bo.
+ */
+
+extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
+ struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_mmap - mmap out of the ttm device address space.
+ *
+ * @filp: filp as input from the mmap method.
+ * @vma: vma as input from the mmap method.
+ * @bdev: Pointer to the ttm_bo_device with the address space manager.
+ *
+ * This function is intended to be called by the device mmap method
+ * if the device address space is to be backed by the bo manager.
+ */
+
+extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
+ struct ttm_bo_device *bdev);
+
+/**
+ * ttm_bo_io
+ *
+ * @bdev: Pointer to the struct ttm_bo_device.
+ * @filp: Pointer to the struct file attempting to read / write.
+ * @wbuf: User-space pointer to address of buffer to write. NULL on read.
+ * @rbuf: User-space pointer to address of buffer to read into.
+ * Null on write.
+ * @count: Number of bytes to read / write.
+ * @f_pos: Pointer to current file position.
+ * @write: 1 for write, 0 for read.
+ *
+ * This function implements read / write into ttm buffer objects, and is
+ * intended to be called from the fops::read and fops::write methods.
+ * Returns:
+ * See man (2) write, man(2) read. In particular,
+ * the function may return -ERESTARTSYS if
+ * interrupted by a signal.
+ */
+
+extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
+ const char __user *wbuf, char __user *rbuf,
+ size_t count, loff_t *f_pos, bool write);
+
+extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
+extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo);
+#endif
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
new file mode 100644
index 000000000..813042ced
--- /dev/null
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -0,0 +1,1059 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+#ifndef _TTM_BO_DRIVER_H_
+#define _TTM_BO_DRIVER_H_
+
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_memory.h>
+#include <ttm/ttm_module.h>
+#include <ttm/ttm_placement.h>
+#include <drm/drm_mm.h>
+#include <drm/drm_global.h>
+#include <drm/drm_vma_manager.h>
+#include <linux/workqueue.h>
+#include <linux/fs.h>
+#include <linux/spinlock.h>
+#include <linux/reservation.h>
+
+struct ttm_backend_func {
+ /**
+ * struct ttm_backend_func member bind
+ *
+ * @ttm: Pointer to a struct ttm_tt.
+ * @bo_mem: Pointer to a struct ttm_mem_reg describing the
+ * memory type and location for binding.
+ *
+ * Bind the backend pages into the aperture in the location
+ * indicated by @bo_mem. This function should be able to handle
+ * differences between aperture and system page sizes.
+ */
+ int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
+
+ /**
+ * struct ttm_backend_func member unbind
+ *
+ * @ttm: Pointer to a struct ttm_tt.
+ *
+ * Unbind previously bound backend pages. This function should be
+ * able to handle differences between aperture and system page sizes.
+ */
+ int (*unbind) (struct ttm_tt *ttm);
+
+ /**
+ * struct ttm_backend_func member destroy
+ *
+ * @ttm: Pointer to a struct ttm_tt.
+ *
+	 * Destroy the backend. This will be called back from ttm_tt_destroy, so
+	 * don't call ttm_tt_destroy from the callback or you will loop forever.
+ */
+ void (*destroy) (struct ttm_tt *ttm);
+};
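
As an illustration only, a hypothetical backend might be wired up as below; the foo_* names are invented, and real implementations program their GART or aperture in bind/unbind:

	/* Hypothetical backend binding pages into an imaginary foo GART. */
	static int foo_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
	{
		/* program ttm->pages[0..ttm->num_pages) at bo_mem->start (in pages) */
		return 0;
	}

	static int foo_ttm_unbind(struct ttm_tt *ttm)
	{
		/* tear the same range back out of the aperture */
		return 0;
	}

	static void foo_ttm_destroy(struct ttm_tt *ttm)
	{
		/* assumes struct ttm_tt is the first member of the driver's tt object */
		ttm_tt_fini(ttm);
		kfree(ttm);
	}

	static struct ttm_backend_func foo_backend_func = {
		.bind = foo_ttm_bind,
		.unbind = foo_ttm_unbind,
		.destroy = foo_ttm_destroy,
	};
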
+
+#define TTM_PAGE_FLAG_WRITE (1 << 3)
+#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
+#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
+#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
+#define TTM_PAGE_FLAG_DMA32 (1 << 7)
+#define TTM_PAGE_FLAG_SG (1 << 8)
+
+enum ttm_caching_state {
+ tt_uncached,
+ tt_wc,
+ tt_cached
+};
+
+/**
+ * struct ttm_tt
+ *
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @func: Pointer to a struct ttm_backend_func that describes
+ * the backend methods.
+ * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
+ * pointer.
+ * @pages: Array of pages backing the data.
+ * @num_pages: Number of pages in the page array.
+ * @page_flags: The TTM_PAGE_FLAG_XX flags for these pages.
+ * @glob: Pointer to the struct ttm_bo_global.
+ * @swap_storage: Pointer to shmem struct file for swap storage.
+ * @caching_state: The current caching state of the pages.
+ * @state: The current binding state of the pages.
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+
+struct ttm_tt {
+ struct ttm_bo_device *bdev;
+ struct ttm_backend_func *func;
+ struct page *dummy_read_page;
+ struct page **pages;
+ uint32_t page_flags;
+ unsigned long num_pages;
+ struct sg_table *sg; /* for SG objects via dma-buf */
+ struct ttm_bo_global *glob;
+ struct file *swap_storage;
+ enum ttm_caching_state caching_state;
+ enum {
+ tt_bound,
+ tt_unbound,
+ tt_unpopulated,
+ } state;
+};
+
+/**
+ * struct ttm_dma_tt
+ *
+ * @ttm: Base ttm_tt struct.
+ * @cpu_address: The CPU address of the pages
+ * @dma_address: The DMA (bus) addresses of the pages
+ * @pages_list: used by some page allocation backend
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+struct ttm_dma_tt {
+ struct ttm_tt ttm;
+ void **cpu_address;
+ dma_addr_t *dma_address;
+ struct list_head pages_list;
+};
+
+#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
+#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
+#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
+
+struct ttm_mem_type_manager;
+
+struct ttm_mem_type_manager_func {
+ /**
+ * struct ttm_mem_type_manager member init
+ *
+ * @man: Pointer to a memory type manager.
+ * @p_size: Implementation dependent, but typically the size of the
+ * range to be managed in pages.
+ *
+ * Called to initialize a private range manager. The function is
+ * expected to initialize the man::priv member.
+ * Returns 0 on success, negative error code on failure.
+ */
+ int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
+
+ /**
+ * struct ttm_mem_type_manager member takedown
+ *
+ * @man: Pointer to a memory type manager.
+ *
+ * Called to undo the setup done in init. All allocated resources
+ * should be freed.
+ */
+ int (*takedown)(struct ttm_mem_type_manager *man);
+
+ /**
+ * struct ttm_mem_type_manager member get_node
+ *
+ * @man: Pointer to a memory type manager.
+ * @bo: Pointer to the buffer object we're allocating space for.
+	 * @place: Placement details.
+ * @mem: Pointer to a struct ttm_mem_reg to be filled in.
+ *
+ * This function should allocate space in the memory type managed
+	 * by @man. Placement details, if
+	 * applicable, are given by @place. If successful,
+ * @mem::mm_node should be set to a non-null value, and
+ * @mem::start should be set to a value identifying the beginning
+ * of the range allocated, and the function should return zero.
+	 * If the memory region cannot accommodate the buffer object, @mem::mm_node
+	 * should be set to NULL, and the function should still return 0.
+ * If a system error occurred, preventing the request to be fulfilled,
+ * the function should return a negative error code.
+ *
+ * Note that @mem::mm_node will only be dereferenced by
+ * struct ttm_mem_type_manager functions and optionally by the driver,
+ * which has knowledge of the underlying type.
+ *
+ * This function may not be called from within atomic context, so
+ * an implementation can and must use either a mutex or a spinlock to
+ * protect any data structures managing the space.
+ */
+ int (*get_node)(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem);
+
+ /**
+ * struct ttm_mem_type_manager member put_node
+ *
+ * @man: Pointer to a memory type manager.
+ * @mem: Pointer to a struct ttm_mem_reg to be filled in.
+ *
+ * This function frees memory type resources previously allocated
+ * and that are identified by @mem::mm_node and @mem::start. May not
+ * be called from within atomic context.
+ */
+ void (*put_node)(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem);
+
+ /**
+ * struct ttm_mem_type_manager member debug
+ *
+ * @man: Pointer to a memory type manager.
+ * @prefix: Prefix to be used in printout to identify the caller.
+ *
+ * This function is called to print out the state of the memory
+ * type manager to aid debugging of out-of-memory conditions.
+ * It may not be called from within atomic context.
+ */
+ void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
+};
+
+/**
+ * struct ttm_mem_type_manager
+ *
+ * @has_type: The memory type has been initialized.
+ * @use_type: The memory type is enabled.
+ * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
+ * managed by this memory type.
+ * @gpu_offset: If used, the GPU offset of the first managed page of
+ * fixed memory or the first managed location in an aperture.
+ * @size: Size of the managed region.
+ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
+ * as defined in ttm_placement_common.h
+ * @default_caching: The default caching policy used for a buffer object
+ * placed in this memory type if the user doesn't provide one.
+ * @func: structure pointer implementing the range manager. See above
+ * @priv: Driver private closure for @func.
+ * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
+ * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
+ * reserved by the TTM vm system.
+ * @io_reserve_lru: Optional lru list for unreserving io mem regions.
+ * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
+ * static information. bdev::driver::io_mem_free is never used.
+ * @lru: The lru list for this memory type.
+ *
+ * This structure is used to identify and manage memory types for a device.
+ * It's set up by the ttm_bo_driver::init_mem_type method.
+ */
+
+
+
+struct ttm_mem_type_manager {
+ struct ttm_bo_device *bdev;
+
+ /*
+ * No protection. Constant from start.
+ */
+
+ bool has_type;
+ bool use_type;
+ uint32_t flags;
+ uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
+ uint64_t size;
+ uint32_t available_caching;
+ uint32_t default_caching;
+ const struct ttm_mem_type_manager_func *func;
+ void *priv;
+ struct mutex io_reserve_mutex;
+ bool use_io_reserve_lru;
+ bool io_reserve_fastpath;
+
+ /*
+ * Protected by @io_reserve_mutex:
+ */
+
+ struct list_head io_reserve_lru;
+
+ /*
+ * Protected by the global->lru_lock.
+ */
+
+ struct list_head lru;
+};
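
Most drivers do not implement the manager hooks themselves but plug in the stock range manager (ttm_bo_manager_func, declared near the end of this header) from their init_mem_type callback; a hedged sketch with invented foo_ naming:

	/* Hypothetical init_mem_type: let the generic range manager handle VRAM. */
	static int foo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				     struct ttm_mem_type_manager *man)
	{
		switch (type) {
		case TTM_PL_VRAM:
			man->func = &ttm_bo_manager_func;
			man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			break;
		case TTM_PL_SYSTEM:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	}
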
+
+/**
+ * struct ttm_bo_driver
+ *
+ * @ttm_tt_create: Callback to create a struct ttm_tt backing a buffer object.
+ * @invalidate_caches: Callback to invalidate read caches when a buffer object
+ * has been evicted.
+ * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
+ * structure.
+ * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
+ * @move: Callback for a driver to hook in accelerated functions to
+ * move a buffer.
+ * If set to NULL, a potentially slow memcpy() move is used.
+ */
+
+struct ttm_bo_driver {
+ /**
+ * ttm_tt_create
+ *
+ * @bdev: pointer to a struct ttm_bo_device:
+ * @size: Size of the data needed backing.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @dummy_read_page: See struct ttm_bo_device.
+ *
+ * Create a struct ttm_tt to back data with system memory pages.
+ * No pages are actually allocated.
+ * Returns:
+ * NULL: Out of memory.
+ */
+ struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
+ unsigned long size,
+ uint32_t page_flags,
+ struct page *dummy_read_page);
+
+ /**
+ * ttm_tt_populate
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Allocate all backing pages
+ * Returns:
+ * -ENOMEM: Out of memory.
+ */
+ int (*ttm_tt_populate)(struct ttm_tt *ttm);
+
+ /**
+ * ttm_tt_unpopulate
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Free all backing page
+ */
+ void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
+
+ /**
+ * struct ttm_bo_driver member invalidate_caches
+ *
+ * @bdev: the buffer object device.
+ * @flags: new placement of the rebound buffer object.
+ *
+	 * A previously evicted buffer has been rebound in a
+ * potentially new location. Tell the driver that it might
+ * consider invalidating read (texture) caches on the next command
+ * submission as a consequence.
+ */
+
+ int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
+ int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man);
+ /**
+ * struct ttm_bo_driver member evict_flags:
+ *
+ * @bo: the buffer object to be evicted
+ *
+ * Return the bo flags for a buffer which is not mapped to the hardware.
+ * These will be placed in proposed_flags so that when the move is
+ * finished, they'll end up in bo->mem.flags
+ */
+
+ void(*evict_flags) (struct ttm_buffer_object *bo,
+ struct ttm_placement *placement);
+ /**
+ * struct ttm_bo_driver member move:
+ *
+ * @bo: the buffer to move
+ * @evict: whether this motion is evicting the buffer from
+ * the graphics address space
+ * @interruptible: Use interruptible sleeps if possible when sleeping.
+	 * @no_wait_gpu: whether this should give up and return -EBUSY
+	 * rather than waiting for the GPU
+ * @new_mem: the new memory region receiving the buffer
+ *
+ * Move a buffer between two memory regions.
+ */
+ int (*move) (struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem);
+
+ /**
+ * struct ttm_bo_driver_member verify_access
+ *
+ * @bo: Pointer to a buffer object.
+ * @filp: Pointer to a struct file trying to access the object.
+ *
+ * Called from the map / write / read methods to verify that the
+ * caller is permitted to access the buffer object.
+ * This member may be set to NULL, which will refuse this kind of
+ * access for all buffer objects.
+ * This function should return 0 if access is granted, -EPERM otherwise.
+ */
+ int (*verify_access) (struct ttm_buffer_object *bo,
+ struct file *filp);
+
+ /* hook to notify driver about a driver move so it
+ * can do tiling things */
+ void (*move_notify)(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *new_mem);
+ /* notify the driver we are taking a fault on this BO
+ * and have reserved it */
+ int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
+
+ /**
+ * notify the driver that we're about to swap out this bo
+ */
+ void (*swap_notify) (struct ttm_buffer_object *bo);
+
+ /**
+ * Driver callback on when mapping io memory (for bo_move_memcpy
+ * for instance). TTM will take care to call io_mem_free whenever
+	 * the mapping is no longer used. io_mem_reserve & io_mem_free
+ * are balanced.
+ */
+ int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+ void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+};
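
Tying the callbacks together, a hypothetical driver vtable might look like the sketch below; the foo_* entries are invented stubs, and ttm_pool_populate()/ttm_pool_unpopulate() are the stock page-pool helpers from ttm_page_alloc.h:

	/* Hypothetical driver vtable; a NULL .move falls back to the memcpy move
	 * described above. */
	static struct ttm_bo_driver foo_bo_driver = {
		.ttm_tt_create = foo_ttm_tt_create,	/* sketched further below */
		.ttm_tt_populate = ttm_pool_populate,
		.ttm_tt_unpopulate = ttm_pool_unpopulate,
		.init_mem_type = foo_init_mem_type,
		.evict_flags = foo_evict_flags,		/* invented stub */
		.verify_access = foo_verify_access,	/* invented stub */
		.io_mem_reserve = foo_io_mem_reserve,	/* invented stub */
		.io_mem_free = foo_io_mem_free,		/* invented stub */
	};
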
+
+/**
+ * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
+ */
+
+struct ttm_bo_global_ref {
+ struct drm_global_reference ref;
+ struct ttm_mem_global *mem_glob;
+};
+
+/**
+ * struct ttm_bo_global - Buffer object driver global data.
+ *
+ * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
+ * @dummy_read_page: Pointer to a dummy page used for mapping requests
+ * of unpopulated pages.
+ * @shrink: A shrink callback object used for buffer object swap.
+ * @device_list_mutex: Mutex protecting the device list.
+ * This mutex is held while traversing the device list for pm options.
+ * @lru_lock: Spinlock protecting the bo subsystem lru lists.
+ * @device_list: List of buffer object devices.
+ * @swap_lru: Lru list of buffer objects used for swapping.
+ */
+
+struct ttm_bo_global {
+
+ /**
+ * Constant after init.
+ */
+
+ struct kobject kobj;
+ struct ttm_mem_global *mem_glob;
+ struct page *dummy_read_page;
+ struct ttm_mem_shrink shrink;
+ struct mutex device_list_mutex;
+ spinlock_t lru_lock;
+
+ /**
+ * Protected by device_list_mutex.
+ */
+ struct list_head device_list;
+
+ /**
+ * Protected by the lru_lock.
+ */
+ struct list_head swap_lru;
+
+ /**
+ * Internal protection.
+ */
+ atomic_t bo_count;
+};
+
+
+#define TTM_NUM_MEM_TYPES 8
+
+#define TTM_BO_PRIV_FLAG_MOVING 0 /* Buffer object is moving and needs
+ idling before CPU mapping */
+#define TTM_BO_PRIV_FLAG_MAX 1
+/**
+ * struct ttm_bo_device - Buffer object driver device-specific data.
+ *
+ * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
+ * @man: An array of mem_type_managers.
+ * @vma_manager: Address space manager
+ * lru_lock: Spinlock that protects the buffer+device lru lists and
+ * ddestroy lists.
+ * @val_seq: Current validation sequence.
+ * @dev_mapping: A pointer to the struct address_space representing the
+ * device address space.
+ * @wq: Work queue structure for the delayed delete workqueue.
+ *
+ */
+
+struct ttm_bo_device {
+
+ /*
+ * Constant after bo device init / atomic.
+ */
+ struct list_head device_list;
+ struct ttm_bo_global *glob;
+ struct ttm_bo_driver *driver;
+ struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
+
+ /*
+ * Protected by internal locks.
+ */
+ struct drm_vma_offset_manager vma_manager;
+
+ /*
+ * Protected by the global:lru lock.
+ */
+ struct list_head ddestroy;
+ uint32_t val_seq;
+
+ /*
+ * Protected by load / firstopen / lastclose /unload sync.
+ */
+
+ struct address_space *dev_mapping;
+
+ /*
+ * Internal protection.
+ */
+
+ struct delayed_work wq;
+
+ bool need_dma32;
+};
+
+/**
+ * ttm_flag_masked
+ *
+ * @old: Pointer to the result and original value.
+ * @new: New value of bits.
+ * @mask: Mask of bits to change.
+ *
+ * Convenience function to change a number of bits identified by a mask.
+ */
+
+static inline uint32_t
+ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
+{
+ *old ^= (*old ^ new) & mask;
+ return *old;
+}
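
A small usage sketch: replacing only the caching bits of a placement word, using the TTM_PL_* flags from ttm_placement.h:

	/* Switch only the caching bits of a placement word to write-combined. */
	uint32_t cur_placement = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED;

	ttm_flag_masked(&cur_placement, TTM_PL_FLAG_WC, TTM_PL_MASK_CACHING);
	/* cur_placement now reads TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC. */
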
+
+/**
+ * ttm_tt_init
+ *
+ * @ttm: The struct ttm_tt.
+ * @bdev: pointer to a struct ttm_bo_device:
+ * @size: Size of the data needed backing.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @dummy_read_page: See struct ttm_bo_device.
+ *
+ * Initialize a pre-allocated struct ttm_tt to back data with system memory
+ * pages. No pages are actually allocated.
+ * Returns:
+ * -ENOMEM: Out of memory.
+ */
+extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page);
+extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page);
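
For context, a hedged sketch of a driver's ttm_tt_create hook built on ttm_tt_init(); foo_ttm_tt_create is invented and foo_backend_func refers to the backend sketched earlier:

	/* Hypothetical ttm_bo_driver::ttm_tt_create implementation. */
	static struct ttm_tt *foo_ttm_tt_create(struct ttm_bo_device *bdev,
						unsigned long size,
						uint32_t page_flags,
						struct page *dummy_read_page)
	{
		struct ttm_tt *ttm;

		ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
		if (!ttm)
			return NULL;

		ttm->func = &foo_backend_func;	/* bind/unbind/destroy callbacks */
		if (ttm_tt_init(ttm, bdev, size, page_flags, dummy_read_page)) {
			kfree(ttm);
			return NULL;
		}
		return ttm;
	}
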
+
+/**
+ * ttm_tt_fini
+ *
+ * @ttm: the ttm_tt structure.
+ *
+ * Free memory of ttm_tt structure
+ */
+extern void ttm_tt_fini(struct ttm_tt *ttm);
+extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
+
+/**
+ * ttm_tt_bind:
+ *
+ * @ttm: The struct ttm_tt containing backing pages.
+ * @bo_mem: The struct ttm_mem_reg identifying the binding location.
+ *
+ * Bind the pages of @ttm to an aperture location identified by @bo_mem
+ */
+extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
+
+/**
+ * ttm_tt_destroy:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Unbind, unpopulate and destroy common struct ttm_tt.
+ */
+extern void ttm_tt_destroy(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_unbind:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Unbind a struct ttm_tt.
+ */
+extern void ttm_tt_unbind(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_swapin:
+ *
+ * @ttm: The struct ttm_tt.
+ *
+ * Swap in a previously swapped-out ttm_tt.
+ */
+extern int ttm_tt_swapin(struct ttm_tt *ttm);
+
+/**
+ * ttm_tt_set_placement_caching:
+ *
+ * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
+ * @placement: Flag indicating the desired caching policy.
+ *
+ * This function will change caching policy of any default kernel mappings of
+ * the pages backing @ttm. If changing from cached to uncached or
+ * write-combined,
+ * all CPU caches will first be flushed to make sure the data of the pages
+ * hit RAM. This function may be very costly as it involves global TLB
+ * and cache flushes and potential page splitting / combining.
+ */
+extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
+extern int ttm_tt_swapout(struct ttm_tt *ttm,
+ struct file *persistent_swap_storage);
+
+/**
+ * ttm_tt_unpopulate - free pages from a ttm
+ *
+ * @ttm: Pointer to the ttm_tt structure
+ *
+ * Calls the driver method to free all pages from a ttm
+ */
+extern void ttm_tt_unpopulate(struct ttm_tt *ttm);
+
+/*
+ * ttm_bo.c
+ */
+
+/**
+ * ttm_mem_reg_is_pci
+ *
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @mem: A valid struct ttm_mem_reg.
+ *
+ * Returns true if the memory described by @mem is PCI memory,
+ * false otherwise.
+ */
+extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem);
+
+/**
+ * ttm_bo_mem_space
+ *
+ * @bo: Pointer to a struct ttm_buffer_object, the data of which
+ * we want to allocate space for.
+ * @placement: Proposed new placement for the buffer object.
+ * @mem: A struct ttm_mem_reg.
+ * @interruptible: Sleep interruptibly while waiting for space.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ *
+ * Allocate memory space for the buffer object pointed to by @bo, using
+ * the placement flags in @mem, potentially evicting other idle buffer objects.
+ * This function may sleep while waiting for space to become available.
+ * Returns:
+ * -EBUSY: No space available (only if no_wait == 1).
+ * -ENOMEM: Could not allocate memory for the buffer object, either due to
+ * fragmentation or concurrent allocators.
+ * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
+ */
+extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ bool interruptible,
+ bool no_wait_gpu);
+
+extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem);
+extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem);
+
+extern void ttm_bo_global_release(struct drm_global_reference *ref);
+extern int ttm_bo_global_init(struct drm_global_reference *ref);
+
+extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
+
+/**
+ * ttm_bo_device_init
+ *
+ * @bdev: A pointer to a struct ttm_bo_device to initialize.
+ * @glob: A pointer to an initialized struct ttm_bo_global.
+ * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
+ * @mapping: The address space to use for this bo.
+ * @file_page_offset: Offset into the device address space that is available
+ * for buffer data. This ensures compatibility with other users of the
+ * address space.
+ *
+ * Initializes a struct ttm_bo_device:
+ * Returns:
+ * !0: Failure.
+ */
+extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
+ struct ttm_bo_global *glob,
+ struct ttm_bo_driver *driver,
+ struct address_space *mapping,
+ uint64_t file_page_offset, bool need_dma32);
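
For orientation, a condensed and hedged sketch of the usual bring-up order, mirroring what in-tree drivers do with drm_global_item_ref(); struct foo_device, its members and FOO_FILE_PAGE_OFFSET are all invented for the example:

	/* Hypothetical device bring-up: global references first, then the bo device. */
	static int foo_ttm_init(struct foo_device *fdev)
	{
		int ret;

		fdev->mem_global_ref.global_type = DRM_GLOBAL_TTM_MEM;
		fdev->mem_global_ref.size = sizeof(struct ttm_mem_global);
		fdev->mem_global_ref.init = &foo_ttm_mem_global_init;	/* invented wrapper */
		fdev->mem_global_ref.release = &foo_ttm_mem_global_release;
		ret = drm_global_item_ref(&fdev->mem_global_ref);
		if (ret)
			return ret;

		fdev->bo_global_ref.mem_glob = fdev->mem_global_ref.object;
		fdev->bo_global_ref.ref.global_type = DRM_GLOBAL_TTM_BO;
		fdev->bo_global_ref.ref.size = sizeof(struct ttm_bo_global);
		fdev->bo_global_ref.ref.init = &ttm_bo_global_init;
		fdev->bo_global_ref.ref.release = &ttm_bo_global_release;
		ret = drm_global_item_ref(&fdev->bo_global_ref.ref);
		if (ret)
			goto out_mem;

		ret = ttm_bo_device_init(&fdev->bdev, fdev->bo_global_ref.ref.object,
					 &foo_bo_driver,
					 fdev->drm->anon_inode->i_mapping,
					 FOO_FILE_PAGE_OFFSET, fdev->need_dma32);
		if (ret)
			goto out_bo;
		return 0;

	out_bo:
		drm_global_item_unref(&fdev->bo_global_ref.ref);
	out_mem:
		drm_global_item_unref(&fdev->mem_global_ref);
		return ret;
	}
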
+
+/**
+ * ttm_bo_unmap_virtual
+ *
+ * @bo: tear down the virtual mappings for this BO
+ */
+extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_unmap_virtual
+ *
+ * @bo: tear down the virtual mappings for this BO
+ *
+ * The caller must take ttm_mem_io_lock before calling this function.
+ */
+extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
+
+extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
+extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
+extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
+ bool interruptible);
+extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
+
+extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
+extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
+
+/**
+ * __ttm_bo_reserve:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @use_ticket: If @bo is already reserved, only sleep waiting for
+ * it to become unreserved if @ticket->stamp is older.
+ *
+ * Will not remove reserved buffers from the lru lists.
+ * Otherwise identical to ttm_bo_reserve.
+ *
+ * Returns:
+ * -EDEADLK: The reservation may cause a deadlock.
+ * Release all buffer reservations, wait for @bo to become unreserved and
+ * try again (only if @use_ticket is true).
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EALREADY: Bo already reserved using @ticket. This error code will only
+ * be returned if @use_ticket is set to true.
+ */
+static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait, bool use_ticket,
+ struct ww_acquire_ctx *ticket)
+{
+ int ret = 0;
+
+ if (no_wait) {
+ bool success;
+ if (WARN_ON(ticket))
+ return -EBUSY;
+
+ success = ww_mutex_trylock(&bo->resv->lock);
+ return success ? 0 : -EBUSY;
+ }
+
+ if (interruptible)
+ ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket);
+ else
+ ret = ww_mutex_lock(&bo->resv->lock, ticket);
+ if (ret == -EINTR)
+ return -ERESTARTSYS;
+ return ret;
+}
+
+/**
+ * ttm_bo_reserve:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @use_ticket: If @bo is already reserved, only sleep waiting for
+ * it to become unreserved if @ticket->stamp is older.
+ *
+ * Locks a buffer object for validation. (Or prevents other processes from
+ * locking it for validation) and removes it from lru lists, while taking
+ * a number of measures to prevent deadlocks.
+ *
+ * Deadlocks may occur when two processes try to reserve multiple buffers in
+ * different order, either by will or as a result of a buffer being evicted
+ * to make room for a buffer already reserved. (Buffers are reserved before
+ * they are evicted). The following algorithm prevents such deadlocks from
+ * occurring:
+ * Processes attempting to reserve multiple buffers other than for eviction,
+ * (typically execbuf), should first obtain a unique 32-bit
+ * validation sequence number,
+ * and call this function with @use_ticket == 1 and @ticket->stamp == the unique
+ * sequence number. If upon call of this function, the buffer object is already
+ * reserved, the validation sequence is checked against the validation
+ * sequence of the process currently reserving the buffer,
+ * and if the current validation sequence is greater than that of the process
+ * holding the reservation, the function returns -EDEADLK. Otherwise it sleeps
+ * waiting for the buffer to become unreserved, after which it retries
+ * reserving.
+ * The caller should, when receiving an -EDEADLK error
+ * release all its buffer reservations, wait for @bo to become unreserved, and
+ * then rerun the validation with the same validation sequence. This procedure
+ * will always guarantee that the process with the lowest validation sequence
+ * will eventually succeed, preventing both deadlocks and starvation.
+ *
+ * Returns:
+ * -EDEADLK: The reservation may cause a deadlock.
+ * Release all buffer reservations, wait for @bo to become unreserved and
+ * try again (only if @use_ticket is true).
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EALREADY: Bo already reserved using @ticket. This error code will only
+ * be returned if @use_ticket is set to true.
+ */
+static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait, bool use_ticket,
+ struct ww_acquire_ctx *ticket)
+{
+ int ret;
+
+ WARN_ON(!atomic_read(&bo->kref.refcount));
+
+ ret = __ttm_bo_reserve(bo, interruptible, no_wait, use_ticket, ticket);
+ if (likely(ret == 0))
+ ttm_bo_del_sub_from_lru(bo);
+
+ return ret;
+}
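
To make the locking rules concrete, a hedged single-buffer sketch; multi-buffer reservation with a ww_acquire_ctx is normally done through the ttm_execbuf_util helpers instead:

	/* Hypothetical: reserve one bo, validate it into @placement, unreserve. */
	static int example_validate_one(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
	{
		int ret;

		ret = ttm_bo_reserve(bo, true /* interruptible */,
				     false /* no_wait */,
				     false /* use_ticket */, NULL);
		if (ret)
			return ret;

		ret = ttm_bo_validate(bo, placement, true, false);

		ttm_bo_unreserve(bo);	/* puts the bo back on its lru lists */
		return ret;
	}
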
+
+/**
+ * ttm_bo_reserve_slowpath:
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @ticket: ww_acquire_ctx used for the backed-off reservation.
+ *
+ * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
+ * from all our other reservations. Because there are no other reservations
+ * held by us, this function cannot deadlock any more.
+ */
+static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+ bool interruptible,
+ struct ww_acquire_ctx *ticket)
+{
+ int ret = 0;
+
+ WARN_ON(!atomic_read(&bo->kref.refcount));
+
+ if (interruptible)
+ ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
+ ticket);
+ else
+ ww_mutex_lock_slow(&bo->resv->lock, ticket);
+
+ if (likely(ret == 0))
+ ttm_bo_del_sub_from_lru(bo);
+ else if (ret == -EINTR)
+ ret = -ERESTARTSYS;
+
+ return ret;
+}
+
+/**
+ * __ttm_bo_unreserve
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Unreserve a previous reservation of @bo where the buffer object is
+ * already on lru lists.
+ */
+static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo)
+{
+ ww_mutex_unlock(&bo->resv->lock);
+}
+
+/**
+ * ttm_bo_unreserve
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Unreserve a previous reservation of @bo.
+ */
+static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
+{
+ if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
+ spin_lock(&bo->glob->lru_lock);
+ ttm_bo_add_to_lru(bo);
+ spin_unlock(&bo->glob->lru_lock);
+ }
+ __ttm_bo_unreserve(bo);
+}
+
+/**
+ * ttm_bo_unreserve_ticket
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @ticket: ww_acquire_ctx used for reserving
+ *
+ * Unreserve a previous reservation of @bo made with @ticket.
+ */
+static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
+ struct ww_acquire_ctx *t)
+{
+ ttm_bo_unreserve(bo);
+}
+
+/*
+ * ttm_bo_util.c
+ */
+
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem);
+void ttm_mem_io_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem);
+/**
+ * ttm_bo_move_ttm
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @evict: 1: This is an eviction. Don't try to pipeline.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @new_mem: struct ttm_mem_reg indicating where to move.
+ *
+ * Optimized move function for a buffer object with both old and
+ * new placement backed by a TTM. The function will, if successful,
+ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
+ * and update the (@bo)->mem placement flags. If unsuccessful, the old
+ * data remains untouched, and it's up to the caller to free the
+ * memory space indicated by @new_mem.
+ * Returns:
+ * !0: Failure.
+ */
+
+extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem);
+
+/**
+ * ttm_bo_move_memcpy
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @evict: 1: This is an eviction. Don't try to pipeline.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @new_mem: struct ttm_mem_reg indicating where to move.
+ *
+ * Fallback move function for a mappable buffer object in mappable memory.
+ * The function will, if successful,
+ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
+ * and update the (@bo)->mem placement flags. If unsuccessful, the old
+ * data remains untouched, and it's up to the caller to free the
+ * memory space indicated by @new_mem.
+ * Returns:
+ * !0: Failure.
+ */
+
+extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem);
+
+/**
+ * ttm_bo_free_old_node
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Utility function to free an old placement after a successful move.
+ */
+extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_move_accel_cleanup.
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @fence: A fence object that signals when moving is complete.
+ * @evict: This is an evict move. Don't return until the buffer is idle.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @new_mem: struct ttm_mem_reg indicating where to move.
+ *
+ * Accelerated move function to be called when an accelerated move
+ * has been scheduled. The function will create a new temporary buffer object
+ * representing the old placement, and put the sync object on both buffer
+ * objects. After that the newly created buffer object is unref'd to be
+ * destroyed when the move is complete. This will help pipeline
+ * buffer moves.
+ */
+
+extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+ struct fence *fence,
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem);
+/**
+ * ttm_io_prot
+ *
+ * @c_state: Caching state.
+ * @tmp: Page protection flag for a normal, cached mapping.
+ *
+ * Utility function that returns the pgprot_t that should be used for
+ * setting up a PTE with the caching model indicated by @c_state.
+ */
+extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
+
+extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
+
+#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
+#define TTM_HAS_AGP
+#include <linux/agp_backend.h>
+
+/**
+ * ttm_agp_tt_create
+ *
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @bridge: The agp bridge this device is sitting on.
+ * @size: Size of the data needed backing.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @dummy_read_page: See struct ttm_bo_device.
+ *
+ *
+ * Create a TTM backend that uses the indicated AGP bridge as an aperture
+ * for TT memory. This function uses the linux agpgart interface to
+ * bind and unbind memory backing a ttm_tt.
+ */
+extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
+ struct agp_bridge_data *bridge,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page);
+int ttm_agp_tt_populate(struct ttm_tt *ttm);
+void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
+#endif
+
+#endif
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
new file mode 100644
index 000000000..b620c317c
--- /dev/null
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -0,0 +1,119 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#ifndef _TTM_EXECBUF_UTIL_H_
+#define _TTM_EXECBUF_UTIL_H_
+
+#include <ttm/ttm_bo_api.h>
+#include <linux/list.h>
+
+/**
+ * struct ttm_validate_buffer
+ *
+ * @head: list head for thread-private list.
+ * @bo: refcounted buffer object pointer.
+ * @shared: should the fence be added shared?
+ */
+
+struct ttm_validate_buffer {
+ struct list_head head;
+ struct ttm_buffer_object *bo;
+ bool shared;
+};
+
+/**
+ * function ttm_eu_backoff_reservation
+ *
+ * @ticket: ww_acquire_ctx from reserve call
+ * @list: thread private list of ttm_validate_buffer structs.
+ *
+ * Undoes all buffer validation reservations for bos pointed to by
+ * the list entries.
+ */
+
+extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
+ struct list_head *list);
+
+/**
+ * function ttm_eu_reserve_buffers
+ *
+ * @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only
+ * non-blocking reserves should be tried.
+ * @list: thread private list of ttm_validate_buffer structs.
+ * @intr: should the wait be interruptible
+ * @dups: [out] optional list of duplicates.
+ *
+ * Tries to reserve bos pointed to by the list entries for validation.
+ * If the function returns 0, all buffers are marked as "unfenced",
+ * taken off the lru lists and are not synced for write CPU usage.
+ *
+ * If the function detects a deadlock due to multiple threads trying to
+ * reserve the same buffers in reverse order, all threads except one will
+ * back off and retry. This function may sleep while waiting for
+ * CPU write reservations to be cleared, and for other threads to
+ * unreserve their buffers.
+ *
+ * If intr is set to true, this function may return -ERESTARTSYS if the
+ * calling process receives a signal while waiting. In that case, no
+ * buffers on the list will be reserved upon return.
+ *
+ * If dups is non-NULL, all buffers already reserved by the current thread
+ * (e.g. duplicates) are added to this list, otherwise -EALREADY is returned
+ * on the first already reserved buffer and all buffers from the list are
+ * unreserved again.
+ *
+ * Buffers reserved by this function should be unreserved by
+ * a call to either ttm_eu_backoff_reservation() or
+ * ttm_eu_fence_buffer_objects() when command submission is complete or
+ * has failed.
+ */
+
+extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
+ struct list_head *list, bool intr,
+ struct list_head *dups);
+
+/**
+ * function ttm_eu_fence_buffer_objects.
+ *
+ * @ticket: ww_acquire_ctx from reserve call
+ * @list: thread private list of ttm_validate_buffer structs.
+ * @fence: The new exclusive fence for the buffers.
+ *
+ * This function should be called when command submission is complete, and
+ * it will add a new sync object to bos pointed to by entries on @list.
+ * It also unreserves all buffers, putting them on lru lists.
+ *
+ */
+
+extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
+ struct list_head *list,
+ struct fence *fence);
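
Putting the three helpers together, a hedged command-submission sketch; the list of struct ttm_validate_buffer is assumed to be filled in by the caller and foo_emit_commands() is invented:

	/* Hypothetical submission path: reserve -> emit commands -> fence or back off. */
	static int example_submit(struct list_head *list, struct fence *fence)
	{
		struct ww_acquire_ctx ticket;
		int ret;

		ret = ttm_eu_reserve_buffers(&ticket, list, true /* intr */,
					     NULL /* dups */);
		if (ret)
			return ret;

		ret = foo_emit_commands(list);	/* invented: validate bos, ring the CS */
		if (ret) {
			ttm_eu_backoff_reservation(&ticket, list);
			return ret;
		}

		ttm_eu_fence_buffer_objects(&ticket, list, fence);
		return 0;
	}
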
+
+#endif
diff --git a/include/drm/ttm/ttm_lock.h b/include/drm/ttm/ttm_lock.h
new file mode 100644
index 000000000..2902beb5f
--- /dev/null
+++ b/include/drm/ttm/ttm_lock.h
@@ -0,0 +1,247 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+/** @file ttm_lock.h
+ * This file implements a simple replacement for the buffer manager use
+ * of the DRM heavyweight hardware lock.
+ * The lock is a read-write lock. Taking it in read mode and write mode
+ * is relatively fast, and intended for in-kernel use only.
+ *
+ * The vt mode is used only when there is a need to block all
+ * user-space processes from validating buffers.
+ * It's allowed to leave kernel space with the vt lock held.
+ * If a user-space process dies while having the vt-lock,
+ * it will be released during the file descriptor release. The vt lock
+ * excludes write lock and read lock.
+ *
+ * The suspend mode is used to lock out all TTM users when preparing for
+ * and executing suspend operations.
+ *
+ */
+
+#ifndef _TTM_LOCK_H_
+#define _TTM_LOCK_H_
+
+#include <ttm/ttm_object.h>
+#include <linux/wait.h>
+#include <linux/atomic.h>
+
+/**
+ * struct ttm_lock
+ *
+ * @base: ttm base object used solely to release the lock if the client
+ * holding the lock dies.
+ * @queue: Queue for processes waiting for lock change-of-status.
+ * @lock: Spinlock protecting some lock members.
+ * @rw: Read-write lock counter. Protected by @lock.
+ * @flags: Lock state. Protected by @lock.
+ * @kill_takers: Boolean whether to kill takers of the lock.
+ * @signal: Signal to send when kill_takers is true.
+ */
+
+struct ttm_lock {
+ struct ttm_base_object base;
+ wait_queue_head_t queue;
+ spinlock_t lock;
+ int32_t rw;
+ uint32_t flags;
+ bool kill_takers;
+ int signal;
+ struct ttm_object_file *vt_holder;
+};
+
+
+/**
+ * ttm_lock_init
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * Initializes the lock.
+ */
+extern void ttm_lock_init(struct ttm_lock *lock);
+
+/**
+ * ttm_read_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a read lock.
+ */
+extern void ttm_read_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_read_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in read mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
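
A hedged sketch of the usual pattern, taking the lock in read mode around an ioctl that validates buffers; struct foo_private and its ttm_lock member are invented for the example:

	/* Hypothetical ioctl body guarded by the ttm read lock. */
	static int example_render_ioctl(struct foo_private *dev_priv)
	{
		int ret;

		ret = ttm_read_lock(&dev_priv->ttm_lock, true /* interruptible */);
		if (ret)
			return ret;	/* -ERESTARTSYS */

		/* ... reserve, validate and submit buffers ... */

		ttm_read_unlock(&dev_priv->ttm_lock);
		return 0;
	}
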
+
+/**
+ * ttm_read_trylock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Tries to take the lock in read mode. If the lock is already held
+ * in write mode, the function will return -EBUSY. If the lock is held
+ * in vt or suspend mode, the function will sleep until these modes
+ * are unlocked.
+ *
+ * Returns:
+ * -EBUSY The lock was already held in write mode.
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_write_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a write lock.
+ */
+extern void ttm_write_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_write_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in write mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_lock_downgrade
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Downgrades a write lock to a read lock.
+ */
+extern void ttm_lock_downgrade(struct ttm_lock *lock);
+
+/**
+ * ttm_suspend_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Takes the lock in suspend mode. Excludes read and write mode.
+ */
+extern void ttm_suspend_lock(struct ttm_lock *lock);
+
+/**
+ * ttm_suspend_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a suspend lock
+ */
+extern void ttm_suspend_unlock(struct ttm_lock *lock);
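+
+/*
+ * Hedged sketch (an assumption, not required by this header): a driver
+ * suspend/resume pair brackets hardware quiescing with the suspend lock.
+ * my_quiesce_gpu() and my_wake_gpu() are hypothetical driver helpers.
+ *
+ *	ttm_suspend_lock(lock);
+ *	my_quiesce_gpu(dev_priv);
+ *	...
+ *	my_wake_gpu(dev_priv);
+ *	ttm_suspend_unlock(lock);
+ */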
+
+/**
+ * ttm_vt_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ * @tfile: Pointer to a struct ttm_object_file to register the lock with.
+ *
+ * Takes the lock in vt mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ * -ENOMEM If out of memory when locking.
+ */
+extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible,
+ struct ttm_object_file *tfile);
+
+/**
+ * ttm_vt_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a vt lock.
+ * Returns:
+ * -EINVAL If the lock was not held.
+ */
+extern int ttm_vt_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_lock_set_kill
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @val: Boolean whether to kill processes taking the lock.
+ * @signal: Signal to send to the process taking the lock.
+ *
+ * The kill-when-taking-lock functionality is used to kill processes that keep
+ * on using the TTM functionality after its resources have been taken down, for
+ * example when the X server exits. A typical sequence would look like this:
+ * - The X server takes the lock in write mode.
+ * - ttm_lock_set_kill() is called with @val set to true.
+ * - As part of the X server exit, TTM resources are taken down.
+ * - The X server releases the lock on file release.
+ * - Another DRI client wants to render, takes the lock and is killed.
+ *
+ * An illustrative, non-normative sketch of this sequence follows the
+ * function below.
+ *
+ */
+static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val,
+ int signal)
+{
+ lock->kill_takers = val;
+ if (val)
+ lock->signal = signal;
+}
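+
+/*
+ * Illustrative, non-normative sketch of the sequence described above,
+ * assuming a driver-held struct ttm_lock *lock; my_take_down_ttm() is a
+ * hypothetical driver teardown helper.
+ *
+ *	ttm_write_lock(lock, false);
+ *	ttm_lock_set_kill(lock, true, SIGTERM);
+ *	my_take_down_ttm(dev_priv);
+ *	ttm_write_unlock(lock);
+ *
+ * Subsequent takers of the lock are then sent SIGTERM.
+ */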
+
+#endif
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
new file mode 100644
index 000000000..72dcbe81d
--- /dev/null
+++ b/include/drm/ttm/ttm_memory.h
@@ -0,0 +1,158 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef TTM_MEMORY_H
+#define TTM_MEMORY_H
+
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/bug.h>
+#include <linux/wait.h>
+#include <linux/errno.h>
+#include <linux/kobject.h>
+#include <linux/mm.h>
+
+/**
+ * struct ttm_mem_shrink - callback to shrink TTM memory usage.
+ *
+ * @do_shrink: The callback function.
+ *
+ * Arguments to the do_shrink function are intended to be passed using
+ * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
+ * and can be accessed using container_of(), as illustrated in the sketch
+ * below the struct definition.
+ */
+
+struct ttm_mem_shrink {
+ int (*do_shrink) (struct ttm_mem_shrink *);
+};
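+
+/*
+ * A minimal, hedged sketch of the inheritance pattern described above: a
+ * driver embeds struct ttm_mem_shrink in its own structure and recovers it
+ * with container_of() in the callback. The my_driver_* names are
+ * hypothetical.
+ *
+ *	struct my_driver_shrink {
+ *		struct ttm_mem_shrink shrink;
+ *		struct my_driver *drv;
+ *	};
+ *
+ *	static int my_do_shrink(struct ttm_mem_shrink *shrink)
+ *	{
+ *		struct my_driver_shrink *ms =
+ *			container_of(shrink, struct my_driver_shrink, shrink);
+ *
+ *		return my_driver_evict_something(ms->drv);
+ *	}
+ *
+ * The object would then be set up with ttm_mem_init_shrink(&ms->shrink,
+ * my_do_shrink) and registered using ttm_mem_register_shrink().
+ */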
+
+/**
+ * struct ttm_mem_global - Global memory accounting structure.
+ *
+ * @shrink: A single callback to shrink TTM memory usage. Extend this
+ * to a linked list to be able to handle multiple callbacks when needed.
+ * @swap_queue: A workqueue to handle shrinking in low memory situations. A
+ * separate workqueue is needed since the work will spend a lot of time
+ * waiting for the GPU and could otherwise block other workqueue tasks.
+ * At this point only a single-threaded workqueue is used.
+ * @work: The workqueue callback for the shrink queue.
+ * @lock: Lock protecting the @shrink member and the memory accounting
+ * members, that is, essentially the whole structure with some exceptions.
+ * @zones: Array of pointers to accounting zones.
+ * @num_zones: Number of populated entries in the @zones array.
+ * @zone_kernel: Pointer to the kernel zone.
+ * @zone_highmem: Pointer to the highmem zone if there is one.
+ * @zone_dma32: Pointer to the dma32 zone if there is one.
+ *
+ * Note that this structure is not per device. It should be global for all
+ * graphics devices.
+ */
+
+#define TTM_MEM_MAX_ZONES 2
+struct ttm_mem_zone;
+struct ttm_mem_global {
+ struct kobject kobj;
+ struct ttm_mem_shrink *shrink;
+ struct workqueue_struct *swap_queue;
+ struct work_struct work;
+ spinlock_t lock;
+ struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
+ unsigned int num_zones;
+ struct ttm_mem_zone *zone_kernel;
+#ifdef CONFIG_HIGHMEM
+ struct ttm_mem_zone *zone_highmem;
+#else
+ struct ttm_mem_zone *zone_dma32;
+#endif
+};
+
+/**
+ * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
+ *
+ * @shrink: The object to initialize.
+ * @func: The callback function.
+ */
+
+static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
+ int (*func) (struct ttm_mem_shrink *))
+{
+ shrink->do_shrink = func;
+}
+
+/**
+ * ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
+ *
+ * @glob: The struct ttm_mem_global object to register with.
+ * @shrink: An initialized struct ttm_mem_shrink object to register.
+ *
+ * Returns:
+ * -EBUSY: There's already a callback registered. (May change).
+ */
+
+static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
+ struct ttm_mem_shrink *shrink)
+{
+ spin_lock(&glob->lock);
+ if (glob->shrink != NULL) {
+ spin_unlock(&glob->lock);
+ return -EBUSY;
+ }
+ glob->shrink = shrink;
+ spin_unlock(&glob->lock);
+ return 0;
+}
+
+/**
+ * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
+ *
+ * @glob: The struct ttm_mem_global object to unregister from.
+ * @shrink: A previously registered struct ttm_mem_shrink object.
+ *
+ */
+
+static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
+ struct ttm_mem_shrink *shrink)
+{
+ spin_lock(&glob->lock);
+ BUG_ON(glob->shrink != shrink);
+ glob->shrink = NULL;
+ spin_unlock(&glob->lock);
+}
+
+extern int ttm_mem_global_init(struct ttm_mem_global *glob);
+extern void ttm_mem_global_release(struct ttm_mem_global *glob);
+extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
+ bool no_wait, bool interruptible);
+extern void ttm_mem_global_free(struct ttm_mem_global *glob,
+ uint64_t amount);
+extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
+ struct page *page,
+ bool no_wait, bool interruptible);
+extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
+ struct page *page);
+extern size_t ttm_round_pot(size_t size);
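+
+/*
+ * Hedged usage sketch for the accounting helpers above (not a required
+ * pattern): account a kernel allocation before making it and release the
+ * accounting when it is freed. The glob, size and obj variables are
+ * assumed to be driver-provided.
+ *
+ *	acc_size = ttm_round_pot(size);
+ *	ret = ttm_mem_global_alloc(glob, acc_size, false, true);
+ *	if (unlikely(ret != 0))
+ *		return ret;
+ *	obj = kzalloc(size, GFP_KERNEL);
+ *	if (unlikely(obj == NULL)) {
+ *		ttm_mem_global_free(glob, acc_size);
+ *		return -ENOMEM;
+ *	}
+ *	...
+ *	kfree(obj);
+ *	ttm_mem_global_free(glob, acc_size);
+ */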
+#endif
diff --git a/include/drm/ttm/ttm_module.h b/include/drm/ttm/ttm_module.h
new file mode 100644
index 000000000..45fa318c1
--- /dev/null
+++ b/include/drm/ttm/ttm_module.h
@@ -0,0 +1,40 @@
+/**************************************************************************
+ *
+ * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#ifndef _TTM_MODULE_H_
+#define _TTM_MODULE_H_
+
+#include <linux/kernel.h>
+struct kobject;
+
+#define TTM_PFX "[TTM] "
+extern struct kobject *ttm_get_kobj(void);
+
+#endif /* _TTM_MODULE_H_ */
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
new file mode 100644
index 000000000..ed953f98f
--- /dev/null
+++ b/include/drm/ttm/ttm_object.h
@@ -0,0 +1,350 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/** @file ttm_object.h
+ *
+ * Base- and reference object implementation for the various
+ * ttm objects. Implements reference counting, minimal security checks
+ * and release on file close.
+ */
+
+#ifndef _TTM_OBJECT_H_
+#define _TTM_OBJECT_H_
+
+#include <linux/list.h>
+#include <drm/drm_hashtab.h>
+#include <linux/kref.h>
+#include <linux/rcupdate.h>
+#include <linux/dma-buf.h>
+#include <ttm/ttm_memory.h>
+
+/**
+ * enum ttm_ref_type
+ *
+ * Describes what type of reference a ref object holds.
+ *
+ * TTM_REF_USAGE is a simple refcount on a base object.
+ *
+ * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
+ * buffer object.
+ *
+ * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
+ * buffer object.
+ *
+ */
+
+enum ttm_ref_type {
+ TTM_REF_USAGE,
+ TTM_REF_SYNCCPU_READ,
+ TTM_REF_SYNCCPU_WRITE,
+ TTM_REF_NUM
+};
+
+/**
+ * enum ttm_object_type
+ *
+ * One entry per ttm object type.
+ * Device-specific types should use the
+ * ttm_driver_type<n> types.
+ */
+
+enum ttm_object_type {
+ ttm_fence_type,
+ ttm_buffer_type,
+ ttm_lock_type,
+ ttm_prime_type,
+ ttm_driver_type0 = 256,
+ ttm_driver_type1,
+ ttm_driver_type2,
+ ttm_driver_type3,
+ ttm_driver_type4,
+ ttm_driver_type5
+};
+
+struct ttm_object_file;
+struct ttm_object_device;
+
+/**
+ * struct ttm_base_object
+ *
+ * @rhead: RCU head used to defer freeing of the object.
+ * @hash: hash entry for the per-device object hash.
+ * @object_type: derived type this object is base class for.
+ * @shareable: Other ttm_object_files can access this object.
+ *
+ * @tfile: Pointer to ttm_object_file of the creator.
+ * NULL if the object was not created by a user request.
+ * (kernel object).
+ *
+ * @refcount: Number of references to this object, not
+ * including the hash entry. A reference to a base object can
+ * only be held by a ref object.
+ *
+ * @refcount_release: A function to be called when there are
+ * no more references to this object. This function should
+ * destroy the object (or make sure destruction eventually happens),
+ * and when it is called, the object has
+ * already been taken out of the per-device hash. The parameter
+ * "base" should be set to NULL by the function.
+ *
+ * @ref_obj_release: A function to be called when a reference object
+ * with another ttm_ref_type than TTM_REF_USAGE is deleted.
+ * This function may, for example, release a lock held by a user-space
+ * process.
+ *
+ * This struct is intended to be used as a base struct for objects that
+ * are visible to user-space. It provides a global name, race-safe
+ * access and refcounting, minimal access control and hooks for unref actions.
+ */
+
+struct ttm_base_object {
+ struct rcu_head rhead;
+ struct drm_hash_item hash;
+ enum ttm_object_type object_type;
+ bool shareable;
+ struct ttm_object_file *tfile;
+ struct kref refcount;
+ void (*refcount_release) (struct ttm_base_object **base);
+ void (*ref_obj_release) (struct ttm_base_object *base,
+ enum ttm_ref_type ref_type);
+};
+
+
+/**
+ * struct ttm_prime_object - Modified base object that is prime-aware
+ *
+ * @base: struct ttm_base_object that we derive from
+ * @mutex: Mutex protecting the @dma_buf member.
+ * @size: Size of the dma_buf associated with this object
+ * @real_type: Type of the underlying object. Needed since we're setting
+ * the value of @base::object_type to ttm_prime_type
+ * @dma_buf: Non-refcounted pointer to a struct dma_buf created from this
+ * object.
+ * @refcount_release: The underlying object's release method. Needed since
+ * we set @base::refcount_release to our own release method.
+ */
+
+struct ttm_prime_object {
+ struct ttm_base_object base;
+ struct mutex mutex;
+ size_t size;
+ enum ttm_object_type real_type;
+ struct dma_buf *dma_buf;
+ void (*refcount_release) (struct ttm_base_object **);
+};
+
+/**
+ * ttm_base_object_init
+ *
+ * @tfile: Pointer to a struct ttm_object_file.
+ * @base: The struct ttm_base_object to initialize.
+ * @shareable: This object is shareable with other applications.
+ * (different @tfile pointers.)
+ * @type: The object type.
+ * @refcount_release: See the struct ttm_base_object description.
+ * @ref_obj_release: See the struct ttm_base_object description.
+ *
+ * Initializes a struct ttm_base_object.
+ */
+
+extern int ttm_base_object_init(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ bool shareable,
+ enum ttm_object_type type,
+ void (*refcount_release) (struct ttm_base_object
+ **),
+ void (*ref_obj_release) (struct ttm_base_object
+ *,
+ enum ttm_ref_type
+ ref_type));
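+
+/*
+ * Hedged initialization sketch: a driver structure embedding a
+ * struct ttm_base_object registers it as shareable under a driver type.
+ * my_obj and my_base_release() are hypothetical driver-side names.
+ *
+ *	ret = ttm_base_object_init(tfile, &my_obj->base, true,
+ *				   ttm_driver_type0,
+ *				   &my_base_release, NULL);
+ *	if (unlikely(ret != 0))
+ *		return ret;
+ */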
+
+/**
+ * ttm_base_object_lookup
+ *
+ * @tfile: Pointer to a struct ttm_object_file.
+ * @key: Hash key
+ *
+ * Looks up a struct ttm_base_object with the key @key.
+ */
+
+extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
+ *tfile, uint32_t key);
+
+/**
+ * ttm_base_object_lookup_for_ref
+ *
+ * @tdev: Pointer to a struct ttm_object_device.
+ * @key: Hash key
+ *
+ * Looks up a struct ttm_base_object with the key @key.
+ * This function should only be used when the struct tfile associated with the
+ * caller doesn't yet have a reference to the base object.
+ */
+
+extern struct ttm_base_object *
+ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key);
+
+/**
+ * ttm_base_object_unref
+ *
+ * @p_base: Pointer to a pointer referencing a struct ttm_base_object.
+ *
+ * Decrements the base object refcount and clears the pointer pointed to by
+ * p_base.
+ */
+
+extern void ttm_base_object_unref(struct ttm_base_object **p_base);
+
+/**
+ * ttm_ref_object_add.
+ *
+ * @tfile: A struct ttm_object_file representing the application owning the
+ * ref_object.
+ * @base: The base object to reference.
+ * @ref_type: The type of reference.
+ * @existed: Upon completion, indicates that an identical reference object
+ * already existed, and the refcount was incremented on that object instead.
+ *
+ * Checks that the base object is shareable and adds a ref object to it.
+ *
+ * Adding a ref object to a base object is basically like referencing the
+ * base object, but a user-space application holds the reference. When the
+ * file corresponding to @tfile is closed, all its reference objects are
+ * deleted. A reference object can have different types depending on what
+ * it's intended for. It can be plain refcounting to prevent object
+ * destruction; when user-space takes a lock, it can add a ref object to
+ * that lock to make sure the lock is released if the application dies.
+ * A ref object will hold a single reference on a base object.
+ */
+extern int ttm_ref_object_add(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ enum ttm_ref_type ref_type, bool *existed);
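+
+/*
+ * Hedged sketch of the reference pattern described above: when a handle is
+ * handed to user-space, a TTM_REF_USAGE ref object ties the base object's
+ * lifetime to the opening file. Variable names here are hypothetical.
+ *
+ *	bool existed;
+ *
+ *	base = ttm_base_object_lookup(tfile, handle);
+ *	if (unlikely(base == NULL))
+ *		return -EINVAL;
+ *	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, &existed);
+ *	ttm_base_object_unref(&base);
+ *
+ * The reference is later dropped explicitly with
+ * ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE), or implicitly
+ * when the file is closed.
+ */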
+
+extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
+ struct ttm_base_object *base);
+
+/**
+ * ttm_ref_object_base_unref
+ *
+ * @tfile: The struct ttm_object_file holding the ref object.
+ * @key: Key representing the base object.
+ * @ref_type: Ref type of the ref object to be unreferenced.
+ *
+ * Unreference a ref object with type @ref_type
+ * on the base object identified by @key. If there are no duplicate
+ * references, the ref object will be destroyed and the base object
+ * will be unreferenced.
+ */
+extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
+ unsigned long key,
+ enum ttm_ref_type ref_type);
+
+/**
+ * ttm_object_file_init - initialize a struct ttm_object_file
+ *
+ * @tdev: A struct ttm_object_device this file is initialized on.
+ * @hash_order: Order of the hash table used to hold the reference objects.
+ *
+ * This is typically called by the file_ops::open function.
+ */
+
+extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
+ *tdev,
+ unsigned int hash_order);
+
+/**
+ * ttm_object_file_release - release data held by a ttm_object_file
+ *
+ * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
+ * *p_tfile will be set to NULL by this function.
+ *
+ * Releases all data associated with a ttm_object_file.
+ * Typically called from file_ops::release. The caller must
+ * ensure that there are no concurrent users of tfile.
+ */
+
+extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
+
+/**
+ * ttm_object_device_init - initialize a struct ttm_object_device
+ *
+ * @mem_glob: struct ttm_mem_global for memory accounting.
+ * @hash_order: Order of hash table used to hash the base objects.
+ * @ops: DMA buf ops for prime objects of this device.
+ *
+ * This function is typically called on device initialization to prepare
+ * data structures needed for ttm base and ref objects.
+ */
+
+extern struct ttm_object_device *
+ttm_object_device_init(struct ttm_mem_global *mem_glob,
+ unsigned int hash_order,
+ const struct dma_buf_ops *ops);
+
+/**
+ * ttm_object_device_release - release data held by a ttm_object_device
+ *
+ * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
+ * *p_tdev will be set to NULL by this function.
+ *
+ * Releases all data associated with a ttm_object_device.
+ * Typically called from driver::unload before the destruction of the
+ * device private data structure.
+ */
+
+extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
+
+#define ttm_base_object_kfree(__object, __base)\
+ kfree_rcu(__object, __base.rhead)
+
+extern int ttm_prime_object_init(struct ttm_object_file *tfile,
+ size_t size,
+ struct ttm_prime_object *prime,
+ bool shareable,
+ enum ttm_object_type type,
+ void (*refcount_release)
+ (struct ttm_base_object **),
+ void (*ref_obj_release)
+ (struct ttm_base_object *,
+ enum ttm_ref_type ref_type));
+
+static inline enum ttm_object_type
+ttm_base_object_type(struct ttm_base_object *base)
+{
+ return (base->object_type == ttm_prime_type) ?
+ container_of(base, struct ttm_prime_object, base)->real_type :
+ base->object_type;
+}
+extern int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
+ int fd, u32 *handle);
+extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
+ uint32_t handle, uint32_t flags,
+ int *prime_fd);
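+
+/*
+ * Hedged sketch of prime export/import using the two helpers above,
+ * assuming the object was initialized with ttm_prime_object_init(); the
+ * flags value and error handling are illustrative only.
+ *
+ *	int fd;
+ *	u32 handle;
+ *
+ *	ret = ttm_prime_handle_to_fd(tfile, handle, flags, &fd);
+ *	...
+ *	ret = ttm_prime_fd_to_handle(tfile, fd, &handle);
+ */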
+
+#define ttm_prime_object_kfree(__obj, __prime) \
+ kfree_rcu(__obj, __prime.base.rhead)
+#endif
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
new file mode 100644
index 000000000..49a828425
--- /dev/null
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Jerome Glisse <jglisse@redhat.com>
+ */
+#ifndef TTM_PAGE_ALLOC
+#define TTM_PAGE_ALLOC
+
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_memory.h>
+
+struct device;
+
+/**
+ * Initialize pool allocator.
+ */
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
+/**
+ * Free pool allocator.
+ */
+void ttm_page_alloc_fini(void);
+
+/**
+ * ttm_pool_populate:
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Add backing pages to all of @ttm
+ */
+extern int ttm_pool_populate(struct ttm_tt *ttm);
+
+/**
+ * ttm_pool_unpopulate:
+ *
+ * @ttm: The struct ttm_tt from which to free backing pages.
+ *
+ * Free all pages of @ttm
+ */
+extern void ttm_pool_unpopulate(struct ttm_tt *ttm);
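+
+/*
+ * Hedged sketch (an assumption, not mandated here): drivers without
+ * special DMA needs typically forward their ttm_tt populate hooks to the
+ * pool allocator. The my_ttm_tt_* callbacks below are hypothetical.
+ *
+ *	static int my_ttm_tt_populate(struct ttm_tt *ttm)
+ *	{
+ *		return ttm_pool_populate(ttm);
+ *	}
+ *
+ *	static void my_ttm_tt_unpopulate(struct ttm_tt *ttm)
+ *	{
+ *		ttm_pool_unpopulate(ttm);
+ *	}
+ */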
+
+/**
+ * Output the state of pools to debugfs file
+ */
+extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
+
+
+#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
+/**
+ * Initialize pool allocator.
+ */
+int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
+
+/**
+ * Free pool allocator.
+ */
+void ttm_dma_page_alloc_fini(void);
+
+/**
+ * Output the state of pools to debugfs file
+ */
+extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
+
+extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+
+#else
+static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
+ unsigned max_pages)
+{
+ return -ENODEV;
+}
+
+static inline void ttm_dma_page_alloc_fini(void) { return; }
+
+static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+ return 0;
+}
+static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
+ struct device *dev)
+{
+ return -ENOMEM;
+}
+static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
+ struct device *dev)
+{
+}
+#endif
+
+#endif
diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h
new file mode 100644
index 000000000..8ed44f9bb
--- /dev/null
+++ b/include/drm/ttm/ttm_placement.h
@@ -0,0 +1,95 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#ifndef _TTM_PLACEMENT_H_
+#define _TTM_PLACEMENT_H_
+/*
+ * Memory regions for data placement.
+ */
+
+#define TTM_PL_SYSTEM 0
+#define TTM_PL_TT 1
+#define TTM_PL_VRAM 2
+#define TTM_PL_PRIV0 3
+#define TTM_PL_PRIV1 4
+#define TTM_PL_PRIV2 5
+#define TTM_PL_PRIV3 6
+#define TTM_PL_PRIV4 7
+#define TTM_PL_PRIV5 8
+#define TTM_PL_SWAPPED 15
+
+#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
+#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
+#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
+#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
+#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
+#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
+#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
+#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
+#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
+#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
+#define TTM_PL_MASK_MEM 0x0000FFFF
+
+/*
+ * Other flags that affect data placement.
+ * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
+ * if available.
+ * TTM_PL_FLAG_UNCACHED indicates uncached mappings.
+ * TTM_PL_FLAG_WC indicates write-combined mappings.
+ * TTM_PL_FLAG_SHARED means that another application may
+ * reference the buffer.
+ * TTM_PL_FLAG_NO_EVICT means that the buffer may never
+ * be evicted to make room for other buffers.
+ * TTM_PL_FLAG_TOPDOWN requests to be placed from the
+ * top of the memory area, instead of the bottom.
+ */
+
+#define TTM_PL_FLAG_CACHED (1 << 16)
+#define TTM_PL_FLAG_UNCACHED (1 << 17)
+#define TTM_PL_FLAG_WC (1 << 18)
+#define TTM_PL_FLAG_SHARED (1 << 20)
+#define TTM_PL_FLAG_NO_EVICT (1 << 21)
+#define TTM_PL_FLAG_TOPDOWN (1 << 22)
+
+#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
+ TTM_PL_FLAG_UNCACHED | \
+ TTM_PL_FLAG_WC)
+
+#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
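+
+/*
+ * Hedged example of combining the flags above into a single placement
+ * value, e.g. write-combined, unevictable VRAM:
+ *
+ *	uint32_t placement = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC |
+ *			     TTM_PL_FLAG_NO_EVICT;
+ *
+ * The memory-type part can be recovered with (placement & TTM_PL_MASK_MEM)
+ * and the caching part with (placement & TTM_PL_MASK_CACHING).
+ */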
+
+/*
+ * Access flags to be used for CPU and GPU mappings.
+ * The idea is that the TTM synchronization mechanism will
+ * allow concurrent READ access and exclusive write access.
+ * Currently GPU and CPU accesses are exclusive.
+ */
+
+#define TTM_ACCESS_READ (1 << 0)
+#define TTM_ACCESS_WRITE (1 << 1)
+
+#endif