diff --git a/Documentation/accel/qaic/qaic.rst b/Documentation/accel/qaic/qaic.rst
index f81020736ebfbe60efede8d094ef2db52bce6157..efb7771273bbc35e03e29f9e0f5b4613379a038b 100644
--- a/Documentation/accel/qaic/qaic.rst
+++ b/Documentation/accel/qaic/qaic.rst
@@ -93,8 +93,15 @@ commands (does not impact QAIC).
 uAPI
 ====
 
+QAIC creates an accel device per physical PCIe device. This accel device exists
+for as long as the PCIe device is known to Linux.
+
+The PCIe device may not be in a state to accept requests from userspace at
+all times. QAIC triggers KOBJ_ONLINE/OFFLINE uevents to advertise when the
+device can accept requests (ONLINE) and when the device is no longer accepting
+requests (OFFLINE) because of a reset or other state transition.
+
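+As an illustration, userspace can observe these transitions by listening on
+the kernel uevent netlink socket. This is only a sketch of one possible
+consumer, not part of the QAIC uAPI; in practice the received event should
+also be matched against the accel device's devpath:
+
+.. code-block:: c
+
+   #include <linux/netlink.h>
+   #include <stdio.h>
+   #include <string.h>
+   #include <sys/socket.h>
+
+   int main(void)
+   {
+           struct sockaddr_nl addr = {
+                   .nl_family = AF_NETLINK,
+                   .nl_groups = 1, /* kernel uevent multicast group */
+           };
+           char buf[4096];
+           ssize_t len;
+           int fd;
+
+           fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_KOBJECT_UEVENT);
+           if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)))
+                   return 1;
+
+           /* Each uevent starts with "ACTION@devpath"; KOBJ_ONLINE and
+            * KOBJ_OFFLINE arrive as "online" and "offline" actions. */
+           while ((len = recv(fd, buf, sizeof(buf) - 1, 0)) > 0) {
+                   buf[len] = '\0';
+                   if (!strncmp(buf, "online@", 7) || !strncmp(buf, "offline@", 8))
+                           printf("device state change: %s\n", buf);
+           }
+           return 0;
+   }
+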
 QAIC defines a number of driver specific IOCTLs as part of the userspace API.
-This section describes those APIs.
 
 DRM_IOCTL_QAIC_MANAGE
   This IOCTL allows userspace to send a NNC request to the QSM. The call will
diff --git a/Documentation/core-api/pin_user_pages.rst b/Documentation/core-api/pin_user_pages.rst
index d3c1f6d8c0e0ec819843ad11bea22cad9614e657..6b5f7e6e7155fbd3c8af2ccb04c7207d43c996d0 100644
--- a/Documentation/core-api/pin_user_pages.rst
+++ b/Documentation/core-api/pin_user_pages.rst
@@ -153,6 +153,8 @@ NOTE: Some pages, such as DAX pages, cannot be pinned with longterm pins. That's
 because DAX pages do not have a separate page cache, and so "pinning" implies
 locking down file system blocks, which is not (yet) supported in that way.
 
+.. _mmu-notifier-registration-case:
+
 CASE 3: MMU notifier registration, with or without page faulting hardware
 -------------------------------------------------------------------------
 Device drivers can pin pages via get_user_pages*(), and register for mmu
diff --git a/Documentation/devicetree/bindings/display/bridge/lontium,lt8912b.yaml b/Documentation/devicetree/bindings/display/bridge/lontium,lt8912b.yaml
index f201ae4af4fbd7ee377bf5719077705da0773858..2cef25215798598b8249c6fe3c20600c8d7b2aa1 100644
--- a/Documentation/devicetree/bindings/display/bridge/lontium,lt8912b.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/lontium,lt8912b.yaml
@@ -55,6 +55,27 @@ properties:
       - port@0
       - port@1
 
+  vcchdmipll-supply:
+    description: A 1.8V supply that powers the HDMI PLL.
+
+  vcchdmitx-supply:
+    description: A 1.8V supply that powers the HDMI TX part.
+
+  vcclvdspll-supply:
+    description: A 1.8V supply that powers the LVDS PLL.
+
+  vcclvdstx-supply:
+    description: A 1.8V supply that powers the LVDS TX part.
+
+  vccmipirx-supply:
+    description: A 1.8V supply that powers the MIPI RX part.
+
+  vccsysclk-supply:
+    description: A 1.8V supply that powers the SYSCLK.
+
+  vdd-supply:
+    description: A 1.8V supply that powers the digital part.
+
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
index ffb35288ffbb4ee2be6daa3afd7a528febd8d059..916bb7f94206293bf9d7584a1dc77e482cc91ee6 100644
--- a/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
+++ b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
@@ -23,6 +23,7 @@ properties:
     items:
       - enum:
           - hannstar,hsd060bhw4
+          - powkiddy,x55-panel
       - const: himax,hx8394
 
   reg: true
@@ -31,6 +32,8 @@ properties:
 
   backlight: true
 
+  rotation: true
+
   port: true
 
   vcc-supply:
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml
index e7ab6224b52e09437207a7a6bcd2d0af2bc31cd5..b1e624be3e3349e08e8d2784a4a258e0718b9e1a 100644
--- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml
+++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml
@@ -16,6 +16,7 @@ properties:
   compatible:
     items:
       - enum:
+          - ampire,am8001280g
           - bananapi,lhr050h41
           - feixin,k101-im2byl02
           - tdo,tl050hdv35
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index 3ec9ee95045fbce8db9f623753b2f67fb79ccd45..2021aa82871af2f8082a088dfd816838a3c89528 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -73,6 +73,8 @@ properties:
       - auo,t215hvn01
         # Shanghai AVIC Optoelectronics 7" 1024x600 color TFT-LCD panel
       - avic,tm070ddh03
+        # BOE BP101WX1-100 10.1" WXGA (1280x800) LVDS panel
+      - boe,bp101wx1-100
         # BOE EV121WXM-N10-1850 12.1" WXGA (1280x800) TFT LCD panel
       - boe,ev121wxm-n10-1850
         # BOE HV070WSA-100 7.01" WSVGA TFT LCD panel
@@ -144,6 +146,8 @@ properties:
       - edt,etmv570g2dhu
         # E Ink VB3300-KCA
       - eink,vb3300-kca
+        # Evervision Electronics Co. Ltd. VGG644804 5.7" VGA TFT LCD Panel
+      - evervision,vgg644804
         # Evervision Electronics Co. Ltd. VGG804821 5.0" WVGA TFT LCD Panel
       - evervision,vgg804821
         # Foxlink Group 5" WVGA TFT LCD panel
diff --git a/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
index ae09cd3cbce1fcb138459ab16e5872190fd9e5de..b6767ef0d24dec8bee13006343dbc2d68821a110 100644
--- a/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
+++ b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
@@ -23,6 +23,7 @@ properties:
   compatible:
     enum:
       - ti,am625-dss
+      - ti,am62a7-dss
       - ti,am65x-dss
 
   reg:
@@ -87,6 +88,7 @@ properties:
           For AM65x DSS, the OLDI output port node from video port 1.
           For AM625 DSS, the internal DPI output port node from video
           port 1.
+          For AM62A7 DSS, the port is tied off inside the SoC.
 
       port@1:
         $ref: /schemas/graph.yaml#/properties/port
@@ -108,6 +110,18 @@ properties:
       Input memory (from main memory to dispc) bandwidth limit in
       bytes per second
 
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: ti,am62a7-dss
+    then:
+      properties:
+        ports:
+          properties:
+            port@0: false
+
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml
index 0fae1ef013be8b6853c60f71e42e9cdfee412ff4..abd4aa335fbcebafc9164bd4963f9db60f0450c4 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml
@@ -29,6 +29,7 @@ properties:
               - allwinner,sun50i-a64-mali
               - rockchip,rk3036-mali
               - rockchip,rk3066-mali
+              - rockchip,rk3128-mali
               - rockchip,rk3188-mali
               - rockchip,rk3228-mali
               - samsung,exynos4210-mali
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 270d320407c7c0e5f38651f2bcabd30bcd784a42..13d3627d8bc083f0eb48b4c3f955f5c8e7117489 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -548,6 +548,8 @@ Plane Composition Properties
 .. kernel-doc:: drivers/gpu/drm/drm_blend.c
    :doc: overview
 
+.. _damage_tracking_properties:
+
 Damage Tracking Properties
 --------------------------
 
@@ -579,6 +581,12 @@ Variable Refresh Properties
 .. kernel-doc:: drivers/gpu/drm/drm_connector.c
    :doc: Variable refresh properties
 
+Cursor Hotspot Properties
+-------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_plane.c
+   :doc: hotspot properties
+
 Existing KMS Properties
 -----------------------
 
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index acc5901ac84088f12f5cf498f387384224b2b9e1..d55751cad67cf9748c60c0b2bfa75f5ebf5f58a4 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -466,6 +466,8 @@ DRM MM Range Allocator Function References
 .. kernel-doc:: drivers/gpu/drm/drm_mm.c
    :export:
 
+.. _drm_gpuvm:
+
 DRM GPUVM
 =========
 
@@ -481,6 +483,8 @@ Split and Merge
 .. kernel-doc:: drivers/gpu/drm/drm_gpuvm.c
    :doc: Split and Merge
 
+.. _drm_gpuvm_locking:
+
 Locking
 -------
 
diff --git a/Documentation/gpu/drm-vm-bind-locking.rst b/Documentation/gpu/drm-vm-bind-locking.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a345aa513d12b38d86adb266fb6d581a3cdcd617
--- /dev/null
+++ b/Documentation/gpu/drm-vm-bind-locking.rst
@@ -0,0 +1,582 @@
+.. SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+===============
+VM_BIND locking
+===============
+
+This document attempts to describe what's needed to get VM_BIND locking right,
+including the userptr mmu_notifier locking. It also discusses some
+optimizations to get rid of the looping through of all userptr mappings and
+external / shared object mappings that is needed in the simplest
+implementation. In addition, there is a section describing the VM_BIND locking
+required for implementing recoverable pagefaults.
+
+The DRM GPUVM set of helpers
+============================
+
+There is a set of helpers for drivers implementing VM_BIND, and this
+set of helpers implements much, but not all of the locking described
+in this document. In particular, it is currently lacking a userptr
+implementation. This document does not intend to describe the DRM GPUVM
+implementation in detail, but it is covered in :ref:`its own
+documentation <drm_gpuvm>`. It is highly recommended for any driver
+implementing VM_BIND to use the DRM GPUVM helpers and to extend them if
+common functionality is missing.
+
+Nomenclature
+============
+
+* ``gpu_vm``: Abstraction of a virtual GPU address space with
+  meta-data. Typically one per client (DRM file-private), or one per
+  execution context.
+* ``gpu_vma``: Abstraction of a GPU address range within a gpu_vm with
+  associated meta-data. The backing storage of a gpu_vma can either be
+  a GEM object or anonymous or page-cache pages that are also mapped into
+  the CPU address space of the process.
+* ``gpu_vm_bo``: Abstracts the association of a GEM object and
+  a VM. The GEM object maintains a list of gpu_vm_bos, where each gpu_vm_bo
+  maintains a list of gpu_vmas.
+* ``userptr gpu_vma or just userptr``: A gpu_vma, whose backing store
+  is anonymous or page-cache pages as described above.
+* ``revalidating``: Revalidating a gpu_vma means making the latest version
+  of the backing store resident and making sure the gpu_vma's
+  page-table entries point to that backing store.
+* ``dma_fence``: A struct dma_fence that is similar to a struct completion
+  and which tracks GPU activity. When the GPU activity is finished,
+  the dma_fence signals. Please refer to the ``DMA Fences`` section of
+  the :doc:`dma-buf doc </driver-api/dma-buf>`.
+* ``dma_resv``: A struct dma_resv (a.k.a. reservation object) that is used
+  to track GPU activity in the form of multiple dma_fences on a
+  gpu_vm or a GEM object. The dma_resv contains an array / list
+  of dma_fences and a lock that needs to be held when adding
+  additional dma_fences to the dma_resv. The lock is of a type that
+  allows deadlock-safe locking of multiple dma_resvs in arbitrary
+  order. Please refer to the ``Reservation Objects`` section of the
+  :doc:`dma-buf doc </driver-api/dma-buf>`.
+* ``exec function``: An exec function is a function that revalidates all
+  affected gpu_vmas, submits a GPU command batch and registers the
+  dma_fence representing the GPU command's activity with all affected
+  dma_resvs. For completeness, although not covered by this document,
+  it's worth mentioning that an exec function may also be the
+  revalidation worker that is used by some drivers in compute /
+  long-running mode.
+* ``local object``: A GEM object which is only mapped within a
+  single VM. Local GEM objects share the gpu_vm's dma_resv.
+* ``external object``: a.k.a. shared object: A GEM object which may be shared
+  by multiple gpu_vms and whose backing storage may be shared with
+  other drivers.
+
+Locks and locking order
+=======================
+
+One of the benefits of VM_BIND is that local GEM objects share the gpu_vm's
+dma_resv object and hence the dma_resv lock. So, even with a huge
+number of local GEM objects, only one lock is needed to make the exec
+sequence atomic.
+
+The following locks and locking orders are used:
+
+* The ``gpu_vm->lock`` (optionally an rwsem). Protects the gpu_vm's
+  data structure keeping track of gpu_vmas. It can also protect the
+  gpu_vm's list of userptr gpu_vmas. With a CPU mm analogy this would
+  correspond to the mmap_lock. An rwsem allows several readers to walk
+  the VM tree concurrently, but the benefit of that concurrency most
+  likely varies from driver to driver.
+* The ``userptr_seqlock``. This lock is taken in read mode for each
+  userptr gpu_vma on the gpu_vm's userptr list, and in write mode during mmu
+  notifier invalidation. This is not a real seqlock but described in
+  ``mm/mmu_notifier.c`` as a "Collision-retry read-side/write-side
+  'lock' a lot like a seqcount. However this allows multiple
+  write-sides to hold it at once...". The read side critical section
+  is enclosed by ``mmu_interval_read_begin() /
+  mmu_interval_read_retry()`` with ``mmu_interval_read_begin()``
+  sleeping if the write side is held.
+  The write side is held by the core mm while calling mmu interval
+  invalidation notifiers.
+* The ``gpu_vm->resv`` lock. Protects the gpu_vm's list of gpu_vmas needing
+  rebinding, as well as the residency state of all the gpu_vm's local
+  GEM objects.
+  Furthermore, it typically protects the gpu_vm's list of evicted and
+  external GEM objects.
+* The ``gpu_vm->userptr_notifier_lock``. This is an rwsem that is
+  taken in read mode during exec and write mode during a mmu notifier
+  invalidation. The userptr notifier lock is per gpu_vm.
+* The ``gem_object->gpuva_lock``. This lock protects the GEM object's
+  list of gpu_vm_bos. This is usually the same lock as the GEM
+  object's dma_resv, but some drivers protect this list differently,
+  see below.
+* The ``gpu_vm list spinlocks``. Some implementations need these to
+  update the gpu_vm's evicted object and external object
+  lists. For those implementations, the spinlocks are grabbed when the
+  lists are manipulated. However, to avoid locking order violations
+  with the dma_resv locks, a special scheme is needed when iterating
+  over the lists.
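+
+As a rough orientation, and not taken from any particular driver, the
+locks above might be collected in a driver's gpu_vm structure as in the
+following sketch, with purely illustrative member names:
+
+.. code-block:: C
+
+   struct gpu_vm {
+           /* Protects the gpu_vm's gpu_vma tree and userptr list. */
+           struct rw_semaphore lock;
+           /* Shared with all local GEM objects; protects the evicted-,
+            * external object- and rebind lists. */
+           struct dma_resv *resv;
+           /* Read mode during exec, write mode in the mmu notifier. */
+           struct rw_semaphore userptr_notifier_lock;
+           /* Optional, for updating lists from fence signalling paths. */
+           spinlock_t list_lock;
+
+           struct list_head evict_list;
+           struct list_head extobj_list;
+           struct list_head userptr_list;
+           struct list_head rebind_list;
+   };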
+
+.. _gpu_vma lifetime:
+
+Protection and lifetime of gpu_vm_bos and gpu_vmas
+==================================================
+
+The GEM object's list of gpu_vm_bos and the gpu_vm_bo's list of gpu_vmas
+are protected by the ``gem_object->gpuva_lock``, which is typically the
+same as the GEM object's dma_resv, but if the driver
+needs to access these lists from within a dma_fence signalling
+critical section, it can instead choose to protect it with a
+separate lock, which can be locked from within the dma_fence signalling
+critical section. Such drivers then need to pay additional attention
+to what locks need to be taken from within the loop when iterating
+over the gpu_vm_bo and gpu_vma lists to avoid locking-order violations.
+
+The DRM GPUVM set of helpers provides lockdep asserts that this lock is
+held in relevant situations and also provides a means of making itself
+aware of which lock is actually used: :c:func:`drm_gem_gpuva_set_lock`.
+
+Each gpu_vm_bo holds a reference counted pointer to the underlying GEM
+object, and each gpu_vma holds a reference counted pointer to the
+gpu_vm_bo. When iterating over the GEM object's list of gpu_vm_bos and
+over the gpu_vm_bo's list of gpu_vmas, the ``gem_object->gpuva_lock`` must
+not be dropped; otherwise, gpu_vmas attached to a gpu_vm_bo may
+disappear without notice since those are not reference-counted. A
+driver may implement its own scheme to allow this at the expense of
+additional complexity, but this is outside the scope of this document.
+
+In the DRM GPUVM implementation, each gpu_vm_bo and each gpu_vma
+holds a reference count on the gpu_vm itself. Due to this, and to avoid circular
+reference counting, cleanup of the gpu_vm's gpu_vmas must not be done from the
+gpu_vm's destructor. Drivers typically implement a gpu_vm close
+function for this cleanup. The gpu_vm close function will abort gpu
+execution using this VM, unmap all gpu_vmas and release page-table memory.
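+
+A sketch of such a close function, in the same illustrative pseudo-code
+style as the examples below and ignoring, for brevity, the per-object
+locking needed for external objects, might be:
+
+.. code-block:: C
+
+   void gpu_vm_close(struct gpu_vm *gpu_vm)
+   {
+           /* Stop and wait for pending GPU execution using this VM. */
+           abort_gpu_execution(gpu_vm);
+
+           down_write(&gpu_vm->lock);
+           dma_resv_lock(gpu_vm->resv);
+           /* Unlinking drops the gpu_vmas' references on their gpu_vm_bos,
+            * which in turn drop their references on the gpu_vm. */
+           for_each_gpu_vma_of_gpu_vm(gpu_vm, &gpu_vma)
+                   unmap_and_unlink_gpu_vma(&gpu_vma);
+           release_page_table_memory(gpu_vm);
+           dma_resv_unlock(gpu_vm->resv);
+           up_write(&gpu_vm->lock);
+   }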
+
+Revalidation and eviction of local objects
+==========================================
+
+Note that in all the code examples given below we use simplified
+pseudo-code. In particular, the dma_resv deadlock avoidance algorithm
+as well as reserving memory for dma_resv fences is left out.
+
+Revalidation
+____________
+With VM_BIND, all local objects need to be resident when the gpu is
+executing using the gpu_vm, and the objects need to have valid
+gpu_vmas set up pointing to them. Typically, each gpu command buffer
+submission is therefore preceded by a re-validation section:
+
+.. code-block:: C
+
+   dma_resv_lock(gpu_vm->resv);
+
+   // Validation section starts here.
+   for_each_gpu_vm_bo_on_evict_list(&gpu_vm->evict_list, &gpu_vm_bo) {
+           validate_gem_bo(&gpu_vm_bo->gem_bo);
+
+           // The following list iteration needs the Gem object's
+           // dma_resv to be held (it protects the gpu_vm_bo's list of
+           // gpu_vmas, but since local gem objects share the gpu_vm's
+           // dma_resv, it is already held at this point.)
+           for_each_gpu_vma_of_gpu_vm_bo(&gpu_vm_bo, &gpu_vma)
+                  move_gpu_vma_to_rebind_list(&gpu_vma, &gpu_vm->rebind_list);
+   }
+
+   for_each_gpu_vma_on_rebind_list(&gpu_vm->rebind_list, &gpu_vma) {
+           rebind_gpu_vma(&gpu_vma);
+           remove_gpu_vma_from_rebind_list(&gpu_vma);
+   }
+   // Validation section ends here, and job submission starts.
+
+   add_dependencies(&gpu_job, &gpu_vm->resv);
+   job_dma_fence = gpu_submit(&gpu_job);
+
+   add_dma_fence(job_dma_fence, &gpu_vm->resv);
+   dma_resv_unlock(gpu_vm->resv);
+
+The reason for having a separate gpu_vm rebind list is that there
+might be userptr gpu_vmas that are not mapping a buffer object but
+that also need rebinding.
+
+Eviction
+________
+
+Eviction of one of these local objects will then look similar to the
+following:
+
+.. code-block:: C
+
+   obj = get_object_from_lru();
+
+   dma_resv_lock(obj->resv);
+   for_each_gpu_vm_bo_of_obj(obj, &gpu_vm_bo)
+           add_gpu_vm_bo_to_evict_list(&gpu_vm_bo, &gpu_vm->evict_list);
+
+   add_dependencies(&eviction_job, &obj->resv);
+   job_dma_fence = gpu_submit(&eviction_job);
+   add_dma_fence(job_dma_fence, &obj->resv);
+
+   dma_resv_unlock(&obj->resv);
+   put_object(obj);
+
+Note that since the object is local to the gpu_vm, it will share the gpu_vm's
+dma_resv lock such that ``obj->resv == gpu_vm->resv``.
+The gpu_vm_bos marked for eviction are put on the gpu_vm's evict list,
+which is protected by ``gpu_vm->resv``. During eviction all local
+objects have their dma_resv locked, and due to the above equality, the
+gpu_vm's dma_resv protecting the gpu_vm's evict list is locked as well.
+
+With VM_BIND, gpu_vmas don't need to be unbound before eviction,
+since the driver must ensure that the eviction blit or copy will wait
+for GPU idle or depend on all previous GPU activity. Furthermore, any
+subsequent attempt by the GPU to access freed memory through the
+gpu_vma will be preceded by a new exec function, with a revalidation
+section which will make sure all gpu_vmas are rebound. The eviction
+code holding the object's dma_resv while revalidating will ensure a
+new exec function may not race with the eviction.
+
+A driver can be implemented in such a way that, on each exec function,
+only a subset of gpu_vmas is selected for rebind. In this case, all gpu_vmas
+that are *not* selected for rebind must be unbound before the exec
+function workload is submitted, as shown in the sketch below.
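+
+A sketch of such a selective variant of the rebind loop above follows;
+the selection predicate is driver policy and purely illustrative:
+
+.. code-block:: C
+
+   for_each_gpu_vma_on_rebind_list(&gpu_vm->rebind_list, &gpu_vma) {
+           if (driver_selected_for_rebind(&gpu_vma))
+                   rebind_gpu_vma(&gpu_vma);
+           else
+                   unbind_gpu_vma(&gpu_vma);
+           remove_gpu_vma_from_rebind_list(&gpu_vma);
+   }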
+
+Locking with external buffer objects
+====================================
+
+Since external buffer objects may be shared by multiple gpu_vms they
+can't share their reservation object with a single gpu_vm. Instead
+they need to have a reservation object of their own. The external
+objects bound to a gpu_vm using one or many gpu_vmas are therefore put on a
+per-gpu_vm list which is protected by the gpu_vm's dma_resv lock or
+one of the :ref:`gpu_vm list spinlocks <Spinlock iteration>`. Once
+the gpu_vm's reservation object is locked, it is safe to traverse the
+external object list and lock the dma_resvs of all external
+objects. However, if instead a list spinlock is used, a more elaborate
+iteration scheme needs to be used.
+
+At eviction time, the gpu_vm_bos of *all* the gpu_vms an external
+object is bound to need to be put on their gpu_vm's evict list.
+However, when evicting an external object, the dma_resvs of the
+gpu_vms the object is bound to are typically not held. Only
+the object's private dma_resv can be guaranteed to be held. If there
+is a ww_acquire context at hand at eviction time we could grab those
+dma_resvs but that could cause expensive ww_mutex rollbacks. A simple
+option is to just mark the gpu_vm_bos of the evicted gem object with
+an ``evicted`` bool that is inspected before the next time the
+corresponding gpu_vm's evicted list needs to be traversed, for example when
+traversing the list of external objects and locking them. At that time,
+both the gpu_vm's dma_resv and the object's dma_resv are held, and a
+gpu_vm_bo marked evicted can then be added to the gpu_vm's list of
+evicted gpu_vm_bos. The ``evicted`` bool is formally protected by the
+object's dma_resv.
+
+The exec function then becomes:
+
+.. code-block:: C
+
+   dma_resv_lock(gpu_vm->resv);
+
+   // External object list is protected by the gpu_vm->resv lock.
+   for_each_gpu_vm_bo_on_extobj_list(gpu_vm, &gpu_vm_bo) {
+           dma_resv_lock(gpu_vm_bo.gem_obj->resv);
+           if (gpu_vm_bo_marked_evicted(&gpu_vm_bo))
+                   add_gpu_vm_bo_to_evict_list(&gpu_vm_bo, &gpu_vm->evict_list);
+   }
+
+   for_each_gpu_vm_bo_on_evict_list(&gpu_vm->evict_list, &gpu_vm_bo) {
+           validate_gem_bo(&gpu_vm_bo->gem_bo);
+
+           for_each_gpu_vma_of_gpu_vm_bo(&gpu_vm_bo, &gpu_vma)
+                  move_gpu_vma_to_rebind_list(&gpu_vma, &gpu_vm->rebind_list);
+   }
+
+   for_each_gpu_vma_on_rebind_list(&gpu_vm->rebind_list, &gpu_vma) {
+           rebind_gpu_vma(&gpu_vma);
+           remove_gpu_vma_from_rebind_list(&gpu_vma);
+   }
+
+   add_dependencies(&gpu_job, &gpu_vm->resv);
+   job_dma_fence = gpu_submit(&gpu_job);
+
+   add_dma_fence(job_dma_fence, &gpu_vm->resv);
+   for_each_external_obj(gpu_vm, &obj)
+          add_dma_fence(job_dma_fence, &obj->resv);
+   dma_resv_unlock_all_resv_locks();
+
+And the corresponding shared-object aware eviction would look like:
+
+.. code-block:: C
+
+   obj = get_object_from_lru();
+
+   dma_resv_lock(obj->resv);
+   for_each_gpu_vm_bo_of_obj(obj, &gpu_vm_bo)
+           if (object_is_vm_local(obj))
+                add_gpu_vm_bo_to_evict_list(&gpu_vm_bo, &gpu_vm->evict_list);
+           else
+                mark_gpu_vm_bo_evicted(&gpu_vm_bo);
+
+   add_dependencies(&eviction_job, &obj->resv);
+   job_dma_fence = gpu_submit(&eviction_job);
+   add_dma_fence(job_dma_fence, &obj->resv);
+
+   dma_resv_unlock(&obj->resv);
+   put_object(obj);
+
+.. _Spinlock iteration:
+
+Accessing the gpu_vm's lists without the dma_resv lock held
+===========================================================
+
+Some drivers will hold the gpu_vm's dma_resv lock when accessing the
+gpu_vm's evict list and external objects lists. However, there are
+drivers that need to access these lists without the dma_resv lock
+held, for example due to asynchronous state updates from within the
+dma_fence signalling critical path. In such cases, a spinlock can be
+used to protect manipulation of the lists. However, since higher level
+sleeping locks need to be taken for each list item while iterating
+over the lists, the items already iterated over need to be
+temporarily moved to a private list and the spinlock released
+while processing each item:
+
+.. code-block:: C
+
+    struct list_head still_in_list;
+
+    INIT_LIST_HEAD(&still_in_list);
+
+    spin_lock(&gpu_vm->list_lock);
+    do {
+            struct list_head *entry = list_first_entry_or_null(&gpu_vm->list, head);
+
+            if (!entry)
+                    break;
+
+            list_move_tail(&entry->head, &still_in_list);
+            list_entry_get_unless_zero(entry);
+            spin_unlock(&gpu_vm->list_lock);
+
+            process(entry);
+
+            spin_lock(&gpu_vm->list_lock);
+            list_entry_put(entry);
+    } while (true);
+
+    list_splice_tail(&still_in_list, &gpu_vm->list);
+    spin_unlock(&gpu_vm->list_lock);
+
+Due to the additional locking and atomic operations, drivers that *can*
+avoid accessing the gpu_vm's list outside of the dma_resv lock
+might want to avoid this iteration scheme as well, particularly if the
+driver anticipates a large number of list items. For lists where the
+anticipated number of list items is small, where list iteration doesn't
+happen very often or where there is a significant additional cost
+associated with each iteration, the atomic operation overhead
+associated with this type of iteration is most likely negligible. Note that
+if this scheme is used, it is necessary to make sure this list
+iteration is protected by an outer level lock or semaphore, since list
+items are temporarily pulled off the list while iterating. It is
+also worth mentioning that the local list ``still_in_list`` should
+be considered protected by the ``gpu_vm->list_lock``, and it is
+thus possible for items to be removed from the local list
+concurrently with list iteration.
+
+Please refer to the :ref:`DRM GPUVM locking section
+<drm_gpuvm_locking>` and its internal
+:c:func:`get_next_vm_bo_from_list` function.
+
+
+userptr gpu_vmas
+================
+
+A userptr gpu_vma is a gpu_vma that, instead of mapping a buffer object to a
+GPU virtual address range, directly maps a CPU mm range of anonymous
+or file page-cache pages.
+A very simple approach would be to just pin the pages using
+pin_user_pages() at bind time and unpin them at unbind time, but this
+creates a Denial-Of-Service vector since a single user-space process
+would be able to pin down all of system memory, which is not
+desirable. (For special use-cases, and assuming proper accounting, pinning
+might still be a desirable feature, though.) What we need to do in the
+general case is to obtain a reference to the desired pages, make sure
+we are notified using an MMU notifier just before the CPU mm unmaps the
+pages, dirty them if they are not mapped read-only to the GPU, and
+then drop the reference.
+When we are notified by the MMU notifier that the CPU mm is about to drop the
+pages, we need to stop GPU access to the pages by waiting for VM idle
+in the MMU notifier and make sure that before the next time the GPU
+tries to access whatever is now present in the CPU mm range, we unmap
+the old pages from the GPU page tables and repeat the process of
+obtaining new page references. (See the :ref:`notifier example
+<Invalidation example>` below). Note that when the core mm decides to
+launder pages, we get such an unmap MMU notification and can mark the
+pages dirty again before the next GPU access. We also get similar MMU
+notifications for NUMA accounting which the GPU driver doesn't really
+need to care about, but so far it has proven difficult to exclude
+certain notifications.
+
+Using an MMU notifier for device DMA (and other methods) is described in
+:ref:`the pin_user_pages() documentation <mmu-notifier-registration-case>`.
+
+Now, the method of obtaining struct page references using
+get_user_pages() unfortunately can't be used under a dma_resv lock
+since that would violate the locking order of the dma_resv lock vs the
+mmap_lock that is grabbed when resolving a CPU pagefault. This means
+the gpu_vm's list of userptr gpu_vmas needs to be protected by an
+outer lock, which in our example below is the ``gpu_vm->lock``.
+
+The MMU interval seqlock for a userptr gpu_vma is used in the following
+way:
+
+.. code-block:: C
+
+   // Exclusive locking mode here is strictly needed only if there are
+   // invalidated userptr gpu_vmas present, to avoid concurrent userptr
+   // revalidations of the same userptr gpu_vma.
+   down_write(&gpu_vm->lock);
+   retry:
+
+   // Note: mmu_interval_read_begin() blocks until there is no
+   // invalidation notifier running anymore.
+   seq = mmu_interval_read_begin(&gpu_vma->userptr_interval);
+   if (seq != gpu_vma->saved_seq) {
+           obtain_new_page_pointers(&gpu_vma);
+           dma_resv_lock(&gpu_vm->resv);
+           add_gpu_vma_to_revalidate_list(&gpu_vma, &gpu_vm);
+           dma_resv_unlock(&gpu_vm->resv);
+           gpu_vma->saved_seq = seq;
+   }
+
+   // The usual revalidation goes here.
+
+   // Final userptr sequence validation may not happen before the
+   // submission dma_fence is added to the gpu_vm's resv, from the POV
+   // of the MMU invalidation notifier. Hence the
+   // userptr_notifier_lock that makes them appear atomic.
+
+   add_dependencies(&gpu_job, &gpu_vm->resv);
+   down_read(&gpu_vm->userptr_notifier_lock);
+   if (mmu_interval_read_retry(&gpu_vma->userptr_interval, gpu_vma->saved_seq)) {
+          up_read(&gpu_vm->userptr_notifier_lock);
+          goto retry;
+   }
+
+   job_dma_fence = gpu_submit(&gpu_job);
+
+   add_dma_fence(job_dma_fence, &gpu_vm->resv);
+
+   for_each_external_obj(gpu_vm, &obj)
+          add_dma_fence(job_dma_fence, &obj->resv);
+
+   dma_resv_unlock_all_resv_locks();
+   up_read(&gpu_vm->userptr_notifier_lock);
+   up_write(&gpu_vm->lock);
+
+The code between ``mmu_interval_read_begin()`` and the
+``mmu_interval_read_retry()`` marks the read side critical section of
+what we call the ``userptr_seqlock``. In reality, the gpu_vm's userptr
+gpu_vma list is looped through, and the check is done for *all* of its
+userptr gpu_vmas, although we only show a single one here.
+
+The userptr gpu_vma MMU invalidation notifier might be called from
+reclaim context and, again, to avoid locking order violations, we can't
+take any dma_resv lock or the gpu_vm->lock from within it.
+
+.. _Invalidation example:
+.. code-block:: C
+
+  bool gpu_vma_userptr_invalidate(userptr_interval, cur_seq)
+  {
+          // Make sure the exec function either sees the new sequence
+          // and backs off or we wait for the dma-fence:
+
+          down_write(&gpu_vm->userptr_notifier_lock);
+          mmu_interval_set_seq(userptr_interval, cur_seq);
+          up_write(&gpu_vm->userptr_notifier_lock);
+
+          // At this point, the exec function can't succeed in
+          // submitting a new job, because cur_seq is an invalid
+          // sequence number and will always cause a retry. When all
+          // invalidation callbacks are done, the mmu notifier core will flip
+          // the sequence number to a valid one. However we need to
+          // stop gpu access to the old pages here.
+
+          dma_resv_wait_timeout(&gpu_vm->resv, DMA_RESV_USAGE_BOOKKEEP,
+                                false, MAX_SCHEDULE_TIMEOUT);
+          return true;
+  }
+
+When this invalidation notifier returns, the GPU can no longer be
+accessing the old pages of the userptr gpu_vma and needs to redo the
+page-binding before a new GPU submission can succeed.
+
+Efficient userptr gpu_vma exec_function iteration
+_________________________________________________
+
+If the gpu_vm's list of userptr gpu_vmas becomes large, it's
+inefficient to iterate through the complete list of userptrs on each
+exec function to check whether each userptr gpu_vma's saved
+sequence number is stale. A solution to this is to put all
+*invalidated* userptr gpu_vmas on a separate gpu_vm list and
+only check the gpu_vmas present on this list on each exec
+function. This list will then lend itself very well to the spinlock
+locking scheme that is
+:ref:`described in the spinlock iteration section <Spinlock iteration>`, since
+in the mmu notifier, where we add the invalidated gpu_vmas to the
+list, it's not possible to take any outer locks like the
+``gpu_vm->lock`` or the ``gpu_vm->resv`` lock. Note that the
+``gpu_vm->lock`` still needs to be taken while iterating to ensure the list is
+complete, as also mentioned in that section.
+
+If using an invalidated userptr list like this, the retry check in the
+exec function trivially becomes a check for the invalidated list being
+empty, as in the sketch below.
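+
+A sketch of that simplified retry check, with an illustrative
+``invalidated_userptr_list`` member name:
+
+.. code-block:: C
+
+   add_dependencies(&gpu_job, &gpu_vm->resv);
+   down_read(&gpu_vm->userptr_notifier_lock);
+   // Additions to the list happen with the userptr_notifier_lock held
+   // in write mode in the mmu notifier, so this check is stable here.
+   if (!list_empty(&gpu_vm->invalidated_userptr_list)) {
+           up_read(&gpu_vm->userptr_notifier_lock);
+           goto retry;
+   }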
+
+Locking at bind and unbind time
+===============================
+
+At bind time, assuming a GEM object backed gpu_vma, each
+gpu_vma needs to be associated with a gpu_vm_bo and that
+gpu_vm_bo in turn needs to be added to the GEM object's
+gpu_vm_bo list, and possibly to the gpu_vm's external object
+list. This is referred to as *linking* the gpu_vma, and typically
+requires that the ``gpu_vm->lock`` and the ``gem_object->gpuva_lock``
+are held. When unlinking a gpu_vma the same locks should be held,
+and that ensures that when iterating over ``gpu_vmas``, either under
+the ``gpu_vm->resv`` or the GEM object's dma_resv, the gpu_vmas
+stay alive as long as the lock under which we iterate is not released. For
+userptr gpu_vmas it's similarly required that during vma destroy, the
+outer ``gpu_vm->lock`` is held, since otherwise when iterating over
+the invalidated userptr list as described in the previous section,
+there is nothing keeping those userptr gpu_vmas alive.
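+
+As an illustrative sketch, linking at bind time might look as follows,
+with all function names hypothetical:
+
+.. code-block:: C
+
+   // Called at bind time with the gpu_vm->lock held.
+   void link_gpu_vma(struct gpu_vma *gpu_vma, struct gem_object *obj)
+   {
+           // Often the same lock as obj's dma_resv, see above.
+           mutex_lock(&obj->gpuva_lock);
+           gpu_vm_bo = lookup_or_create_gpu_vm_bo(gpu_vm, obj);
+           add_gpu_vma_to_gpu_vm_bo(&gpu_vm_bo, &gpu_vma);
+           if (is_external_object(gpu_vm, obj))
+                   add_gpu_vm_bo_to_extobj_list(&gpu_vm->extobj_list, &gpu_vm_bo);
+           mutex_unlock(&obj->gpuva_lock);
+   }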
+
+Locking for recoverable page-fault page-table updates
+=====================================================
+
+There are two important things we need to ensure with locking for
+recoverable page-faults:
+
+* At the time we return pages back to the system / allocator for
+  reuse, there should be no remaining GPU mappings and any GPU TLB
+  must have been flushed.
+* The unmapping and mapping of a gpu_vma must not race.
+
+Since the unmapping (or zapping) of GPU ptes typically takes place
+where it is hard or even impossible to take any outer level locks, we
+must either introduce a new lock that is held at both mapping and
+unmapping time, or look at the locks we do hold at unmapping time and
+make sure that they are held also at mapping time. For userptr
+gpu_vmas, the ``userptr_seqlock`` is held in write mode in the mmu
+invalidation notifier where zapping happens. Hence, if the
+``userptr_seqlock`` as well as the ``gpu_vm->userptr_notifier_lock``
+are held in read mode during mapping, it will not race with the
+zapping. For GEM object backed gpu_vmas, zapping will take place under
+the GEM object's dma_resv, and ensuring that the dma_resv is also held
+when populating the page-tables for any gpu_vma pointing to the GEM
+object will similarly ensure we are race-free.
+
+If any part of the mapping is performed asynchronously
+under a dma-fence with these locks released, the zapping will need to
+wait for that dma-fence to signal under the relevant lock before
+starting to modify the page-table.
+
+Since modifying the
+page-table structure in a way that frees up page-table memory
+might also require outer level locks, the zapping of GPU ptes
+typically focuses only on zeroing page-table or page-directory entries
+and flushing TLB, whereas freeing of page-table memory is deferred to
+unbind or rebind time.
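+
+As a sketch under the scheme just described, a zapping function for a
+GEM object backed gpu_vma might look as follows, with illustrative
+function names:
+
+.. code-block:: C
+
+   // Called, for example, during eviction or shrinking with the GEM
+   // object's dma_resv held, which is also held when populating the
+   // page-tables for any gpu_vma pointing to the object.
+   void zap_gpu_vma(struct gpu_vma *gpu_vma)
+   {
+           // Wait for any pending asynchronous page-table updates.
+           wait_for_pending_binds(&gpu_vma);
+           // Only zero ptes and flush TLB; freeing page-table memory
+           // might require outer locks and is deferred to unbind time.
+           zero_page_table_entries(&gpu_vma);
+           flush_gpu_tlb(&gpu_vma);
+   }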
diff --git a/Documentation/gpu/imagination/index.rst b/Documentation/gpu/imagination/index.rst
index dc9579e758c3ec9a3dbc504ce07e7991f01abe2e..0c1e247cea41eb3aad466b22806fa0b2c15ed65d 100644
--- a/Documentation/gpu/imagination/index.rst
+++ b/Documentation/gpu/imagination/index.rst
@@ -3,7 +3,7 @@ drm/imagination PowerVR Graphics Driver
 =======================================
 
 .. kernel-doc:: drivers/gpu/drm/imagination/pvr_drv.c
-   :doc: PowerVR Graphics Driver
+   :doc: PowerVR (Series 6 and later) and IMG Graphics Driver
 
 Contents
 ========
diff --git a/Documentation/gpu/imagination/uapi.rst b/Documentation/gpu/imagination/uapi.rst
index 2227ea7e6222c0aa6f8b84bb379d4026d3263937..7502413d0a939bb13a0df0cc0efebfcf69e5daf9 100644
--- a/Documentation/gpu/imagination/uapi.rst
+++ b/Documentation/gpu/imagination/uapi.rst
@@ -45,9 +45,6 @@ DEV_QUERY
                  drm_pvr_heap
                  drm_pvr_dev_query_heap_info
 
-.. kernel-doc:: include/uapi/drm/pvr_drm.h
-   :doc: Flags for DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
-
 .. kernel-doc:: include/uapi/drm/pvr_drm.h
    :identifiers: drm_pvr_static_data_area_usage
                  drm_pvr_static_data_area
@@ -121,7 +118,7 @@ CREATE_FREE_LIST and DESTROY_FREE_LIST
    :identifiers: drm_pvr_ioctl_destroy_free_list_args
 
 CREATE_HWRT_DATASET and DESTROY_HWRT_DATASET
---------------------------------------
+--------------------------------------------
 .. kernel-doc:: include/uapi/drm/pvr_drm.h
    :doc: PowerVR IOCTL CREATE_HWRT_DATASET and DESTROY_HWRT_DATASET interfaces
 
diff --git a/Documentation/gpu/implementation_guidelines.rst b/Documentation/gpu/implementation_guidelines.rst
index 138e637dcc6b244c9060904b00dffed4d596ca38..dbccfa72f1c9d6ed6ec31694afaa408d58bbb302 100644
--- a/Documentation/gpu/implementation_guidelines.rst
+++ b/Documentation/gpu/implementation_guidelines.rst
@@ -7,3 +7,4 @@ Misc DRM driver uAPI- and feature implementation guidelines
 .. toctree::
 
    drm-vm-bind-async
+   drm-vm-bind-locking
diff --git a/Documentation/gpu/rfc/xe.rst b/Documentation/gpu/rfc/xe.rst
index c29113a0ac301126c9f58f97b9a41b738f555f6e..ceb21219d52ecdbcbae194d2a5ea7e25a54fb49c 100644
--- a/Documentation/gpu/rfc/xe.rst
+++ b/Documentation/gpu/rfc/xe.rst
@@ -123,10 +123,15 @@ Documentation should include:
 
  * O(1) complexity under VM_BIND.
 
+The document is now included in the drm documentation :doc:`here </gpu/drm-vm-bind-async>`.
+
 Some parts of userptr like mmu_notifiers should become GPUVA or DRM helpers when
 the second driver supporting VM_BIND+userptr appears. Details to be defined when
 the time comes.
 
+The DRM GPUVM helpers do not yet include the userptr parts, but discussions
+about implementing them are ongoing.
+
 Long running compute: minimal data structure/scaffolding
 --------------------------------------------------------
 The generic scheduler code needs to include the handling of endless compute
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index b62c7fa0c2bcc3b938a0a31113b261c40446ffda..41a264bf84ce02127ee00ac646b216ed532a9963 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -337,8 +337,8 @@ connector register/unregister fixes
 
 Level: Intermediate
 
-Remove load/unload callbacks from all non-DRIVER_LEGACY drivers
----------------------------------------------------------------
+Remove load/unload callbacks
+----------------------------
 
 The load/unload callbacks in struct &drm_driver are very much midlayers, plus
 for historical reasons they get the ordering wrong (and we can't fix that)
@@ -347,8 +347,7 @@ between setting up the &drm_driver structure and calling drm_dev_register().
 - Rework drivers to no longer use the load/unload callbacks, directly coding the
   load/unload sequence into the driver's probe function.
 
-- Once all non-DRIVER_LEGACY drivers are converted, disallow the load/unload
-  callbacks for all modern drivers.
+- Once all drivers are converted, remove the load/unload callbacks.
 
 Contact: Daniel Vetter
 
@@ -782,6 +781,29 @@ Contact: Hans de Goede
 
 Level: Advanced
 
+Buffer age or other damage accumulation algorithm for buffer damage
+===================================================================
+
+Drivers that do per-buffer uploads need buffer damage handling (rather than
+frame damage like drivers that do per-plane or per-CRTC uploads), but there is
+no support to get the buffer age or any other damage accumulation algorithm.
+
+For this reason, the damage helpers just fall back to a full plane update if the
+framebuffer attached to a plane has changed since the last page-flip. Drivers
+set &drm_plane_state.ignore_damage_clips to true as an indication to the
+drm_atomic_helper_damage_iter_init() and drm_atomic_helper_damage_iter_next()
+helpers that the damage clips should be ignored.
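+
+As a rough sketch of the current opt-out (the ``foo_`` names are
+placeholders, not an existing driver):
+
+.. code-block:: c
+
+  static int foo_plane_atomic_check(struct drm_plane *plane,
+                                    struct drm_atomic_state *state)
+  {
+          struct drm_plane_state *new_state =
+                  drm_atomic_get_new_plane_state(state, plane);
+
+          /* Damage clips describe frame damage, which is not usable for
+           * per-buffer uploads, so request full plane updates instead. */
+          new_state->ignore_damage_clips = true;
+
+          return 0;
+  }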
+
+This should be improved to get damage tracking properly working on drivers that
+do per-buffer uploads.
+
+More information about damage tracking and references to learning materials can
+be found in :ref:`damage_tracking_properties`.
+
+Contact: Javier Martinez Canillas <javierm@redhat.com>
+
+Level: Advanced
+
 Outside DRM
 ===========
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 0ba904b46efe2cb5359b2522076e66f435405b0a..d4b46b3db022f457efbfb180a98f947721f68c0e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10397,6 +10397,7 @@ M:	Frank Binns <frank.binns@imgtec.com>
 M:	Donald Robson <donald.robson@imgtec.com>
 M:	Matt Coster <matt.coster@imgtec.com>
 S:	Supported
+T:	git git://anongit.freedesktop.org/drm/drm-misc
 F:	Documentation/devicetree/bindings/gpu/img,powervr.yaml
 F:	Documentation/gpu/imagination/
 F:	drivers/gpu/drm/imagination/
diff --git a/drivers/accel/drm_accel.c b/drivers/accel/drm_accel.c
index 294b572a9c331f060c3d7af09ae6a320945a2a35..24cac4c0274bb665a35274216726b4e80488bea6 100644
--- a/drivers/accel/drm_accel.c
+++ b/drivers/accel/drm_accel.c
@@ -11,6 +11,7 @@
 #include <linux/idr.h>
 
 #include <drm/drm_accel.h>
+#include <drm/drm_auth.h>
 #include <drm/drm_debugfs.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_file.h>
diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c
index 5d3cc30009cce69ae9a7fc0c4790be4a2f8dac00..832464f2833ad8e660ea73ad0b223ecc891dede6 100644
--- a/drivers/accel/qaic/mhi_controller.c
+++ b/drivers/accel/qaic/mhi_controller.c
@@ -469,7 +469,7 @@ static void mhi_status_cb(struct mhi_controller *mhi_cntrl, enum mhi_callback re
 		pci_err(qdev->pdev, "Fatal error received from device. Attempting to recover\n");
 	/* this event occurs in non-atomic context */
 	if (reason == MHI_CB_SYS_ERROR)
-		qaic_dev_reset_clean_local_state(qdev, true);
+		qaic_dev_reset_clean_local_state(qdev);
 }
 
 static int mhi_reset_and_async_power_up(struct mhi_controller *mhi_cntrl)
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
index bc40d52dc0104c996e72e7083935b015dd69d59b..582836f9538f958b4c4cebf3857d85610cb01c34 100644
--- a/drivers/accel/qaic/qaic.h
+++ b/drivers/accel/qaic/qaic.h
@@ -31,6 +31,15 @@
 #define to_drm(qddev) (&(qddev)->drm)
 #define to_accel_kdev(qddev) (to_drm(qddev)->accel->kdev) /* Return Linux device of accel node */
 
+enum __packed dev_states {
+	/* Device is offline or will be very soon */
+	QAIC_OFFLINE,
+	/* Device is booting, not clear if it's in a usable state */
+	QAIC_BOOT,
+	/* Device is fully operational */
+	QAIC_ONLINE,
+};
+
 extern bool datapath_polling;
 
 struct qaic_user {
@@ -121,8 +130,8 @@ struct qaic_device {
 	struct workqueue_struct	*cntl_wq;
 	/* Synchronizes all the users of device during cleanup */
 	struct srcu_struct	dev_lock;
-	/* true: Device under reset; false: Device not under reset */
-	bool			in_reset;
+	/* Track the state of the device during resets */
+	enum dev_states		dev_state;
 	/* true: single MSI is used to operate device */
 	bool			single_msi;
 	/*
@@ -274,7 +283,7 @@ void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id);
 void release_dbc(struct qaic_device *qdev, u32 dbc_id);
 
 void wake_all_cntl(struct qaic_device *qdev);
-void qaic_dev_reset_clean_local_state(struct qaic_device *qdev, bool exit_reset);
+void qaic_dev_reset_clean_local_state(struct qaic_device *qdev);
 
 struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
 
diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c
index 84915824be5434bffdd691ee406a6831ff3d8149..9e8a8cbadf6bb99f1341ac1d62fb890e7d2ea280 100644
--- a/drivers/accel/qaic/qaic_control.c
+++ b/drivers/accel/qaic/qaic_control.c
@@ -1022,7 +1022,8 @@ static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u
 	int xfer_count = 0;
 	int retry_count;
 
-	if (qdev->in_reset) {
+	/* Allow QAIC_BOOT state since we need to check control protocol version */
+	if (qdev->dev_state == QAIC_OFFLINE) {
 		mutex_unlock(&qdev->cntl_mutex);
 		return ERR_PTR(-ENODEV);
 	}
@@ -1306,7 +1307,7 @@ int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
 	qdev = usr->qddev->qdev;
 
 	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
-	if (qdev->in_reset) {
+	if (qdev->dev_state != QAIC_ONLINE) {
 		srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
 		srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
 		return -ENODEV;
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index 8998c28e566e19c424e3ae7796d261ec195ee4e3..cf2898eda7ae3e88407c4ce7477e109e94282ad9 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -690,7 +690,7 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
 
 	qdev = usr->qddev->qdev;
 	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
-	if (qdev->in_reset) {
+	if (qdev->dev_state != QAIC_ONLINE) {
 		ret = -ENODEV;
 		goto unlock_dev_srcu;
 	}
@@ -749,7 +749,7 @@ int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 
 	qdev = usr->qddev->qdev;
 	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
-	if (qdev->in_reset) {
+	if (qdev->dev_state != QAIC_ONLINE) {
 		ret = -ENODEV;
 		goto unlock_dev_srcu;
 	}
@@ -970,7 +970,7 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
 
 	qdev = usr->qddev->qdev;
 	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
-	if (qdev->in_reset) {
+	if (qdev->dev_state != QAIC_ONLINE) {
 		ret = -ENODEV;
 		goto unlock_dev_srcu;
 	}
@@ -1341,7 +1341,7 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
 
 	qdev = usr->qddev->qdev;
 	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
-	if (qdev->in_reset) {
+	if (qdev->dev_state != QAIC_ONLINE) {
 		ret = -ENODEV;
 		goto unlock_dev_srcu;
 	}
@@ -1497,7 +1497,7 @@ void irq_polling_work(struct work_struct *work)
 	rcu_id = srcu_read_lock(&dbc->ch_lock);
 
 	while (1) {
-		if (dbc->qdev->in_reset) {
+		if (dbc->qdev->dev_state != QAIC_ONLINE) {
 			srcu_read_unlock(&dbc->ch_lock, rcu_id);
 			return;
 		}
@@ -1687,7 +1687,7 @@ int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 
 	qdev = usr->qddev->qdev;
 	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
-	if (qdev->in_reset) {
+	if (qdev->dev_state != QAIC_ONLINE) {
 		ret = -ENODEV;
 		goto unlock_dev_srcu;
 	}
@@ -1756,7 +1756,7 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file
 
 	qdev = usr->qddev->qdev;
 	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
-	if (qdev->in_reset) {
+	if (qdev->dev_state != QAIC_ONLINE) {
 		ret = -ENODEV;
 		goto unlock_dev_srcu;
 	}
@@ -1847,7 +1847,7 @@ int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
 
 	qdev = usr->qddev->qdev;
 	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
-	if (qdev->in_reset) {
+	if (qdev->dev_state != QAIC_ONLINE) {
 		ret = -ENODEV;
 		goto unlock_dev_srcu;
 	}
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index b12226385003debfc34ae47748bb4382d96c2d15..2a313eb69b1219b5b985dc39b9b9742690c97ccf 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -8,6 +8,7 @@
 #include <linux/idr.h>
 #include <linux/interrupt.h>
 #include <linux/list.h>
+#include <linux/kobject.h>
 #include <linux/kref.h>
 #include <linux/mhi.h>
 #include <linux/module.h>
@@ -43,9 +44,6 @@ MODULE_PARM_DESC(datapath_polling, "Operate the datapath in polling mode");
 static bool link_up;
 static DEFINE_IDA(qaic_usrs);
 
-static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id);
-static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id);
-
 static void free_usr(struct kref *kref)
 {
 	struct qaic_user *usr = container_of(kref, struct qaic_user, ref_count);
@@ -64,7 +62,7 @@ static int qaic_open(struct drm_device *dev, struct drm_file *file)
 	int ret;
 
 	rcu_id = srcu_read_lock(&qdev->dev_lock);
-	if (qdev->in_reset) {
+	if (qdev->dev_state != QAIC_ONLINE) {
 		ret = -ENODEV;
 		goto dev_unlock;
 	}
@@ -121,7 +119,7 @@ static void qaic_postclose(struct drm_device *dev, struct drm_file *file)
 	if (qddev) {
 		qdev = qddev->qdev;
 		qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
-		if (!qdev->in_reset) {
+		if (qdev->dev_state == QAIC_ONLINE) {
 			qaic_release_usr(qdev, usr);
 			for (i = 0; i < qdev->num_dbc; ++i)
 				if (qdev->dbc[i].usr && qdev->dbc[i].usr->handle == usr->handle)
@@ -183,13 +181,6 @@ static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id)
 
 	qddev->partition_id = partition_id;
 
-	/*
-	 * drm_dev_unregister() sets the driver data to NULL and
-	 * drm_dev_register() does not update the driver data. During a SOC
-	 * reset drm dev is unregistered and registered again leaving the
-	 * driver data to NULL.
-	 */
-	dev_set_drvdata(to_accel_kdev(qddev), drm->accel);
 	ret = drm_dev_register(drm, 0);
 	if (ret)
 		pci_dbg(qdev->pdev, "drm_dev_register failed %d\n", ret);
@@ -203,7 +194,6 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
 	struct drm_device *drm = to_drm(qddev);
 	struct qaic_user *usr;
 
-	drm_dev_get(drm);
 	drm_dev_unregister(drm);
 	qddev->partition_id = 0;
 	/*
@@ -232,7 +222,6 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
 		mutex_lock(&qddev->users_mutex);
 	}
 	mutex_unlock(&qddev->users_mutex);
-	drm_dev_put(drm);
 }
 
 static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
@@ -254,8 +243,6 @@ static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
 
 	qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
 
-	qdev->in_reset = false;
-
 	dev_set_drvdata(&mhi_dev->dev, qdev);
 	qdev->cntl_ch = mhi_dev;
 
@@ -265,6 +252,7 @@ static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
 		return ret;
 	}
 
+	qdev->dev_state = QAIC_BOOT;
 	ret = get_cntl_version(qdev, NULL, &major, &minor);
 	if (ret || major != CNTL_MAJOR || minor > CNTL_MINOR) {
 		pci_err(qdev->pdev, "%s: Control protocol version (%d.%d) not supported. Supported version is (%d.%d). Ret: %d\n",
@@ -272,8 +260,8 @@ static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
 		ret = -EINVAL;
 		goto close_control;
 	}
-
-	ret = qaic_create_drm_device(qdev, QAIC_NO_PARTITION);
+	qdev->dev_state = QAIC_ONLINE;
+	kobject_uevent(&(to_accel_kdev(qdev->qddev))->kobj, KOBJ_ONLINE);
 
 	return ret;
 
@@ -291,7 +279,8 @@ static void qaic_notify_reset(struct qaic_device *qdev)
 {
 	int i;
 
-	qdev->in_reset = true;
+	kobject_uevent(&(to_accel_kdev(qdev->qddev))->kobj, KOBJ_OFFLINE);
+	qdev->dev_state = QAIC_OFFLINE;
 	/* wake up any waiters to avoid waiting for timeouts at sync */
 	wake_all_cntl(qdev);
 	for (i = 0; i < qdev->num_dbc; ++i)
@@ -299,21 +288,15 @@ static void qaic_notify_reset(struct qaic_device *qdev)
 	synchronize_srcu(&qdev->dev_lock);
 }
 
-void qaic_dev_reset_clean_local_state(struct qaic_device *qdev, bool exit_reset)
+void qaic_dev_reset_clean_local_state(struct qaic_device *qdev)
 {
 	int i;
 
 	qaic_notify_reset(qdev);
 
-	/* remove drmdevs to prevent new users from coming in */
-	qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
-
 	/* start tearing things down */
 	for (i = 0; i < qdev->num_dbc; ++i)
 		release_dbc(qdev, i);
-
-	if (exit_reset)
-		qdev->in_reset = false;
 }
 
 static void cleanup_qdev(struct qaic_device *qdev)
@@ -338,6 +321,7 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
 	if (!qdev)
 		return NULL;
 
+	qdev->dev_state = QAIC_OFFLINE;
 	if (id->device == PCI_DEV_AIC100) {
 		qdev->num_dbc = 16;
 		qdev->dbc = devm_kcalloc(&pdev->dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
@@ -499,15 +483,21 @@ static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto cleanup_qdev;
 	}
 
+	ret = qaic_create_drm_device(qdev, QAIC_NO_PARTITION);
+	if (ret)
+		goto cleanup_qdev;
+
 	qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq,
 						       qdev->single_msi);
 	if (IS_ERR(qdev->mhi_cntrl)) {
 		ret = PTR_ERR(qdev->mhi_cntrl);
-		goto cleanup_qdev;
+		goto cleanup_drm_dev;
 	}
 
 	return 0;
 
+cleanup_drm_dev:
+	qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
 cleanup_qdev:
 	cleanup_qdev(qdev);
 	return ret;
@@ -520,7 +510,8 @@ static void qaic_pci_remove(struct pci_dev *pdev)
 	if (!qdev)
 		return;
 
-	qaic_dev_reset_clean_local_state(qdev, false);
+	qaic_dev_reset_clean_local_state(qdev);
+	qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
 	qaic_mhi_free_controller(qdev->mhi_cntrl, link_up);
 	cleanup_qdev(qdev);
 }
@@ -543,14 +534,13 @@ static void qaic_pci_reset_prepare(struct pci_dev *pdev)
 
 	qaic_notify_reset(qdev);
 	qaic_mhi_start_reset(qdev->mhi_cntrl);
-	qaic_dev_reset_clean_local_state(qdev, false);
+	qaic_dev_reset_clean_local_state(qdev);
 }
 
 static void qaic_pci_reset_done(struct pci_dev *pdev)
 {
 	struct qaic_device *qdev = pci_get_drvdata(pdev);
 
-	qdev->in_reset = false;
 	qaic_mhi_reset_done(qdev->mhi_cntrl);
 }
 
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 64012cda4d1267077c5107354c037f2712ec6122..d944d5298eca822215c21d8a902766495d11c83b 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -112,10 +112,7 @@ config CFAG12864B
 	depends on X86
 	depends on FB
 	depends on KS0108
-	select FB_SYS_FILLRECT
-	select FB_SYS_COPYAREA
-	select FB_SYS_IMAGEBLIT
-	select FB_SYS_FOPS
+	select FB_SYSMEM_HELPERS
 	default n
 	help
 	  If you have a Crystalfontz 128x64 2-color LCD, cfag12864b Series,
@@ -170,10 +167,7 @@ config IMG_ASCII_LCD
 config HT16K33
 	tristate "Holtek Ht16K33 LED controller with keyscan"
 	depends on FB && I2C && INPUT
-	select FB_SYS_FOPS
-	select FB_SYS_FILLRECT
-	select FB_SYS_COPYAREA
-	select FB_SYS_IMAGEBLIT
+	select FB_SYSMEM_HELPERS
 	select INPUT_MATRIXKMAP
 	select FB_BACKLIGHT
 	select NEW_LEDS
diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c
index 729845bcc803afebb3ffec6bdab24b6f77f0e6cd..5ba19c339f088f76eaad57a9db8e5da3a193b9a6 100644
--- a/drivers/auxdisplay/cfag12864bfb.c
+++ b/drivers/auxdisplay/cfag12864bfb.c
@@ -51,16 +51,15 @@ static int cfag12864bfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
 	struct page *pages = virt_to_page(cfag12864b_buffer);
 
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
 	return vm_map_pages_zero(vma, &pages, 1);
 }
 
 static const struct fb_ops cfag12864bfb_ops = {
 	.owner = THIS_MODULE,
-	.fb_read = fb_sys_read,
-	.fb_write = fb_sys_write,
-	.fb_fillrect = sys_fillrect,
-	.fb_copyarea = sys_copyarea,
-	.fb_imageblit = sys_imageblit,
+	__FB_DEFAULT_SYSMEM_OPS_RDWR,
+	__FB_DEFAULT_SYSMEM_OPS_DRAW,
 	.fb_mmap = cfag12864bfb_mmap,
 };
 
@@ -72,6 +71,7 @@ static int cfag12864bfb_probe(struct platform_device *device)
 	if (!info)
 		goto none;
 
+	info->flags = FBINFO_VIRTFB;
 	info->screen_buffer = cfag12864b_buffer;
 	info->screen_size = CFAG12864B_SIZE;
 	info->fbops = &cfag12864bfb_ops;
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index 3a2d883872249e98713b1b813be84b0657388ca9..a90430b7d07ba438c40685ecc043cd0fdd745775 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -351,17 +351,16 @@ static int ht16k33_mmap(struct fb_info *info, struct vm_area_struct *vma)
 	struct ht16k33_priv *priv = info->par;
 	struct page *pages = virt_to_page(priv->fbdev.buffer);
 
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
 	return vm_map_pages_zero(vma, &pages, 1);
 }
 
 static const struct fb_ops ht16k33_fb_ops = {
 	.owner = THIS_MODULE,
-	.fb_read = fb_sys_read,
-	.fb_write = fb_sys_write,
+	__FB_DEFAULT_SYSMEM_OPS_RDWR,
 	.fb_blank = ht16k33_blank,
-	.fb_fillrect = sys_fillrect,
-	.fb_copyarea = sys_copyarea,
-	.fb_imageblit = sys_imageblit,
+	__FB_DEFAULT_SYSMEM_OPS_DRAW,
 	.fb_mmap = ht16k33_mmap,
 };
 
@@ -640,6 +639,7 @@ static int ht16k33_fbdev_probe(struct device *dev, struct ht16k33_priv *priv,
 
 	INIT_DELAYED_WORK(&priv->work, ht16k33_fb_update);
 	fbdev->info->fbops = &ht16k33_fb_ops;
+	fbdev->info->flags |= FBINFO_VIRTFB;
 	fbdev->info->screen_buffer = fbdev->buffer;
 	fbdev->info->screen_size = HT16K33_FB_SIZE;
 	fbdev->info->fix = ht16k33_fb_fix;
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
index 25834557e486538c9dd72c3f75b7eada09f25ba0..43b09cf193bb7ddd41e03b902eaf966ff88d5cce 100644
--- a/drivers/char/agp/Makefile
+++ b/drivers/char/agp/Makefile
@@ -1,12 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 agpgart-y := backend.o generic.o isoch.o
 
-ifeq ($(CONFIG_DRM_LEGACY),y)
-agpgart-$(CONFIG_COMPAT)	+= compat_ioctl.o
-agpgart-y			+= frontend.o
-endif
-
-
 obj-$(CONFIG_AGP)		+= agpgart.o
 obj-$(CONFIG_AGP_ALI)		+= ali-agp.o
 obj-$(CONFIG_AGP_ATI)		+= ati-agp.o
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 8771dcc9b8e2f762d895979c32a9f9ffda12f61a..5c36ab85f80b77feb1f57027a46d831a1327a9c7 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -185,15 +185,6 @@ void agp_put_bridge(struct agp_bridge_data *bridge);
 int agp_add_bridge(struct agp_bridge_data *bridge);
 void agp_remove_bridge(struct agp_bridge_data *bridge);
 
-/* Frontend routines. */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int agp_frontend_initialize(void);
-void agp_frontend_cleanup(void);
-#else
-static inline int agp_frontend_initialize(void) { return 0; }
-static inline void agp_frontend_cleanup(void) {}
-#endif
-
 /* Generic routines. */
 void agp_generic_enable(struct agp_bridge_data *bridge, u32 mode);
 int agp_generic_create_gatt_table(struct agp_bridge_data *bridge);
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 0e19c600db5318a639ec3a71004db29e646a4edf..1776afd3ee0785b14b8d6a529bd2d93cbd3248f0 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -293,13 +293,6 @@ int agp_add_bridge(struct agp_bridge_data *bridge)
 	}
 
 	if (list_empty(&agp_bridges)) {
-		error = agp_frontend_initialize();
-		if (error) {
-			dev_info(&bridge->dev->dev,
-				 "agp_frontend_initialize() failed\n");
-			goto frontend_err;
-		}
-
 		dev_info(&bridge->dev->dev, "AGP aperture is %dM @ 0x%lx\n",
 			 bridge->driver->fetch_size(), bridge->gart_bus_addr);
 
@@ -308,8 +301,6 @@ int agp_add_bridge(struct agp_bridge_data *bridge)
 	list_add(&bridge->list, &agp_bridges);
 	return 0;
 
-frontend_err:
-	agp_backend_cleanup(bridge);
 err_out:
 	module_put(bridge->driver->owner);
 err_put_bridge:
@@ -323,8 +314,6 @@ void agp_remove_bridge(struct agp_bridge_data *bridge)
 {
 	agp_backend_cleanup(bridge);
 	list_del(&bridge->list);
-	if (list_empty(&agp_bridges))
-		agp_frontend_cleanup();
 	module_put(bridge->driver->owner);
 }
 EXPORT_SYMBOL_GPL(agp_remove_bridge);
diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
deleted file mode 100644
index 52ffe1706ce0512e472992b0897c82e5e7c155fe..0000000000000000000000000000000000000000
--- a/drivers/char/agp/compat_ioctl.c
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- * AGPGART driver frontend compatibility ioctls
- * Copyright (C) 2004 Silicon Graphics, Inc.
- * Copyright (C) 2002-2003 Dave Jones
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight, Inc.
- * Copyright (C) 1999 Xi Graphics, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <linux/agpgart.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include "agp.h"
-#include "compat_ioctl.h"
-
-static int compat_agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
-{
-	struct agp_info32 userinfo;
-	struct agp_kern_info kerninfo;
-
-	agp_copy_info(agp_bridge, &kerninfo);
-
-	userinfo.version.major = kerninfo.version.major;
-	userinfo.version.minor = kerninfo.version.minor;
-	userinfo.bridge_id = kerninfo.device->vendor |
-	    (kerninfo.device->device << 16);
-	userinfo.agp_mode = kerninfo.mode;
-	userinfo.aper_base = (compat_long_t)kerninfo.aper_base;
-	userinfo.aper_size = kerninfo.aper_size;
-	userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory;
-	userinfo.pg_used = kerninfo.current_memory;
-
-	if (copy_to_user(arg, &userinfo, sizeof(userinfo)))
-		return -EFAULT;
-
-	return 0;
-}
-
-static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
-{
-	struct agp_region32 ureserve;
-	struct agp_region kreserve;
-	struct agp_client *client;
-	struct agp_file_private *client_priv;
-
-	DBG("");
-	if (copy_from_user(&ureserve, arg, sizeof(ureserve)))
-		return -EFAULT;
-
-	if ((unsigned) ureserve.seg_count >= ~0U/sizeof(struct agp_segment32))
-		return -EFAULT;
-
-	kreserve.pid = ureserve.pid;
-	kreserve.seg_count = ureserve.seg_count;
-
-	client = agp_find_client_by_pid(kreserve.pid);
-
-	if (kreserve.seg_count == 0) {
-		/* remove a client */
-		client_priv = agp_find_private(kreserve.pid);
-
-		if (client_priv != NULL) {
-			set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
-			set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
-		}
-		if (client == NULL) {
-			/* client is already removed */
-			return 0;
-		}
-		return agp_remove_client(kreserve.pid);
-	} else {
-		struct agp_segment32 *usegment;
-		struct agp_segment *ksegment;
-		int seg;
-
-		if (ureserve.seg_count >= 16384)
-			return -EINVAL;
-
-		usegment = kmalloc_array(ureserve.seg_count,
-					 sizeof(*usegment),
-					 GFP_KERNEL);
-		if (!usegment)
-			return -ENOMEM;
-
-		ksegment = kmalloc_array(kreserve.seg_count,
-					 sizeof(*ksegment),
-					 GFP_KERNEL);
-		if (!ksegment) {
-			kfree(usegment);
-			return -ENOMEM;
-		}
-
-		if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
-				   sizeof(*usegment) * ureserve.seg_count)) {
-			kfree(usegment);
-			kfree(ksegment);
-			return -EFAULT;
-		}
-
-		for (seg = 0; seg < ureserve.seg_count; seg++) {
-			ksegment[seg].pg_start = usegment[seg].pg_start;
-			ksegment[seg].pg_count = usegment[seg].pg_count;
-			ksegment[seg].prot = usegment[seg].prot;
-		}
-
-		kfree(usegment);
-		kreserve.seg_list = ksegment;
-
-		if (client == NULL) {
-			/* Create the client and add the segment */
-			client = agp_create_client(kreserve.pid);
-
-			if (client == NULL) {
-				kfree(ksegment);
-				return -ENOMEM;
-			}
-			client_priv = agp_find_private(kreserve.pid);
-
-			if (client_priv != NULL) {
-				set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
-				set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
-			}
-		}
-		return agp_create_segment(client, &kreserve);
-	}
-	/* Will never really happen */
-	return -EINVAL;
-}
-
-static int compat_agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
-{
-	struct agp_memory *memory;
-	struct agp_allocate32 alloc;
-
-	DBG("");
-	if (copy_from_user(&alloc, arg, sizeof(alloc)))
-		return -EFAULT;
-
-	memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
-
-	if (memory == NULL)
-		return -ENOMEM;
-
-	alloc.key = memory->key;
-	alloc.physical = memory->physical;
-
-	if (copy_to_user(arg, &alloc, sizeof(alloc))) {
-		agp_free_memory_wrap(memory);
-		return -EFAULT;
-	}
-	return 0;
-}
-
-static int compat_agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg)
-{
-	struct agp_bind32 bind_info;
-	struct agp_memory *memory;
-
-	DBG("");
-	if (copy_from_user(&bind_info, arg, sizeof(bind_info)))
-		return -EFAULT;
-
-	memory = agp_find_mem_by_key(bind_info.key);
-
-	if (memory == NULL)
-		return -EINVAL;
-
-	return agp_bind_memory(memory, bind_info.pg_start);
-}
-
-static int compat_agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
-{
-	struct agp_memory *memory;
-	struct agp_unbind32 unbind;
-
-	DBG("");
-	if (copy_from_user(&unbind, arg, sizeof(unbind)))
-		return -EFAULT;
-
-	memory = agp_find_mem_by_key(unbind.key);
-
-	if (memory == NULL)
-		return -EINVAL;
-
-	return agp_unbind_memory(memory);
-}
-
-long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-	struct agp_file_private *curr_priv = file->private_data;
-	int ret_val = -ENOTTY;
-
-	mutex_lock(&(agp_fe.agp_mutex));
-
-	if ((agp_fe.current_controller == NULL) &&
-	    (cmd != AGPIOC_ACQUIRE32)) {
-		ret_val = -EINVAL;
-		goto ioctl_out;
-	}
-	if ((agp_fe.backend_acquired != true) &&
-	    (cmd != AGPIOC_ACQUIRE32)) {
-		ret_val = -EBUSY;
-		goto ioctl_out;
-	}
-	if (cmd != AGPIOC_ACQUIRE32) {
-		if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) {
-			ret_val = -EPERM;
-			goto ioctl_out;
-		}
-		/* Use the original pid of the controller,
-		 * in case it's threaded */
-
-		if (agp_fe.current_controller->pid != curr_priv->my_pid) {
-			ret_val = -EBUSY;
-			goto ioctl_out;
-		}
-	}
-
-	switch (cmd) {
-	case AGPIOC_INFO32:
-		ret_val = compat_agpioc_info_wrap(curr_priv, (void __user *) arg);
-		break;
-
-	case AGPIOC_ACQUIRE32:
-		ret_val = agpioc_acquire_wrap(curr_priv);
-		break;
-
-	case AGPIOC_RELEASE32:
-		ret_val = agpioc_release_wrap(curr_priv);
-		break;
-
-	case AGPIOC_SETUP32:
-		ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg);
-		break;
-
-	case AGPIOC_RESERVE32:
-		ret_val = compat_agpioc_reserve_wrap(curr_priv, (void __user *) arg);
-		break;
-
-	case AGPIOC_PROTECT32:
-		ret_val = agpioc_protect_wrap(curr_priv);
-		break;
-
-	case AGPIOC_ALLOCATE32:
-		ret_val = compat_agpioc_allocate_wrap(curr_priv, (void __user *) arg);
-		break;
-
-	case AGPIOC_DEALLOCATE32:
-		ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg);
-		break;
-
-	case AGPIOC_BIND32:
-		ret_val = compat_agpioc_bind_wrap(curr_priv, (void __user *) arg);
-		break;
-
-	case AGPIOC_UNBIND32:
-		ret_val = compat_agpioc_unbind_wrap(curr_priv, (void __user *) arg);
-		break;
-
-	case AGPIOC_CHIPSET_FLUSH32:
-		break;
-	}
-
-ioctl_out:
-	DBG("ioctl returns %d\n", ret_val);
-	mutex_unlock(&(agp_fe.agp_mutex));
-	return ret_val;
-}
-
diff --git a/drivers/char/agp/compat_ioctl.h b/drivers/char/agp/compat_ioctl.h
deleted file mode 100644
index f30e0fd979635355b59ad62ebf5c49c55935e9ed..0000000000000000000000000000000000000000
--- a/drivers/char/agp/compat_ioctl.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight, Inc.
- * Copyright (C) 1999 Xi Graphics, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _AGP_COMPAT_IOCTL_H
-#define _AGP_COMPAT_IOCTL_H
-
-#include <linux/compat.h>
-#include <linux/agpgart.h>
-
-#define AGPIOC_INFO32       _IOR (AGPIOC_BASE, 0, compat_uptr_t)
-#define AGPIOC_ACQUIRE32    _IO  (AGPIOC_BASE, 1)
-#define AGPIOC_RELEASE32    _IO  (AGPIOC_BASE, 2)
-#define AGPIOC_SETUP32      _IOW (AGPIOC_BASE, 3, compat_uptr_t)
-#define AGPIOC_RESERVE32    _IOW (AGPIOC_BASE, 4, compat_uptr_t)
-#define AGPIOC_PROTECT32    _IOW (AGPIOC_BASE, 5, compat_uptr_t)
-#define AGPIOC_ALLOCATE32   _IOWR(AGPIOC_BASE, 6, compat_uptr_t)
-#define AGPIOC_DEALLOCATE32 _IOW (AGPIOC_BASE, 7, compat_int_t)
-#define AGPIOC_BIND32       _IOW (AGPIOC_BASE, 8, compat_uptr_t)
-#define AGPIOC_UNBIND32     _IOW (AGPIOC_BASE, 9, compat_uptr_t)
-#define AGPIOC_CHIPSET_FLUSH32 _IO (AGPIOC_BASE, 10)
-
-struct agp_info32 {
-	struct agp_version version;	/* version of the driver        */
-	u32 bridge_id;		/* bridge vendor/device         */
-	u32 agp_mode;		/* mode info of bridge          */
-	compat_long_t aper_base;	/* base of aperture             */
-	compat_size_t aper_size;	/* size of aperture             */
-	compat_size_t pg_total;	/* max pages (swap + system)    */
-	compat_size_t pg_system;	/* max pages (system)           */
-	compat_size_t pg_used;		/* current pages used           */
-};
-
-/*
- * The "prot" down below needs still a "sleep" flag somehow ...
- */
-struct agp_segment32 {
-	compat_off_t pg_start;		/* starting page to populate    */
-	compat_size_t pg_count;	/* number of pages              */
-	compat_int_t prot;		/* prot flags for mmap          */
-};
-
-struct agp_region32 {
-	compat_pid_t pid;		/* pid of process               */
-	compat_size_t seg_count;	/* number of segments           */
-	struct agp_segment32 *seg_list;
-};
-
-struct agp_allocate32 {
-	compat_int_t key;		/* tag of allocation            */
-	compat_size_t pg_count;	/* number of pages              */
-	u32 type;		/* 0 == normal, other devspec   */
-	u32 physical;           /* device specific (some devices
-				 * need a phys address of the
-				 * actual page behind the gatt
-				 * table)                        */
-};
-
-struct agp_bind32 {
-	compat_int_t key;		/* tag of allocation            */
-	compat_off_t pg_start;		/* starting page to populate    */
-};
-
-struct agp_unbind32 {
-	compat_int_t key;		/* tag of allocation            */
-	u32 priority;		/* priority for paging out      */
-};
-
-extern struct agp_front_data agp_fe;
-
-int agpioc_acquire_wrap(struct agp_file_private *priv);
-int agpioc_release_wrap(struct agp_file_private *priv);
-int agpioc_protect_wrap(struct agp_file_private *priv);
-int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg);
-int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg);
-struct agp_file_private *agp_find_private(pid_t pid);
-struct agp_client *agp_create_client(pid_t id);
-int agp_remove_client(pid_t id);
-int agp_create_segment(struct agp_client *client, struct agp_region *region);
-void agp_free_memory_wrap(struct agp_memory *memory);
-struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type);
-struct agp_memory *agp_find_mem_by_key(int key);
-struct agp_client *agp_find_client_by_pid(pid_t id);
-
-#endif /* _AGP_COMPAT_H */
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
deleted file mode 100644
index 321118a9cfa52b319ccfe174ceb67654cf9af53b..0000000000000000000000000000000000000000
--- a/drivers/char/agp/frontend.c
+++ /dev/null
@@ -1,1068 +0,0 @@
-/*
- * AGPGART driver frontend
- * Copyright (C) 2004 Silicon Graphics, Inc.
- * Copyright (C) 2002-2003 Dave Jones
- * Copyright (C) 1999 Jeff Hartmann
- * Copyright (C) 1999 Precision Insight, Inc.
- * Copyright (C) 1999 Xi Graphics, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
- * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mman.h>
-#include <linux/pci.h>
-#include <linux/miscdevice.h>
-#include <linux/agp_backend.h>
-#include <linux/agpgart.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/uaccess.h>
-
-#include "agp.h"
-#include "compat_ioctl.h"
-
-struct agp_front_data agp_fe;
-
-struct agp_memory *agp_find_mem_by_key(int key)
-{
-	struct agp_memory *curr;
-
-	if (agp_fe.current_controller == NULL)
-		return NULL;
-
-	curr = agp_fe.current_controller->pool;
-
-	while (curr != NULL) {
-		if (curr->key == key)
-			break;
-		curr = curr->next;
-	}
-
-	DBG("key=%d -> mem=%p", key, curr);
-	return curr;
-}
-
-static void agp_remove_from_pool(struct agp_memory *temp)
-{
-	struct agp_memory *prev;
-	struct agp_memory *next;
-
-	/* Check to see if this is even in the memory pool */
-
-	DBG("mem=%p", temp);
-	if (agp_find_mem_by_key(temp->key) != NULL) {
-		next = temp->next;
-		prev = temp->prev;
-
-		if (prev != NULL) {
-			prev->next = next;
-			if (next != NULL)
-				next->prev = prev;
-
-		} else {
-			/* This is the first item on the list */
-			if (next != NULL)
-				next->prev = NULL;
-
-			agp_fe.current_controller->pool = next;
-		}
-	}
-}
-
-/*
- * Routines for managing each client's segment list -
- * These routines handle adding and removing segments
- * to each auth'ed client.
- */
-
-static struct
-agp_segment_priv *agp_find_seg_in_client(const struct agp_client *client,
-						unsigned long offset,
-					    int size, pgprot_t page_prot)
-{
-	struct agp_segment_priv *seg;
-	int i;
-	off_t pg_start;
-	size_t pg_count;
-
-	pg_start = offset / 4096;
-	pg_count = size / 4096;
-	seg = *(client->segments);
-
-	for (i = 0; i < client->num_segments; i++) {
-		if ((seg[i].pg_start == pg_start) &&
-		    (seg[i].pg_count == pg_count) &&
-		    (pgprot_val(seg[i].prot) == pgprot_val(page_prot))) {
-			return seg + i;
-		}
-	}
-
-	return NULL;
-}
-
-static void agp_remove_seg_from_client(struct agp_client *client)
-{
-	DBG("client=%p", client);
-
-	if (client->segments != NULL) {
-		if (*(client->segments) != NULL) {
-			DBG("Freeing %p from client %p", *(client->segments), client);
-			kfree(*(client->segments));
-		}
-		DBG("Freeing %p from client %p", client->segments, client);
-		kfree(client->segments);
-		client->segments = NULL;
-	}
-}
-
-static void agp_add_seg_to_client(struct agp_client *client,
-			       struct agp_segment_priv ** seg, int num_segments)
-{
-	struct agp_segment_priv **prev_seg;
-
-	prev_seg = client->segments;
-
-	if (prev_seg != NULL)
-		agp_remove_seg_from_client(client);
-
-	DBG("Adding seg %p (%d segments) to client %p", seg, num_segments, client);
-	client->num_segments = num_segments;
-	client->segments = seg;
-}
-
-static pgprot_t agp_convert_mmap_flags(int prot)
-{
-	unsigned long prot_bits;
-
-	prot_bits = calc_vm_prot_bits(prot, 0) | VM_SHARED;
-	return vm_get_page_prot(prot_bits);
-}
-
-int agp_create_segment(struct agp_client *client, struct agp_region *region)
-{
-	struct agp_segment_priv **ret_seg;
-	struct agp_segment_priv *seg;
-	struct agp_segment *user_seg;
-	size_t i;
-
-	seg = kzalloc((sizeof(struct agp_segment_priv) * region->seg_count), GFP_KERNEL);
-	if (seg == NULL) {
-		kfree(region->seg_list);
-		region->seg_list = NULL;
-		return -ENOMEM;
-	}
-	user_seg = region->seg_list;
-
-	for (i = 0; i < region->seg_count; i++) {
-		seg[i].pg_start = user_seg[i].pg_start;
-		seg[i].pg_count = user_seg[i].pg_count;
-		seg[i].prot = agp_convert_mmap_flags(user_seg[i].prot);
-	}
-	kfree(region->seg_list);
-	region->seg_list = NULL;
-
-	ret_seg = kmalloc(sizeof(void *), GFP_KERNEL);
-	if (ret_seg == NULL) {
-		kfree(seg);
-		return -ENOMEM;
-	}
-	*ret_seg = seg;
-	agp_add_seg_to_client(client, ret_seg, region->seg_count);
-	return 0;
-}
-
-/* End - Routines for managing each client's segment list */
-
-/* This function must only be called when current_controller != NULL */
-static void agp_insert_into_pool(struct agp_memory * temp)
-{
-	struct agp_memory *prev;
-
-	prev = agp_fe.current_controller->pool;
-
-	if (prev != NULL) {
-		prev->prev = temp;
-		temp->next = prev;
-	}
-	agp_fe.current_controller->pool = temp;
-}
-
-
-/* File private list routines */
-
-struct agp_file_private *agp_find_private(pid_t pid)
-{
-	struct agp_file_private *curr;
-
-	curr = agp_fe.file_priv_list;
-
-	while (curr != NULL) {
-		if (curr->my_pid == pid)
-			return curr;
-		curr = curr->next;
-	}
-
-	return NULL;
-}
-
-static void agp_insert_file_private(struct agp_file_private * priv)
-{
-	struct agp_file_private *prev;
-
-	prev = agp_fe.file_priv_list;
-
-	if (prev != NULL)
-		prev->prev = priv;
-	priv->next = prev;
-	agp_fe.file_priv_list = priv;
-}
-
-static void agp_remove_file_private(struct agp_file_private * priv)
-{
-	struct agp_file_private *next;
-	struct agp_file_private *prev;
-
-	next = priv->next;
-	prev = priv->prev;
-
-	if (prev != NULL) {
-		prev->next = next;
-
-		if (next != NULL)
-			next->prev = prev;
-
-	} else {
-		if (next != NULL)
-			next->prev = NULL;
-
-		agp_fe.file_priv_list = next;
-	}
-}
-
-/* End - File flag list routines */
-
-/*
- * Wrappers for agp_free_memory & agp_allocate_memory
- * These make sure that internal lists are kept updated.
- */
-void agp_free_memory_wrap(struct agp_memory *memory)
-{
-	agp_remove_from_pool(memory);
-	agp_free_memory(memory);
-}
-
-struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type)
-{
-	struct agp_memory *memory;
-
-	memory = agp_allocate_memory(agp_bridge, pg_count, type);
-	if (memory == NULL)
-		return NULL;
-
-	agp_insert_into_pool(memory);
-	return memory;
-}
-
-/* Routines for managing the list of controllers -
- * These routines manage the current controller, and the list of
- * controllers
- */
-
-static struct agp_controller *agp_find_controller_by_pid(pid_t id)
-{
-	struct agp_controller *controller;
-
-	controller = agp_fe.controllers;
-
-	while (controller != NULL) {
-		if (controller->pid == id)
-			return controller;
-		controller = controller->next;
-	}
-
-	return NULL;
-}
-
-static struct agp_controller *agp_create_controller(pid_t id)
-{
-	struct agp_controller *controller;
-
-	controller = kzalloc(sizeof(struct agp_controller), GFP_KERNEL);
-	if (controller == NULL)
-		return NULL;
-
-	controller->pid = id;
-	return controller;
-}
-
-static int agp_insert_controller(struct agp_controller *controller)
-{
-	struct agp_controller *prev_controller;
-
-	prev_controller = agp_fe.controllers;
-	controller->next = prev_controller;
-
-	if (prev_controller != NULL)
-		prev_controller->prev = controller;
-
-	agp_fe.controllers = controller;
-
-	return 0;
-}
-
-static void agp_remove_all_clients(struct agp_controller *controller)
-{
-	struct agp_client *client;
-	struct agp_client *temp;
-
-	client = controller->clients;
-
-	while (client) {
-		struct agp_file_private *priv;
-
-		temp = client;
-		agp_remove_seg_from_client(temp);
-		priv = agp_find_private(temp->pid);
-
-		if (priv != NULL) {
-			clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
-			clear_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
-		}
-		client = client->next;
-		kfree(temp);
-	}
-}
-
-static void agp_remove_all_memory(struct agp_controller *controller)
-{
-	struct agp_memory *memory;
-	struct agp_memory *temp;
-
-	memory = controller->pool;
-
-	while (memory) {
-		temp = memory;
-		memory = memory->next;
-		agp_free_memory_wrap(temp);
-	}
-}
-
-static int agp_remove_controller(struct agp_controller *controller)
-{
-	struct agp_controller *prev_controller;
-	struct agp_controller *next_controller;
-
-	prev_controller = controller->prev;
-	next_controller = controller->next;
-
-	if (prev_controller != NULL) {
-		prev_controller->next = next_controller;
-		if (next_controller != NULL)
-			next_controller->prev = prev_controller;
-
-	} else {
-		if (next_controller != NULL)
-			next_controller->prev = NULL;
-
-		agp_fe.controllers = next_controller;
-	}
-
-	agp_remove_all_memory(controller);
-	agp_remove_all_clients(controller);
-
-	if (agp_fe.current_controller == controller) {
-		agp_fe.current_controller = NULL;
-		agp_fe.backend_acquired = false;
-		agp_backend_release(agp_bridge);
-	}
-	kfree(controller);
-	return 0;
-}
-
-static void agp_controller_make_current(struct agp_controller *controller)
-{
-	struct agp_client *clients;
-
-	clients = controller->clients;
-
-	while (clients != NULL) {
-		struct agp_file_private *priv;
-
-		priv = agp_find_private(clients->pid);
-
-		if (priv != NULL) {
-			set_bit(AGP_FF_IS_VALID, &priv->access_flags);
-			set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
-		}
-		clients = clients->next;
-	}
-
-	agp_fe.current_controller = controller;
-}
-
-static void agp_controller_release_current(struct agp_controller *controller,
-				      struct agp_file_private *controller_priv)
-{
-	struct agp_client *clients;
-
-	clear_bit(AGP_FF_IS_VALID, &controller_priv->access_flags);
-	clients = controller->clients;
-
-	while (clients != NULL) {
-		struct agp_file_private *priv;
-
-		priv = agp_find_private(clients->pid);
-
-		if (priv != NULL)
-			clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
-
-		clients = clients->next;
-	}
-
-	agp_fe.current_controller = NULL;
-	agp_fe.used_by_controller = false;
-	agp_backend_release(agp_bridge);
-}
-
-/*
- * Routines for managing client lists -
- * These routines are for managing the list of auth'ed clients.
- */
-
-static struct agp_client
-*agp_find_client_in_controller(struct agp_controller *controller, pid_t id)
-{
-	struct agp_client *client;
-
-	if (controller == NULL)
-		return NULL;
-
-	client = controller->clients;
-
-	while (client != NULL) {
-		if (client->pid == id)
-			return client;
-		client = client->next;
-	}
-
-	return NULL;
-}
-
-static struct agp_controller *agp_find_controller_for_client(pid_t id)
-{
-	struct agp_controller *controller;
-
-	controller = agp_fe.controllers;
-
-	while (controller != NULL) {
-		if ((agp_find_client_in_controller(controller, id)) != NULL)
-			return controller;
-		controller = controller->next;
-	}
-
-	return NULL;
-}
-
-struct agp_client *agp_find_client_by_pid(pid_t id)
-{
-	struct agp_client *temp;
-
-	if (agp_fe.current_controller == NULL)
-		return NULL;
-
-	temp = agp_find_client_in_controller(agp_fe.current_controller, id);
-	return temp;
-}
-
-static void agp_insert_client(struct agp_client *client)
-{
-	struct agp_client *prev_client;
-
-	prev_client = agp_fe.current_controller->clients;
-	client->next = prev_client;
-
-	if (prev_client != NULL)
-		prev_client->prev = client;
-
-	agp_fe.current_controller->clients = client;
-	agp_fe.current_controller->num_clients++;
-}
-
-struct agp_client *agp_create_client(pid_t id)
-{
-	struct agp_client *new_client;
-
-	new_client = kzalloc(sizeof(struct agp_client), GFP_KERNEL);
-	if (new_client == NULL)
-		return NULL;
-
-	new_client->pid = id;
-	agp_insert_client(new_client);
-	return new_client;
-}
-
-int agp_remove_client(pid_t id)
-{
-	struct agp_client *client;
-	struct agp_client *prev_client;
-	struct agp_client *next_client;
-	struct agp_controller *controller;
-
-	controller = agp_find_controller_for_client(id);
-	if (controller == NULL)
-		return -EINVAL;
-
-	client = agp_find_client_in_controller(controller, id);
-	if (client == NULL)
-		return -EINVAL;
-
-	prev_client = client->prev;
-	next_client = client->next;
-
-	if (prev_client != NULL) {
-		prev_client->next = next_client;
-		if (next_client != NULL)
-			next_client->prev = prev_client;
-
-	} else {
-		if (next_client != NULL)
-			next_client->prev = NULL;
-		controller->clients = next_client;
-	}
-
-	controller->num_clients--;
-	agp_remove_seg_from_client(client);
-	kfree(client);
-	return 0;
-}
-
-/* End - Routines for managing client lists */
-
-/* File Operations */
-
-static int agp_mmap(struct file *file, struct vm_area_struct *vma)
-{
-	unsigned int size, current_size;
-	unsigned long offset;
-	struct agp_client *client;
-	struct agp_file_private *priv = file->private_data;
-	struct agp_kern_info kerninfo;
-
-	mutex_lock(&(agp_fe.agp_mutex));
-
-	if (agp_fe.backend_acquired != true)
-		goto out_eperm;
-
-	if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags)))
-		goto out_eperm;
-
-	agp_copy_info(agp_bridge, &kerninfo);
-	size = vma->vm_end - vma->vm_start;
-	current_size = kerninfo.aper_size;
-	current_size = current_size * 0x100000;
-	offset = vma->vm_pgoff << PAGE_SHIFT;
-	DBG("%lx:%lx", offset, offset+size);
-
-	if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) {
-		if ((size + offset) > current_size)
-			goto out_inval;
-
-		client = agp_find_client_by_pid(current->pid);
-
-		if (client == NULL)
-			goto out_eperm;
-
-		if (!agp_find_seg_in_client(client, offset, size, vma->vm_page_prot))
-			goto out_inval;
-
-		DBG("client vm_ops=%p", kerninfo.vm_ops);
-		if (kerninfo.vm_ops) {
-			vma->vm_ops = kerninfo.vm_ops;
-		} else if (io_remap_pfn_range(vma, vma->vm_start,
-				(kerninfo.aper_base + offset) >> PAGE_SHIFT,
-				size,
-				pgprot_writecombine(vma->vm_page_prot))) {
-			goto out_again;
-		}
-		mutex_unlock(&(agp_fe.agp_mutex));
-		return 0;
-	}
-
-	if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
-		if (size != current_size)
-			goto out_inval;
-
-		DBG("controller vm_ops=%p", kerninfo.vm_ops);
-		if (kerninfo.vm_ops) {
-			vma->vm_ops = kerninfo.vm_ops;
-		} else if (io_remap_pfn_range(vma, vma->vm_start,
-				kerninfo.aper_base >> PAGE_SHIFT,
-				size,
-				pgprot_writecombine(vma->vm_page_prot))) {
-			goto out_again;
-		}
-		mutex_unlock(&(agp_fe.agp_mutex));
-		return 0;
-	}
-
-out_eperm:
-	mutex_unlock(&(agp_fe.agp_mutex));
-	return -EPERM;
-
-out_inval:
-	mutex_unlock(&(agp_fe.agp_mutex));
-	return -EINVAL;
-
-out_again:
-	mutex_unlock(&(agp_fe.agp_mutex));
-	return -EAGAIN;
-}
-
-static int agp_release(struct inode *inode, struct file *file)
-{
-	struct agp_file_private *priv = file->private_data;
-
-	mutex_lock(&(agp_fe.agp_mutex));
-
-	DBG("priv=%p", priv);
-
-	if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
-		struct agp_controller *controller;
-
-		controller = agp_find_controller_by_pid(priv->my_pid);
-
-		if (controller != NULL) {
-			if (controller == agp_fe.current_controller)
-				agp_controller_release_current(controller, priv);
-			agp_remove_controller(controller);
-			controller = NULL;
-		}
-	}
-
-	if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags))
-		agp_remove_client(priv->my_pid);
-
-	agp_remove_file_private(priv);
-	kfree(priv);
-	file->private_data = NULL;
-	mutex_unlock(&(agp_fe.agp_mutex));
-	return 0;
-}
-
-static int agp_open(struct inode *inode, struct file *file)
-{
-	int minor = iminor(inode);
-	struct agp_file_private *priv;
-	struct agp_client *client;
-
-	if (minor != AGPGART_MINOR)
-		return -ENXIO;
-
-	mutex_lock(&(agp_fe.agp_mutex));
-
-	priv = kzalloc(sizeof(struct agp_file_private), GFP_KERNEL);
-	if (priv == NULL) {
-		mutex_unlock(&(agp_fe.agp_mutex));
-		return -ENOMEM;
-	}
-
-	set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags);
-	priv->my_pid = current->pid;
-
-	if (capable(CAP_SYS_RAWIO))
-		/* Root priv, can be controller */
-		set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags);
-
-	client = agp_find_client_by_pid(current->pid);
-
-	if (client != NULL) {
-		set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
-		set_bit(AGP_FF_IS_VALID, &priv->access_flags);
-	}
-	file->private_data = (void *) priv;
-	agp_insert_file_private(priv);
-	DBG("private=%p, client=%p", priv, client);
-
-	mutex_unlock(&(agp_fe.agp_mutex));
-
-	return 0;
-}
-
-static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
-{
-	struct agp_info userinfo;
-	struct agp_kern_info kerninfo;
-
-	agp_copy_info(agp_bridge, &kerninfo);
-
-	memset(&userinfo, 0, sizeof(userinfo));
-	userinfo.version.major = kerninfo.version.major;
-	userinfo.version.minor = kerninfo.version.minor;
-	userinfo.bridge_id = kerninfo.device->vendor |
-	    (kerninfo.device->device << 16);
-	userinfo.agp_mode = kerninfo.mode;
-	userinfo.aper_base = kerninfo.aper_base;
-	userinfo.aper_size = kerninfo.aper_size;
-	userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory;
-	userinfo.pg_used = kerninfo.current_memory;
-
-	if (copy_to_user(arg, &userinfo, sizeof(struct agp_info)))
-		return -EFAULT;
-
-	return 0;
-}
-
-int agpioc_acquire_wrap(struct agp_file_private *priv)
-{
-	struct agp_controller *controller;
-
-	DBG("");
-
-	if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags)))
-		return -EPERM;
-
-	if (agp_fe.current_controller != NULL)
-		return -EBUSY;
-
-	if (!agp_bridge)
-		return -ENODEV;
-
-        if (atomic_read(&agp_bridge->agp_in_use))
-                return -EBUSY;
-
-	atomic_inc(&agp_bridge->agp_in_use);
-
-	agp_fe.backend_acquired = true;
-
-	controller = agp_find_controller_by_pid(priv->my_pid);
-
-	if (controller != NULL) {
-		agp_controller_make_current(controller);
-	} else {
-		controller = agp_create_controller(priv->my_pid);
-
-		if (controller == NULL) {
-			agp_fe.backend_acquired = false;
-			agp_backend_release(agp_bridge);
-			return -ENOMEM;
-		}
-		agp_insert_controller(controller);
-		agp_controller_make_current(controller);
-	}
-
-	set_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags);
-	set_bit(AGP_FF_IS_VALID, &priv->access_flags);
-	return 0;
-}
-
-int agpioc_release_wrap(struct agp_file_private *priv)
-{
-	DBG("");
-	agp_controller_release_current(agp_fe.current_controller, priv);
-	return 0;
-}
-
-int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg)
-{
-	struct agp_setup mode;
-
-	DBG("");
-	if (copy_from_user(&mode, arg, sizeof(struct agp_setup)))
-		return -EFAULT;
-
-	agp_enable(agp_bridge, mode.agp_mode);
-	return 0;
-}
-
-static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
-{
-	struct agp_region reserve;
-	struct agp_client *client;
-	struct agp_file_private *client_priv;
-
-	DBG("");
-	if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
-		return -EFAULT;
-
-	if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
-		return -EFAULT;
-
-	client = agp_find_client_by_pid(reserve.pid);
-
-	if (reserve.seg_count == 0) {
-		/* remove a client */
-		client_priv = agp_find_private(reserve.pid);
-
-		if (client_priv != NULL) {
-			set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
-			set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
-		}
-		if (client == NULL) {
-			/* client is already removed */
-			return 0;
-		}
-		return agp_remove_client(reserve.pid);
-	} else {
-		struct agp_segment *segment;
-
-		if (reserve.seg_count >= 16384)
-			return -EINVAL;
-
-		segment = kmalloc((sizeof(struct agp_segment) * reserve.seg_count),
-				  GFP_KERNEL);
-
-		if (segment == NULL)
-			return -ENOMEM;
-
-		if (copy_from_user(segment, (void __user *) reserve.seg_list,
-				   sizeof(struct agp_segment) * reserve.seg_count)) {
-			kfree(segment);
-			return -EFAULT;
-		}
-		reserve.seg_list = segment;
-
-		if (client == NULL) {
-			/* Create the client and add the segment */
-			client = agp_create_client(reserve.pid);
-
-			if (client == NULL) {
-				kfree(segment);
-				return -ENOMEM;
-			}
-			client_priv = agp_find_private(reserve.pid);
-
-			if (client_priv != NULL) {
-				set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
-				set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
-			}
-		}
-		return agp_create_segment(client, &reserve);
-	}
-	/* Will never really happen */
-	return -EINVAL;
-}
-
-int agpioc_protect_wrap(struct agp_file_private *priv)
-{
-	DBG("");
-	/* This function is not currently implemented */
-	return -EINVAL;
-}
-
-static int agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
-{
-	struct agp_memory *memory;
-	struct agp_allocate alloc;
-
-	DBG("");
-	if (copy_from_user(&alloc, arg, sizeof(struct agp_allocate)))
-		return -EFAULT;
-
-	if (alloc.type >= AGP_USER_TYPES)
-		return -EINVAL;
-
-	memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
-
-	if (memory == NULL)
-		return -ENOMEM;
-
-	alloc.key = memory->key;
-	alloc.physical = memory->physical;
-
-	if (copy_to_user(arg, &alloc, sizeof(struct agp_allocate))) {
-		agp_free_memory_wrap(memory);
-		return -EFAULT;
-	}
-	return 0;
-}
-
-int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg)
-{
-	struct agp_memory *memory;
-
-	DBG("");
-	memory = agp_find_mem_by_key(arg);
-
-	if (memory == NULL)
-		return -EINVAL;
-
-	agp_free_memory_wrap(memory);
-	return 0;
-}
-
-static int agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg)
-{
-	struct agp_bind bind_info;
-	struct agp_memory *memory;
-
-	DBG("");
-	if (copy_from_user(&bind_info, arg, sizeof(struct agp_bind)))
-		return -EFAULT;
-
-	memory = agp_find_mem_by_key(bind_info.key);
-
-	if (memory == NULL)
-		return -EINVAL;
-
-	return agp_bind_memory(memory, bind_info.pg_start);
-}
-
-static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
-{
-	struct agp_memory *memory;
-	struct agp_unbind unbind;
-
-	DBG("");
-	if (copy_from_user(&unbind, arg, sizeof(struct agp_unbind)))
-		return -EFAULT;
-
-	memory = agp_find_mem_by_key(unbind.key);
-
-	if (memory == NULL)
-		return -EINVAL;
-
-	return agp_unbind_memory(memory);
-}
-
-static long agp_ioctl(struct file *file,
-		     unsigned int cmd, unsigned long arg)
-{
-	struct agp_file_private *curr_priv = file->private_data;
-	int ret_val = -ENOTTY;
-
-	DBG("priv=%p, cmd=%x", curr_priv, cmd);
-	mutex_lock(&(agp_fe.agp_mutex));
-
-	if ((agp_fe.current_controller == NULL) &&
-	    (cmd != AGPIOC_ACQUIRE)) {
-		ret_val = -EINVAL;
-		goto ioctl_out;
-	}
-	if ((agp_fe.backend_acquired != true) &&
-	    (cmd != AGPIOC_ACQUIRE)) {
-		ret_val = -EBUSY;
-		goto ioctl_out;
-	}
-	if (cmd != AGPIOC_ACQUIRE) {
-		if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) {
-			ret_val = -EPERM;
-			goto ioctl_out;
-		}
-		/* Use the original pid of the controller,
-		 * in case it's threaded */
-
-		if (agp_fe.current_controller->pid != curr_priv->my_pid) {
-			ret_val = -EBUSY;
-			goto ioctl_out;
-		}
-	}
-
-	switch (cmd) {
-	case AGPIOC_INFO:
-		ret_val = agpioc_info_wrap(curr_priv, (void __user *) arg);
-		break;
-
-	case AGPIOC_ACQUIRE:
-		ret_val = agpioc_acquire_wrap(curr_priv);
-		break;
-
-	case AGPIOC_RELEASE:
-		ret_val = agpioc_release_wrap(curr_priv);
-		break;
-
-	case AGPIOC_SETUP:
-		ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg);
-		break;
-
-	case AGPIOC_RESERVE:
-		ret_val = agpioc_reserve_wrap(curr_priv, (void __user *) arg);
-		break;
-
-	case AGPIOC_PROTECT:
-		ret_val = agpioc_protect_wrap(curr_priv);
-		break;
-
-	case AGPIOC_ALLOCATE:
-		ret_val = agpioc_allocate_wrap(curr_priv, (void __user *) arg);
-		break;
-
-	case AGPIOC_DEALLOCATE:
-		ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg);
-		break;
-
-	case AGPIOC_BIND:
-		ret_val = agpioc_bind_wrap(curr_priv, (void __user *) arg);
-		break;
-
-	case AGPIOC_UNBIND:
-		ret_val = agpioc_unbind_wrap(curr_priv, (void __user *) arg);
-		break;
-
-	case AGPIOC_CHIPSET_FLUSH:
-		break;
-	}
-
-ioctl_out:
-	DBG("ioctl returns %d\n", ret_val);
-	mutex_unlock(&(agp_fe.agp_mutex));
-	return ret_val;
-}
-
-static const struct file_operations agp_fops =
-{
-	.owner		= THIS_MODULE,
-	.llseek		= no_llseek,
-	.unlocked_ioctl	= agp_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl	= compat_agp_ioctl,
-#endif
-	.mmap		= agp_mmap,
-	.open		= agp_open,
-	.release	= agp_release,
-};
-
-static struct miscdevice agp_miscdev =
-{
-	.minor	= AGPGART_MINOR,
-	.name	= "agpgart",
-	.fops	= &agp_fops
-};
-
-int agp_frontend_initialize(void)
-{
-	memset(&agp_fe, 0, sizeof(struct agp_front_data));
-	mutex_init(&(agp_fe.agp_mutex));
-
-	if (misc_register(&agp_miscdev)) {
-		printk(KERN_ERR PFX "unable to get minor: %d\n", AGPGART_MINOR);
-		return -EIO;
-	}
-	return 0;
-}
-
-void agp_frontend_cleanup(void)
-{
-	misc_deregister(&agp_miscdev);
-}
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 8aa8f8cb7071efcce29b62cf5ac3bd96c7308056..e0fd99e61a2d4fcc0f8ea3c671fb0fb2238fa332 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -934,7 +934,8 @@ EXPORT_SYMBOL(dma_fence_wait_any_timeout);
  *   the GPU's devfreq to reduce frequency, when in fact the opposite is what is
  *   needed.
  *
- * To this end, deadline hint(s) can be set on a &dma_fence via &dma_fence_set_deadline.
+ * To this end, deadline hint(s) can be set on a &dma_fence via &dma_fence_set_deadline
+ * (or indirectly via userspace-facing ioctls such as &sync_set_deadline).
  * The deadline hint provides a way for the waiting driver, or userspace, to
  * convey an appropriate sense of urgency to the signaling driver.
  *
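
A minimal waiter-side sketch of the mechanism described above; dma_fence_set_deadline() is the real entry point, while the surrounding function and the vblank-derived deadline are illustrative only:

  #include <linux/dma-fence.h>
  #include <linux/ktime.h>

  /* Hypothetical waiter that wants @fence signaled before @next_vblank. */
  static long example_wait_with_deadline(struct dma_fence *fence,
  				       ktime_t next_vblank)
  {
  	/* Hint first, so the signaling driver can raise clocks in time... */
  	dma_fence_set_deadline(fence, next_vblank);

  	/* ...then block until the fence signals (uninterruptible). */
  	return dma_fence_wait(fence, false);
  }
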
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index f0a35277fd844c2b12621e37315d1f14153e766d..c353029789cf1a3b9c4915552114c8a07fe1ff42 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -52,12 +52,33 @@ struct sw_sync_create_fence_data {
 	__s32	fence; /* fd of new fence */
 };
 
+/**
+ * struct sw_sync_get_deadline - get the deadline hint of a sw_sync fence
+ * @deadline_ns: absolute time of the deadline (out; must be zero on entry)
+ * @pad:	must be zero
+ * @fence_fd:	the sw_sync fence fd (in)
+ *
+ * Return the earliest deadline set on the fence.  The timebase for the
+ * deadline is CLOCK_MONOTONIC (same as vblank).  If there is no deadline
+ * set on the fence, this ioctl will return -ENOENT.
+ */
+struct sw_sync_get_deadline {
+	__u64	deadline_ns;
+	__u32	pad;
+	__s32	fence_fd;
+};
+
 #define SW_SYNC_IOC_MAGIC	'W'
 
 #define SW_SYNC_IOC_CREATE_FENCE	_IOWR(SW_SYNC_IOC_MAGIC, 0,\
 		struct sw_sync_create_fence_data)
 
 #define SW_SYNC_IOC_INC			_IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+#define SW_SYNC_GET_DEADLINE		_IOWR(SW_SYNC_IOC_MAGIC, 2, \
+		struct sw_sync_get_deadline)
+
+#define SW_SYNC_HAS_DEADLINE_BIT	DMA_FENCE_FLAG_USER_BITS
 
 static const struct dma_fence_ops timeline_fence_ops;
 
@@ -171,6 +192,22 @@ static void timeline_fence_timeline_value_str(struct dma_fence *fence,
 	snprintf(str, size, "%d", parent->value);
 }
 
+static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
+{
+	struct sync_pt *pt = dma_fence_to_sync_pt(fence);
+	unsigned long flags;
+
+	spin_lock_irqsave(fence->lock, flags);
+	if (test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
+		if (ktime_before(deadline, pt->deadline))
+			pt->deadline = deadline;
+	} else {
+		pt->deadline = deadline;
+		__set_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags);
+	}
+	spin_unlock_irqrestore(fence->lock, flags);
+}
+
 static const struct dma_fence_ops timeline_fence_ops = {
 	.get_driver_name = timeline_fence_get_driver_name,
 	.get_timeline_name = timeline_fence_get_timeline_name,
@@ -179,6 +216,7 @@ static const struct dma_fence_ops timeline_fence_ops = {
 	.release = timeline_fence_release,
 	.fence_value_str = timeline_fence_value_str,
 	.timeline_value_str = timeline_fence_timeline_value_str,
+	.set_deadline = timeline_fence_set_deadline,
 };
 
 /**
@@ -387,6 +425,47 @@ static long sw_sync_ioctl_inc(struct sync_timeline *obj, unsigned long arg)
 	return 0;
 }
 
+static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long arg)
+{
+	struct sw_sync_get_deadline data;
+	struct dma_fence *fence;
+	unsigned long flags;
+	struct sync_pt *pt;
+	int ret = 0;
+
+	if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+		return -EFAULT;
+
+	if (data.deadline_ns || data.pad)
+		return -EINVAL;
+
+	fence = sync_file_get_fence(data.fence_fd);
+	if (!fence)
+		return -EINVAL;
+
+	pt = dma_fence_to_sync_pt(fence);
+	if (!pt) {
+		/* Drop the reference taken by sync_file_get_fence(). */
+		dma_fence_put(fence);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(fence->lock, flags);
+	if (test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags))
+		data.deadline_ns = ktime_to_ns(pt->deadline);
+	else
+		ret = -ENOENT;
+	spin_unlock_irqrestore(fence->lock, flags);
+
+	dma_fence_put(fence);
+
+	if (ret)
+		return ret;
+
+	if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+		return -EFAULT;
+
+	return 0;
+}
+
 static long sw_sync_ioctl(struct file *file, unsigned int cmd,
 			  unsigned long arg)
 {
@@ -399,6 +478,9 @@ static long sw_sync_ioctl(struct file *file, unsigned int cmd,
 	case SW_SYNC_IOC_INC:
 		return sw_sync_ioctl_inc(obj, arg);
 
+	case SW_SYNC_GET_DEADLINE:
+		return sw_sync_ioctl_get_deadline(obj, arg);
+
 	default:
 		return -ENOTTY;
 	}
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
index 6176e52ba2d748e1de3ded8a23f8d188f74deb01..a1bdd62efccd86aa3293e3a0d26bb9b3d6fffeb4 100644
--- a/drivers/dma-buf/sync_debug.h
+++ b/drivers/dma-buf/sync_debug.h
@@ -55,11 +55,13 @@ static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence)
  * @base: base fence object
  * @link: link on the sync timeline's list
  * @node: node in the sync timeline's tree
+ * @deadline: the earliest fence deadline hint
  */
 struct sync_pt {
 	struct dma_fence base;
 	struct list_head link;
 	struct rb_node node;
+	ktime_t deadline;
 };
 
 extern const struct file_operations sw_sync_debugfs_fops;
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 2e9a316c596a3b3a02a950e704a265db4516b46d..d9b1c1b2a72b2c619b973db791783b28a03b8d13 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -347,6 +347,22 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
 	return ret;
 }
 
+static int sync_file_ioctl_set_deadline(struct sync_file *sync_file,
+					unsigned long arg)
+{
+	struct sync_set_deadline ts;
+
+	if (copy_from_user(&ts, (void __user *)arg, sizeof(ts)))
+		return -EFAULT;
+
+	if (ts.pad)
+		return -EINVAL;
+
+	dma_fence_set_deadline(sync_file->fence, ns_to_ktime(ts.deadline_ns));
+
+	return 0;
+}
+
 static long sync_file_ioctl(struct file *file, unsigned int cmd,
 			    unsigned long arg)
 {
@@ -359,6 +375,9 @@ static long sync_file_ioctl(struct file *file, unsigned int cmd,
 	case SYNC_IOC_FILE_INFO:
 		return sync_file_ioctl_fence_info(sync_file, arg);
 
+	case SYNC_IOC_SET_DEADLINE:
+		return sync_file_ioctl_set_deadline(sync_file, arg);
+
 	default:
 		return -ENOTTY;
 	}
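
The setter side from ordinary userspace, assuming the uapi additions from this series (struct sync_set_deadline and SYNC_IOC_SET_DEADLINE in <linux/sync_file.h>) are installed; per the sw_sync documentation above, the deadline is an absolute CLOCK_MONOTONIC time:

  #include <linux/sync_file.h>
  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <time.h>

  /* Hint that @fence_fd should signal within @ns_from_now nanoseconds. */
  static int set_fence_deadline(int fence_fd, uint64_t ns_from_now)
  {
  	struct sync_set_deadline ts = {0};
  	struct timespec now;

  	clock_gettime(CLOCK_MONOTONIC, &now);
  	ts.deadline_ns = (uint64_t)now.tv_sec * 1000000000ull +
  			 (uint64_t)now.tv_nsec + ns_from_now;

  	/* ts.pad stays zero, as required by the handler above. */
  	return ioctl(fence_fd, SYNC_IOC_SET_DEADLINE, &ts);
  }
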
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 740c1c0bd06806a711790d0bfca1391411506029..31cfe2c2a2afa08baf3dcc2a8910f22f08a490d4 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -74,12 +74,13 @@ config DRM_KUNIT_TEST_HELPERS
 
 config DRM_KUNIT_TEST
 	tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
-	depends on DRM && KUNIT
+	depends on DRM && KUNIT && MMU
 	select DRM_BUDDY
 	select DRM_DISPLAY_DP_HELPER
 	select DRM_DISPLAY_HELPER
 	select DRM_EXEC
 	select DRM_EXPORT_FOR_TESTS if m
+	select DRM_GEM_SHMEM_HELPER
 	select DRM_KMS_HELPER
 	select DRM_KUNIT_TEST_HELPERS
 	select DRM_LIB_RANDOM
@@ -409,27 +410,6 @@ config DRM_HYPERV
 
 	 If M is selected the module will be called hyperv_drm.
 
-# Keep legacy drivers last
-
-menuconfig DRM_LEGACY
-	bool "Enable legacy drivers (DANGEROUS)"
-	depends on DRM && MMU
-	help
-	  Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous
-	  APIs to user-space, which can be used to circumvent access
-	  restrictions and other security measures. For backwards compatibility
-	  those drivers are still available, but their use is highly
-	  inadvisable and might harm your system.
-
-	  You are recommended to use the safe modeset-only drivers instead, and
-	  perform 3D emulation in user-space.
-
-	  Unless you have strong reasons to go rogue, say "N".
-
-if DRM_LEGACY
-# leave here to list legacy drivers
-endif # DRM_LEGACY
-
 config DRM_EXPORT_FOR_TESTS
 	bool
 
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index b4cb0835620af31c3fb43f333f32284a73927653..8ac6f4b9546e64ff5ff1e8a6639ccf9ff4a163e4 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -47,18 +47,6 @@ drm-y := \
 	drm_vblank_work.o \
 	drm_vma_manager.o \
 	drm_writeback.o
-drm-$(CONFIG_DRM_LEGACY) += \
-	drm_agpsupport.o \
-	drm_bufs.o \
-	drm_context.o \
-	drm_dma.o \
-	drm_hashtab.o \
-	drm_irq.o \
-	drm_legacy_misc.o \
-	drm_lock.o \
-	drm_memory.o \
-	drm_scatter.o \
-	drm_vm.o
 drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_PANEL) += drm_panel.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index e2ae9ba147ba9749a65127458444046f0dc72961..5cb33ac99f7089fb81c4954d8cbb6c434c713be6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -73,10 +73,10 @@ amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
 		return DRM_SCHED_PRIORITY_NORMAL;
 
 	case AMDGPU_CTX_PRIORITY_VERY_LOW:
-		return DRM_SCHED_PRIORITY_MIN;
+		return DRM_SCHED_PRIORITY_LOW;
 
 	case AMDGPU_CTX_PRIORITY_LOW:
-		return DRM_SCHED_PRIORITY_MIN;
+		return DRM_SCHED_PRIORITY_LOW;
 
 	case AMDGPU_CTX_PRIORITY_NORMAL:
 		return DRM_SCHED_PRIORITY_NORMAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 62bb7fc7448ad91d85fb056283e8b346156cb52b..71a5cf37b472d473022740be33e05d4adc21997c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -325,7 +325,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
 	int i;
 
 	/* Signal all jobs not yet scheduled */
-	for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
 		struct drm_sched_rq *rq = sched->sched_rq[i];
 		spin_lock(&rq->lock);
 		list_for_each_entry(s_entity, &rq->entities, list) {
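
Both amdgpu hunks above follow from the drm/sched priority renumbering: DRM_SCHED_PRIORITY_MIN is gone, and a lower value now means a higher priority. A sketch of the enum this assumes (names per this series; ordering inferred from the rewritten loop, which still visits run-queues from highest to lowest priority):

  /* Assumed post-rework ordering in <drm/gpu_scheduler.h>: */
  enum drm_sched_priority {
  	DRM_SCHED_PRIORITY_KERNEL,	/* index 0: highest priority */
  	DRM_SCHED_PRIORITY_HIGH,
  	DRM_SCHED_PRIORITY_NORMAL,
  	DRM_SCHED_PRIORITY_LOW,		/* replaces DRM_SCHED_PRIORITY_MIN */

  	DRM_SCHED_PRIORITY_COUNT
  };
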
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index bc7b8a3253538121b64480a4e0b3a86a6c69ed62..d6e325c848eb4ca093a85085ef787ec3a4c9d0cb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -92,7 +92,6 @@
 #include <drm/drm_vblank.h>
 #include <drm/drm_audio_component.h>
 #include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
 
 #include <acpi/video.h>
 
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 52d2c942d3d2a06c75aa54b32df11a14ea37c7d8..c78687c755a869a8d3eb824dff01b8e5828ea890 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -7,8 +7,9 @@
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
@@ -1012,26 +1013,17 @@ armada_lcd_bind(struct device *dev, struct device *master, void *data)
 	int irq = platform_get_irq(pdev, 0);
 	const struct armada_variant *variant;
 	struct device_node *port = NULL;
+	struct device_node *np, *parent = dev->of_node;
 
 	if (irq < 0)
 		return irq;
 
-	if (!dev->of_node) {
-		const struct platform_device_id *id;
-
-		id = platform_get_device_id(pdev);
-		if (!id)
-			return -ENXIO;
-
-		variant = (const struct armada_variant *)id->driver_data;
-	} else {
-		const struct of_device_id *match;
-		struct device_node *np, *parent = dev->of_node;
-
-		match = of_match_device(dev->driver->of_match_table, dev);
-		if (!match)
-			return -ENXIO;
+	variant = device_get_match_data(dev);
+	if (!variant)
+		return -ENXIO;
 
+	if (parent) {
 		np = of_get_child_by_name(parent, "ports");
 		if (np)
 			parent = np;
@@ -1041,8 +1033,6 @@ armada_lcd_bind(struct device *dev, struct device *master, void *data)
 			dev_err(dev, "no port node found in %pOF\n", parent);
 			return -ENXIO;
 		}
-
-		variant = match->data;
 	}
 
 	return armada_drm_crtc_create(drm, dev, res, irq, variant, port);
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index 78122b35a0cbb347fcd2e4dfce139800e26aba65..a7a6b70220eb8cc95919e97216d95a9690c82bd9 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -6,10 +6,10 @@
 #include <linux/irq.h>
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
 #include <linux/of_reserved_mem.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/regmap.h>
 #include <linux/reset.h>
 
@@ -143,7 +143,6 @@ static int aspeed_gfx_load(struct drm_device *drm)
 	struct aspeed_gfx *priv = to_aspeed_gfx(drm);
 	struct device_node *np = pdev->dev.of_node;
 	const struct aspeed_gfx_config *config;
-	const struct of_device_id *match;
 	struct resource *res;
 	int ret;
 
@@ -152,10 +151,9 @@ static int aspeed_gfx_load(struct drm_device *drm)
 	if (IS_ERR(priv->base))
 		return PTR_ERR(priv->base);
 
-	match = of_match_device(aspeed_gfx_match, &pdev->dev);
-	if (!match)
+	config = device_get_match_data(&pdev->dev);
+	if (!config)
 		return -EINVAL;
-	config = match->data;
 
 	priv->dac_reg = config->dac_reg;
 	priv->int_clr_reg = config->int_clear_reg;
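
The armada and aspeed hunks above are the same conversion: drop the of_match_device()/platform_get_device_id() boilerplate in favor of device_get_match_data(), which returns the matched entry's .data pointer directly. A self-contained sketch of the pattern, with hypothetical driver names:

  #include <linux/mod_devicetable.h>
  #include <linux/module.h>
  #include <linux/platform_device.h>
  #include <linux/property.h>

  struct example_variant {
  	unsigned int dac_reg;	/* hypothetical per-compatible data */
  };

  static const struct example_variant example_v1 = { .dac_reg = 0x40 };

  static const struct of_device_id example_of_match[] = {
  	{ .compatible = "vendor,example-v1", .data = &example_v1 },
  	{ /* sentinel */ }
  };
  MODULE_DEVICE_TABLE(of, example_of_match);

  static int example_probe(struct platform_device *pdev)
  {
  	/* Returns example_of_match[].data for the matched compatible. */
  	const struct example_variant *variant =
  		device_get_match_data(&pdev->dev);

  	if (!variant)
  		return -ENXIO;

  	return 0;
  }

  static struct platform_driver example_driver = {
  	.probe = example_probe,
  	.driver = {
  		.name = "example",
  		.of_match_table = example_of_match,
  	},
  };
  module_platform_driver(example_driver);

  MODULE_LICENSE("GPL");
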
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index cf5b754f044c76ecb554883c22fd5db443f41720..90bcb1eb9cd94f67d11da54835cb18b588ef692f 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -89,11 +89,194 @@ static const struct pci_device_id ast_pciidlist[] = {
 
 MODULE_DEVICE_TABLE(pci, ast_pciidlist);
 
+static bool ast_is_vga_enabled(void __iomem *ioregs)
+{
+	u8 vgaer = __ast_read8(ioregs, AST_IO_VGAER);
+
+	return vgaer & AST_IO_VGAER_VGA_ENABLE;
+}
+
+static void ast_enable_vga(void __iomem *ioregs)
+{
+	__ast_write8(ioregs, AST_IO_VGAER, AST_IO_VGAER_VGA_ENABLE);
+	__ast_write8(ioregs, AST_IO_VGAMR_W, AST_IO_VGAMR_IOSEL);
+}
+
+/*
+ * Run this function as part of the HW device cleanup, not
+ * when the DRM device gets released.
+ */
+static void ast_enable_mmio_release(void *data)
+{
+	void __iomem *ioregs = (void __force __iomem *)data;
+
+	/* enable standard VGA decode */
+	__ast_write8_i(ioregs, AST_IO_VGACRI, 0xa1, AST_IO_VGACRA1_MMIO_ENABLED);
+}
+
+static int ast_enable_mmio(struct device *dev, void __iomem *ioregs)
+{
+	void *data = (void __force *)ioregs;
+
+	__ast_write8_i(ioregs, AST_IO_VGACRI, 0xa1,
+		       AST_IO_VGACRA1_MMIO_ENABLED |
+		       AST_IO_VGACRA1_VGAIO_DISABLED);
+
+	return devm_add_action_or_reset(dev, ast_enable_mmio_release, data);
+}
+
+static void ast_open_key(void __iomem *ioregs)
+{
+	__ast_write8_i(ioregs, AST_IO_VGACRI, 0x80, AST_IO_VGACR80_PASSWORD);
+}
+
+static int ast_detect_chip(struct pci_dev *pdev,
+			   void __iomem *regs, void __iomem *ioregs,
+			   enum ast_chip *chip_out,
+			   enum ast_config_mode *config_mode_out)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	enum ast_config_mode config_mode = ast_use_defaults;
+	uint32_t scu_rev = 0xffffffff;
+	enum ast_chip chip;
+	u32 data;
+	u8 vgacrd0, vgacrd1;
+
+	/*
+	 * Find configuration mode and read SCU revision
+	 */
+
+	/* Check if we have device-tree properties */
+	if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", &data)) {
+		/* We do, so disable P2A access. */
+		config_mode = ast_use_dt;
+		scu_rev = data;
+	} else if (pdev->device == PCI_CHIP_AST2000) {
+		/*
+		 * Not all families have a P2A bridge. Where one exists,
+		 * the BMC will set SCU 0x40 D[12] to 1 if the P2A bridge
+		 * is disabled. We also force using P2A if the VGA-only
+		 * mode bit D[7] is set.
+		 */
+		vgacrd0 = __ast_read8_i(ioregs, AST_IO_VGACRI, 0xd0);
+		vgacrd1 = __ast_read8_i(ioregs, AST_IO_VGACRI, 0xd1);
+		if (!(vgacrd0 & 0x80) || !(vgacrd1 & 0x10)) {
+			/* We have a P2A bridge and it is enabled. */
+
+			/* Patch AST2500/AST2510 */
+			if ((pdev->revision & 0xf0) == 0x40) {
+				if (!(vgacrd0 & AST_VRAM_INIT_STATUS_MASK))
+					ast_patch_ahb_2500(regs);
+			}
+
+			/* Double check that it's actually working */
+			data = __ast_read32(regs, 0xf004);
+			if ((data != 0xffffffff) && (data != 0x00)) {
+				config_mode = ast_use_p2a;
+
+				/* Read SCU7c (silicon revision register) */
+				__ast_write32(regs, 0xf004, 0x1e6e0000);
+				__ast_write32(regs, 0xf000, 0x1);
+				scu_rev = __ast_read32(regs, 0x1207c);
+			}
+		}
+	}
+
+	switch (config_mode) {
+	case ast_use_defaults:
+		dev_info(dev, "Using default configuration\n");
+		break;
+	case ast_use_dt:
+		dev_info(dev, "Using device-tree for configuration\n");
+		break;
+	case ast_use_p2a:
+		dev_info(dev, "Using P2A bridge for configuration\n");
+		break;
+	}
+
+	/*
+	 * Identify chipset
+	 */
+
+	if (pdev->revision >= 0x50) {
+		chip = AST2600;
+		dev_info(dev, "AST 2600 detected\n");
+	} else if (pdev->revision >= 0x40) {
+		switch (scu_rev & 0x300) {
+		case 0x0100:
+			chip = AST2510;
+			dev_info(dev, "AST 2510 detected\n");
+			break;
+		default:
+			chip = AST2500;
+			dev_info(dev, "AST 2500 detected\n");
+			break;
+		}
+	} else if (pdev->revision >= 0x30) {
+		switch (scu_rev & 0x300) {
+		case 0x0100:
+			chip = AST1400;
+			dev_info(dev, "AST 1400 detected\n");
+			break;
+		default:
+			chip = AST2400;
+			dev_info(dev, "AST 2400 detected\n");
+			break;
+		}
+	} else if (pdev->revision >= 0x20) {
+		switch (scu_rev & 0x300) {
+		case 0x0000:
+			chip = AST1300;
+			dev_info(dev, "AST 1300 detected\n");
+			break;
+		default:
+			chip = AST2300;
+			dev_info(dev, "AST 2300 detected\n");
+			break;
+		}
+	} else if (pdev->revision >= 0x10) {
+		switch (scu_rev & 0x0300) {
+		case 0x0200:
+			chip = AST1100;
+			dev_info(dev, "AST 1100 detected\n");
+			break;
+		case 0x0100:
+			chip = AST2200;
+			dev_info(dev, "AST 2200 detected\n");
+			break;
+		case 0x0000:
+			chip = AST2150;
+			dev_info(dev, "AST 2150 detected\n");
+			break;
+		default:
+			chip = AST2100;
+			dev_info(dev, "AST 2100 detected\n");
+			break;
+		}
+	} else {
+		chip = AST2000;
+		dev_info(dev, "AST 2000 detected\n");
+	}
+
+	*chip_out = chip;
+	*config_mode_out = config_mode;
+
+	return 0;
+}
+
 static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
-	struct ast_device *ast;
-	struct drm_device *dev;
+	struct device *dev = &pdev->dev;
 	int ret;
+	void __iomem *regs;
+	void __iomem *ioregs;
+	enum ast_config_mode config_mode;
+	enum ast_chip chip;
+	struct drm_device *drm;
+	bool need_post = false;
 
 	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &ast_driver);
 	if (ret)
@@ -103,16 +286,80 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (ret)
 		return ret;
 
-	ast = ast_device_create(&ast_driver, pdev, ent->driver_data);
-	if (IS_ERR(ast))
-		return PTR_ERR(ast);
-	dev = &ast->base;
+	regs = pcim_iomap(pdev, 1, 0);
+	if (!regs)
+		return -EIO;
+
+	if (pdev->revision >= 0x40) {
+		/*
+		 * On AST2500 and later models, MMIO is enabled by
+		 * default. Use it to stay compatible with Arm platforms.
+		 */
+		resource_size_t len = pci_resource_len(pdev, 1);
+
+		if (len < AST_IO_MM_OFFSET)
+			return -EIO;
+		if ((len - AST_IO_MM_OFFSET) < AST_IO_MM_LENGTH)
+			return -EIO;
+		ioregs = regs + AST_IO_MM_OFFSET;
+	} else if (pci_resource_flags(pdev, 2) & IORESOURCE_IO) {
+		/*
+		 * Map I/O registers if we have a PCI BAR for I/O.
+		 */
+		resource_size_t len = pci_resource_len(pdev, 2);
+
+		if (len < AST_IO_MM_LENGTH)
+			return -EIO;
+		ioregs = pcim_iomap(pdev, 2, 0);
+		if (!ioregs)
+			return -EIO;
+	} else {
+		/*
+		 * Anything else is best effort.
+		 */
+		resource_size_t len = pci_resource_len(pdev, 1);
+
+		if (len < AST_IO_MM_OFFSET)
+			return -EIO;
+		if ((len - AST_IO_MM_OFFSET) < AST_IO_MM_LENGTH)
+			return -EIO;
+		ioregs = regs + AST_IO_MM_OFFSET;
+
+		dev_info(dev, "Platform has no I/O space, using MMIO\n");
+	}
+
+	if (!ast_is_vga_enabled(ioregs)) {
+		dev_info(dev, "VGA not enabled on entry, requesting chip POST\n");
+		need_post = true;
+	}
+
+	/*
+	 * If VGA isn't enabled, we need to enable it now, or subsequent
+	 * access to the scratch registers will fail.
+	 */
+	if (need_post)
+		ast_enable_vga(ioregs);
+	/* Enable extended register access */
+	ast_open_key(ioregs);
+
+	ret = ast_enable_mmio(dev, ioregs);
+	if (ret)
+		return ret;
+
+	ret = ast_detect_chip(pdev, regs, ioregs, &chip, &config_mode);
+	if (ret)
+		return ret;
+
+	drm = ast_device_create(pdev, &ast_driver, chip, config_mode, regs, ioregs, need_post);
+	if (IS_ERR(drm))
+		return PTR_ERR(drm);
+	pci_set_drvdata(pdev, drm);
 
-	ret = drm_dev_register(dev, ent->driver_data);
+	ret = drm_dev_register(drm, ent->driver_data);
 	if (ret)
 		return ret;
 
-	drm_fbdev_generic_setup(dev, 32);
+	drm_fbdev_generic_setup(drm, 32);
 
 	return 0;
 }
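
The MMIO-enable step above hands its undo path to devres via devm_add_action_or_reset(), so ast_pci_probe() needs no manual unwind when a later step fails. A hedged sketch of that general pattern, with hypothetical foo_* names:

#include <linux/device.h>
#include <linux/io.h>

static void foo_hw_disable(void *data)
{
	void __iomem *regs = data;

	iowrite8(0x00, regs);			/* undo foo_hw_enable() */
}

static int foo_hw_enable(struct device *dev, void __iomem *regs)
{
	iowrite8(0x01, regs);

	/*
	 * On success the action runs automatically when dev is unbound;
	 * if registration fails, the action runs immediately, so the
	 * caller never needs a manual unwind path.
	 */
	return devm_add_action_or_reset(dev, foo_hw_disable,
					(void __force *)regs);
}
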
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 772f3b049c1694408d7acc89f4235a1c8c657ea9..3be5ccf1f5f4d5d32ee9f8f18e147a65b90b32a5 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -98,6 +98,12 @@ enum ast_tx_chip {
 #define AST_TX_DP501_BIT	BIT(AST_TX_DP501)
 #define AST_TX_ASTDP_BIT	BIT(AST_TX_ASTDP)
 
+enum ast_config_mode {
+	ast_use_p2a,
+	ast_use_dt,
+	ast_use_defaults
+};
+
 #define AST_DRAM_512Mx16 0
 #define AST_DRAM_1Gx16   1
 #define AST_DRAM_512Mx32 2
@@ -192,12 +198,13 @@ to_ast_bmc_connector(struct drm_connector *connector)
 struct ast_device {
 	struct drm_device base;
 
-	struct mutex ioregs_lock; /* Protects access to I/O registers in ioregs */
 	void __iomem *regs;
 	void __iomem *ioregs;
 	void __iomem *dp501_fw_buf;
 
+	enum ast_config_mode config_mode;
 	enum ast_chip chip;
+
 	uint32_t dram_bus_width;
 	uint32_t dram_type;
 	uint32_t mclk;
@@ -207,6 +214,8 @@ struct ast_device {
 	unsigned long	vram_size;
 	unsigned long	vram_fb_available;
 
+	struct mutex modeset_lock; /* Protects access to modeset I/O registers in ioregs */
+
 	struct ast_plane primary_plane;
 	struct ast_plane cursor_plane;
 	struct drm_crtc crtc;
@@ -234,11 +243,6 @@ struct ast_device {
 	} output;
 
 	bool support_wide_screen;
-	enum {
-		ast_use_p2a,
-		ast_use_dt,
-		ast_use_defaults
-	} config_mode;
 
 	unsigned long tx_chip_types;		/* bitfield of enum ast_chip_type */
 	u8 *dp501_fw_addr;
@@ -250,9 +254,13 @@ static inline struct ast_device *to_ast_device(struct drm_device *dev)
 	return container_of(dev, struct ast_device, base);
 }
 
-struct ast_device *ast_device_create(const struct drm_driver *drv,
-				     struct pci_dev *pdev,
-				     unsigned long flags);
+struct drm_device *ast_device_create(struct pci_dev *pdev,
+				     const struct drm_driver *drv,
+				     enum ast_chip chip,
+				     enum ast_config_mode config_mode,
+				     void __iomem *regs,
+				     void __iomem *ioregs,
+				     bool need_post);
 
 static inline unsigned long __ast_gen(struct ast_device *ast)
 {
@@ -272,55 +280,94 @@ static inline bool __ast_gen_is_eq(struct ast_device *ast, unsigned long gen)
 #define IS_AST_GEN6(__ast)	__ast_gen_is_eq(__ast, 6)
 #define IS_AST_GEN7(__ast)	__ast_gen_is_eq(__ast, 7)
 
+static inline u8 __ast_read8(const void __iomem *addr, u32 reg)
+{
+	return ioread8(addr + reg);
+}
+
+static inline u32 __ast_read32(const void __iomem *addr, u32 reg)
+{
+	return ioread32(addr + reg);
+}
+
+static inline void __ast_write8(void __iomem *addr, u32 reg, u8 val)
+{
+	iowrite8(val, addr + reg);
+}
+
+static inline void __ast_write32(void __iomem *addr, u32 reg, u32 val)
+{
+	iowrite32(val, addr + reg);
+}
+
+static inline u8 __ast_read8_i(void __iomem *addr, u32 reg, u8 index)
+{
+	__ast_write8(addr, reg, index);
+	return __ast_read8(addr, reg + 1);
+}
+
+static inline u8 __ast_read8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask)
+{
+	u8 val = __ast_read8_i(addr, reg, index);
+
+	return val & read_mask;
+}
+
+static inline void __ast_write8_i(void __iomem *addr, u32 reg, u8 index, u8 val)
+{
+	__ast_write8(addr, reg, index);
+	__ast_write8(addr, reg + 1, val);
+}
+
+static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask,
+					 u8 val)
+{
+	u8 tmp = __ast_read8_i_masked(addr, reg, index, read_mask);
+
+	tmp |= val;
+	__ast_write8_i(addr, reg, index, tmp);
+}
+
 static inline u32 ast_read32(struct ast_device *ast, u32 reg)
 {
-	return ioread32(ast->regs + reg);
+	return __ast_read32(ast->regs, reg);
 }
 
 static inline void ast_write32(struct ast_device *ast, u32 reg, u32 val)
 {
-	iowrite32(val, ast->regs + reg);
+	__ast_write32(ast->regs, reg, val);
 }
 
 static inline u8 ast_io_read8(struct ast_device *ast, u32 reg)
 {
-	return ioread8(ast->ioregs + reg);
+	return __ast_read8(ast->ioregs, reg);
 }
 
 static inline void ast_io_write8(struct ast_device *ast, u32 reg, u8 val)
 {
-	iowrite8(val, ast->ioregs + reg);
+	__ast_write8(ast->ioregs, reg, val);
 }
 
 static inline u8 ast_get_index_reg(struct ast_device *ast, u32 base, u8 index)
 {
-	ast_io_write8(ast, base, index);
-	++base;
-	return ast_io_read8(ast, base);
+	return __ast_read8_i(ast->ioregs, base, index);
 }
 
 static inline u8 ast_get_index_reg_mask(struct ast_device *ast, u32 base, u8 index,
 					u8 preserve_mask)
 {
-	u8 val = ast_get_index_reg(ast, base, index);
-
-	return val & preserve_mask;
+	return __ast_read8_i_masked(ast->ioregs, base, index, preserve_mask);
 }
 
 static inline void ast_set_index_reg(struct ast_device *ast, u32 base, u8 index, u8 val)
 {
-	ast_io_write8(ast, base, index);
-	++base;
-	ast_io_write8(ast, base, val);
+	__ast_write8_i(ast->ioregs, base, index, val);
 }
 
 static inline void ast_set_index_reg_mask(struct ast_device *ast, u32 base, u8 index,
 					  u8 preserve_mask, u8 val)
 {
-	u8 tmp = ast_get_index_reg_mask(ast, base, index, preserve_mask);
-
-	tmp |= val;
-	ast_set_index_reg(ast, base, index, tmp);
+	__ast_write8_i_masked(ast->ioregs, base, index, preserve_mask, val);
 }
 
 #define AST_VIDMEM_SIZE_8M    0x00800000
@@ -442,7 +489,7 @@ int ast_mm_init(struct ast_device *ast);
 void ast_post_gpu(struct drm_device *dev);
 u32 ast_mindwm(struct ast_device *ast, u32 r);
 void ast_moutdwm(struct ast_device *ast, u32 r, u32 v);
-void ast_patch_ahb_2500(struct ast_device *ast);
+void ast_patch_ahb_2500(void __iomem *regs);
 /* ast dp501 */
 void ast_set_dp501_video_output(struct drm_device *dev, u8 mode);
 bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
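
The __ast_*_i() helpers implement the classic VGA indexed-register protocol: the index byte is written to the base port and the data is transferred through base + 1. A short usage sketch against the helpers above (index and mask values are illustrative only):

/* assumes the declarations from ast_drv.h and ast_reg.h above */
static void example_vgacr_access(void __iomem *ioregs)
{
	u8 vgacrd0;

	/* unlock the extended VGACR bank, as ast_open_key() does */
	__ast_write8_i(ioregs, AST_IO_VGACRI, 0x80, AST_IO_VGACR80_PASSWORD);

	/* the index goes to the base port, data moves through base + 1 */
	vgacrd0 = __ast_read8_i(ioregs, AST_IO_VGACRI, 0xd0);

	/* read-modify-write: keep the bits in read_mask, OR in the value */
	if (!(vgacrd0 & BIT(7)))
		__ast_write8_i_masked(ioregs, AST_IO_VGACRI, 0xa1,
				      (u8)~BIT(1), BIT(1));
}
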
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f4ab40e22ceaca9c3496360b60213c5a2e341a8d..2f3ad5f949fcb0adb8cfb7e31eada220a2c4e090 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -35,180 +35,6 @@
 
 #include "ast_drv.h"
 
-static bool ast_is_vga_enabled(struct drm_device *dev)
-{
-	struct ast_device *ast = to_ast_device(dev);
-	u8 ch;
-
-	ch = ast_io_read8(ast, AST_IO_VGAER);
-
-	return !!(ch & 0x01);
-}
-
-static void ast_enable_vga(struct drm_device *dev)
-{
-	struct ast_device *ast = to_ast_device(dev);
-
-	ast_io_write8(ast, AST_IO_VGAER, 0x01);
-	ast_io_write8(ast, AST_IO_VGAMR_W, 0x01);
-}
-
-/*
- * Run this function as part of the HW device cleanup; not
- * when the DRM device gets released.
- */
-static void ast_enable_mmio_release(void *data)
-{
-	struct ast_device *ast = data;
-
-	/* enable standard VGA decode */
-	ast_set_index_reg(ast, AST_IO_VGACRI, 0xa1, 0x04);
-}
-
-static int ast_enable_mmio(struct ast_device *ast)
-{
-	struct drm_device *dev = &ast->base;
-
-	ast_set_index_reg(ast, AST_IO_VGACRI, 0xa1, 0x06);
-
-	return devm_add_action_or_reset(dev->dev, ast_enable_mmio_release, ast);
-}
-
-static void ast_open_key(struct ast_device *ast)
-{
-	ast_set_index_reg(ast, AST_IO_VGACRI, 0x80, 0xA8);
-}
-
-static int ast_device_config_init(struct ast_device *ast)
-{
-	struct drm_device *dev = &ast->base;
-	struct pci_dev *pdev = to_pci_dev(dev->dev);
-	struct device_node *np = dev->dev->of_node;
-	uint32_t scu_rev = 0xffffffff;
-	u32 data;
-	u8 jregd0, jregd1;
-
-	/*
-	 * Find configuration mode and read SCU revision
-	 */
-
-	ast->config_mode = ast_use_defaults;
-
-	/* Check if we have device-tree properties */
-	if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", &data)) {
-		/* We do, disable P2A access */
-		ast->config_mode = ast_use_dt;
-		scu_rev = data;
-	} else if (pdev->device == PCI_CHIP_AST2000) { // Not all families have a P2A bridge
-		/*
-		 * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge
-		 * is disabled. We force using P2A if VGA only mode bit
-		 * is set D[7]
-		 */
-		jregd0 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
-		jregd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, 0xff);
-		if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
-
-			/*
-			 * We have a P2A bridge and it is enabled.
-			 */
-
-			/* Patch AST2500/AST2510 */
-			if ((pdev->revision & 0xf0) == 0x40) {
-				if (!(jregd0 & AST_VRAM_INIT_STATUS_MASK))
-					ast_patch_ahb_2500(ast);
-			}
-
-			/* Double check that it's actually working */
-			data = ast_read32(ast, 0xf004);
-			if ((data != 0xffffffff) && (data != 0x00)) {
-				ast->config_mode = ast_use_p2a;
-
-				/* Read SCU7c (silicon revision register) */
-				ast_write32(ast, 0xf004, 0x1e6e0000);
-				ast_write32(ast, 0xf000, 0x1);
-				scu_rev = ast_read32(ast, 0x1207c);
-			}
-		}
-	}
-
-	switch (ast->config_mode) {
-	case ast_use_defaults:
-		drm_info(dev, "Using default configuration\n");
-		break;
-	case ast_use_dt:
-		drm_info(dev, "Using device-tree for configuration\n");
-		break;
-	case ast_use_p2a:
-		drm_info(dev, "Using P2A bridge for configuration\n");
-		break;
-	}
-
-	/*
-	 * Identify chipset
-	 */
-
-	if (pdev->revision >= 0x50) {
-		ast->chip = AST2600;
-		drm_info(dev, "AST 2600 detected\n");
-	} else if (pdev->revision >= 0x40) {
-		switch (scu_rev & 0x300) {
-		case 0x0100:
-			ast->chip = AST2510;
-			drm_info(dev, "AST 2510 detected\n");
-			break;
-		default:
-			ast->chip = AST2500;
-			drm_info(dev, "AST 2500 detected\n");
-		}
-	} else if (pdev->revision >= 0x30) {
-		switch (scu_rev & 0x300) {
-		case 0x0100:
-			ast->chip = AST1400;
-			drm_info(dev, "AST 1400 detected\n");
-			break;
-		default:
-			ast->chip = AST2400;
-			drm_info(dev, "AST 2400 detected\n");
-		}
-	} else if (pdev->revision >= 0x20) {
-		switch (scu_rev & 0x300) {
-		case 0x0000:
-			ast->chip = AST1300;
-			drm_info(dev, "AST 1300 detected\n");
-			break;
-		default:
-			ast->chip = AST2300;
-			drm_info(dev, "AST 2300 detected\n");
-			break;
-		}
-	} else if (pdev->revision >= 0x10) {
-		switch (scu_rev & 0x0300) {
-		case 0x0200:
-			ast->chip = AST1100;
-			drm_info(dev, "AST 1100 detected\n");
-			break;
-		case 0x0100:
-			ast->chip = AST2200;
-			drm_info(dev, "AST 2200 detected\n");
-			break;
-		case 0x0000:
-			ast->chip = AST2150;
-			drm_info(dev, "AST 2150 detected\n");
-			break;
-		default:
-			ast->chip = AST2100;
-			drm_info(dev, "AST 2100 detected\n");
-			break;
-		}
-	} else {
-		ast->chip = AST2000;
-		drm_info(dev, "AST 2000 detected\n");
-	}
-
-	return 0;
-}
-
 static void ast_detect_widescreen(struct ast_device *ast)
 {
 	u8 jreg;
@@ -424,69 +250,27 @@ static int ast_get_dram_info(struct drm_device *dev)
 	return 0;
 }
 
-struct ast_device *ast_device_create(const struct drm_driver *drv,
-				     struct pci_dev *pdev,
-				     unsigned long flags)
+struct drm_device *ast_device_create(struct pci_dev *pdev,
+				     const struct drm_driver *drv,
+				     enum ast_chip chip,
+				     enum ast_config_mode config_mode,
+				     void __iomem *regs,
+				     void __iomem *ioregs,
+				     bool need_post)
 {
 	struct drm_device *dev;
 	struct ast_device *ast;
-	bool need_post = false;
-	int ret = 0;
+	int ret;
 
 	ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
 	if (IS_ERR(ast))
-		return ast;
+		return ERR_CAST(ast);
 	dev = &ast->base;
 
-	pci_set_drvdata(pdev, dev);
-
-	ret = drmm_mutex_init(dev, &ast->ioregs_lock);
-	if (ret)
-		return ERR_PTR(ret);
-
-	ast->regs = pcim_iomap(pdev, 1, 0);
-	if (!ast->regs)
-		return ERR_PTR(-EIO);
-
-	/*
-	 * After AST2500, MMIO is enabled by default, and it should be adopted
-	 * to be compatible with Arm.
-	 */
-	if (pdev->revision >= 0x40) {
-		ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
-	} else if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) {
-		drm_info(dev, "platform has no IO space, trying MMIO\n");
-		ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
-	}
-
-	/* "map" IO regs if the above hasn't done so already */
-	if (!ast->ioregs) {
-		ast->ioregs = pcim_iomap(pdev, 2, 0);
-		if (!ast->ioregs)
-			return ERR_PTR(-EIO);
-	}
-
-	if (!ast_is_vga_enabled(dev)) {
-		drm_info(dev, "VGA not enabled on entry, requesting chip POST\n");
-		need_post = true;
-	}
-
-	/*
-	 * If VGA isn't enabled, we need to enable now or subsequent
-	 * access to the scratch registers will fail.
-	 */
-	if (need_post)
-		ast_enable_vga(dev);
-
-	/* Enable extended register access */
-	ast_open_key(ast);
-	ret = ast_enable_mmio(ast);
-	if (ret)
-		return ERR_PTR(ret);
-
-	ret = ast_device_config_init(ast);
-	if (ret)
-		return ERR_PTR(ret);
+	ast->chip = chip;
+	ast->config_mode = config_mode;
+	ast->regs = regs;
+	ast->ioregs = ioregs;
 
 	ast_detect_widescreen(ast);
 	ast_detect_tx_chip(ast, need_post);
@@ -517,5 +301,5 @@ struct ast_device *ast_device_create(const struct drm_driver *drv,
 	if (ret)
 		return ERR_PTR(ret);
 
-	return ast;
+	return dev;
 }
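
Since ast_device_create() now returns a struct drm_device * while devm_drm_dev_alloc() produces a struct ast_device *, the error path uses ERR_CAST(). A minimal sketch of that idiom, with a hypothetical foo_alloc():

#include <linux/err.h>

#include <drm/drm_device.h>

struct foo_device {
	struct drm_device base;
};

struct foo_device *foo_alloc(struct device *dev);	/* hypothetical */

static struct drm_device *foo_device_create(struct device *dev)
{
	struct foo_device *foo = foo_alloc(dev);

	if (IS_ERR(foo))
		return ERR_CAST(foo);	/* same errno, new pointer type */

	return &foo->base;
}
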
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index c20534d0ef7c8af580e4e6a3eb2c4c2bd49842eb..a718646a66b8fd519b255783204bd0ca77228e33 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -1358,13 +1358,13 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector)
 	 * Protect access to I/O registers from concurrent modesetting
 	 * by acquiring the I/O-register lock.
 	 */
-	mutex_lock(&ast->ioregs_lock);
+	mutex_lock(&ast->modeset_lock);
 
 	edid = drm_get_edid(connector, &ast_vga_connector->i2c->adapter);
 	if (!edid)
 		goto err_mutex_unlock;
 
-	mutex_unlock(&ast->ioregs_lock);
+	mutex_unlock(&ast->modeset_lock);
 
 	count = drm_add_edid_modes(connector, edid);
 	kfree(edid);
@@ -1372,7 +1372,7 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector)
 	return count;
 
 err_mutex_unlock:
-	mutex_unlock(&ast->ioregs_lock);
+	mutex_unlock(&ast->modeset_lock);
 err_drm_connector_update_edid_property:
 	drm_connector_update_edid_property(connector, NULL);
 	return 0;
@@ -1464,13 +1464,13 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector
 	 * Protect access to I/O registers from concurrent modesetting
 	 * by acquiring the I/O-register lock.
 	 */
-	mutex_lock(&ast->ioregs_lock);
+	mutex_lock(&ast->modeset_lock);
 
 	edid = drm_get_edid(connector, &ast_sil164_connector->i2c->adapter);
 	if (!edid)
 		goto err_mutex_unlock;
 
-	mutex_unlock(&ast->ioregs_lock);
+	mutex_unlock(&ast->modeset_lock);
 
 	count = drm_add_edid_modes(connector, edid);
 	kfree(edid);
@@ -1478,7 +1478,7 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector
 	return count;
 
 err_mutex_unlock:
-	mutex_unlock(&ast->ioregs_lock);
+	mutex_unlock(&ast->modeset_lock);
 err_drm_connector_update_edid_property:
 	drm_connector_update_edid_property(connector, NULL);
 	return 0;
@@ -1670,13 +1670,13 @@ static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
 	 * Protect access to I/O registers from concurrent modesetting
 	 * by acquiring the I/O-register lock.
 	 */
-	mutex_lock(&ast->ioregs_lock);
+	mutex_lock(&ast->modeset_lock);
 
 	succ = ast_astdp_read_edid(connector->dev, edid);
 	if (succ < 0)
 		goto err_mutex_unlock;
 
-	mutex_unlock(&ast->ioregs_lock);
+	mutex_unlock(&ast->modeset_lock);
 
 	drm_connector_update_edid_property(connector, edid);
 	count = drm_add_edid_modes(connector, edid);
@@ -1685,7 +1685,7 @@ static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
 	return count;
 
 err_mutex_unlock:
-	mutex_unlock(&ast->ioregs_lock);
+	mutex_unlock(&ast->modeset_lock);
 	kfree(edid);
 err_drm_connector_update_edid_property:
 	drm_connector_update_edid_property(connector, NULL);
@@ -1870,9 +1870,9 @@ static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *s
 	 * display modes. Protect access to I/O registers by acquiring
 	 * the I/O-register lock. Released in atomic_flush().
 	 */
-	mutex_lock(&ast->ioregs_lock);
+	mutex_lock(&ast->modeset_lock);
 	drm_atomic_helper_commit_tail_rpm(state);
-	mutex_unlock(&ast->ioregs_lock);
+	mutex_unlock(&ast->modeset_lock);
 }
 
 static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs = {
@@ -1910,6 +1910,10 @@ int ast_mode_config_init(struct ast_device *ast)
 	struct drm_connector *physical_connector = NULL;
 	int ret;
 
+	ret = drmm_mutex_init(dev, &ast->modeset_lock);
+	if (ret)
+		return ret;
+
 	ret = drmm_mode_config_init(dev);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 7a993a384314786510fe468b227a7dcec0913e6f..22f548805dfb05a78305125391f5391996c8f4fa 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -77,28 +77,42 @@ ast_set_def_ext_reg(struct drm_device *dev)
 	ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg);
 }
 
-u32 ast_mindwm(struct ast_device *ast, u32 r)
+static u32 __ast_mindwm(void __iomem *regs, u32 r)
 {
-	uint32_t data;
+	u32 data;
 
-	ast_write32(ast, 0xf004, r & 0xffff0000);
-	ast_write32(ast, 0xf000, 0x1);
+	__ast_write32(regs, 0xf004, r & 0xffff0000);
+	__ast_write32(regs, 0xf000, 0x1);
 
 	do {
-		data = ast_read32(ast, 0xf004) & 0xffff0000;
+		data = __ast_read32(regs, 0xf004) & 0xffff0000;
 	} while (data != (r & 0xffff0000));
-	return ast_read32(ast, 0x10000 + (r & 0x0000ffff));
+
+	return __ast_read32(regs, 0x10000 + (r & 0x0000ffff));
 }
 
-void ast_moutdwm(struct ast_device *ast, u32 r, u32 v)
+static void __ast_moutdwm(void __iomem *regs, u32 r, u32 v)
 {
-	uint32_t data;
-	ast_write32(ast, 0xf004, r & 0xffff0000);
-	ast_write32(ast, 0xf000, 0x1);
+	u32 data;
+
+	__ast_write32(regs, 0xf004, r & 0xffff0000);
+	__ast_write32(regs, 0xf000, 0x1);
+
 	do {
-		data = ast_read32(ast, 0xf004) & 0xffff0000;
+		data = __ast_read32(regs, 0xf004) & 0xffff0000;
 	} while (data != (r & 0xffff0000));
-	ast_write32(ast, 0x10000 + (r & 0x0000ffff), v);
+
+	__ast_write32(regs, 0x10000 + (r & 0x0000ffff), v);
+}
+
+u32 ast_mindwm(struct ast_device *ast, u32 r)
+{
+	return __ast_mindwm(ast->regs, r);
+}
+
+void ast_moutdwm(struct ast_device *ast, u32 r, u32 v)
+{
+	__ast_moutdwm(ast->regs, r, v);
 }
 
 /*
@@ -1987,17 +2001,18 @@ static bool ast_dram_init_2500(struct ast_device *ast)
 	return true;
 }
 
-void ast_patch_ahb_2500(struct ast_device *ast)
+void ast_patch_ahb_2500(void __iomem *regs)
 {
-	u32	data;
+	u32 data;
 
 	/* Clear bus lock condition */
-	ast_moutdwm(ast, 0x1e600000, 0xAEED1A03);
-	ast_moutdwm(ast, 0x1e600084, 0x00010000);
-	ast_moutdwm(ast, 0x1e600088, 0x00000000);
-	ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8);
-	data = ast_mindwm(ast, 0x1e6e2070);
-	if (data & 0x08000000) {					/* check fast reset */
+	__ast_moutdwm(regs, 0x1e600000, 0xAEED1A03);
+	__ast_moutdwm(regs, 0x1e600084, 0x00010000);
+	__ast_moutdwm(regs, 0x1e600088, 0x00000000);
+	__ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
+
+	data = __ast_mindwm(regs, 0x1e6e2070);
+	if (data & 0x08000000) { /* check fast reset */
 		/*
 		 * If "Fast restet" is enabled for ARM-ICE debugger,
 		 * then WDT needs to enable, that
@@ -2009,16 +2024,18 @@ void ast_patch_ahb_2500(struct ast_device *ast)
 		 *	[1]:= 1:WDT will be cleared and disabled after timeout occurs
 		 *	[0]:= 1:WDT enable
 		 */
-		ast_moutdwm(ast, 0x1E785004, 0x00000010);
-		ast_moutdwm(ast, 0x1E785008, 0x00004755);
-		ast_moutdwm(ast, 0x1E78500c, 0x00000033);
+		__ast_moutdwm(regs, 0x1E785004, 0x00000010);
+		__ast_moutdwm(regs, 0x1E785008, 0x00004755);
+		__ast_moutdwm(regs, 0x1E78500c, 0x00000033);
 		udelay(1000);
 	}
+
 	do {
-		ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8);
-		data = ast_mindwm(ast, 0x1e6e2000);
-	}	while (data != 1);
-	ast_moutdwm(ast, 0x1e6e207c, 0x08000000);	/* clear fast reset */
+		__ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8);
+		data = __ast_mindwm(regs, 0x1e6e2000);
+	} while (data != 1);
+
+	__ast_moutdwm(regs, 0x1e6e207c, 0x08000000); /* clear fast reset */
 }
 
 void ast_post_chip_2500(struct drm_device *dev)
@@ -2030,7 +2047,7 @@ void ast_post_chip_2500(struct drm_device *dev)
 	reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
 	if ((reg & AST_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */
 		/* Clear bus lock condition */
-		ast_patch_ahb_2500(ast);
+		ast_patch_ahb_2500(ast->regs);
 
 		/* Disable watchdog */
 		ast_moutdwm(ast, 0x1E78502C, 0x00000000);
diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h
index 555286ecf52098475cf06eb753abf62af634cdd6..62dddbf3fe569d6e2b0337f3f2d104a43696268f 100644
--- a/drivers/gpu/drm/ast/ast_reg.h
+++ b/drivers/gpu/drm/ast/ast_reg.h
@@ -10,10 +10,17 @@
  */
 
 #define AST_IO_MM_OFFSET		(0x380)
+#define AST_IO_MM_LENGTH		(128)
 
 #define AST_IO_VGAARI_W			(0x40)
+
 #define AST_IO_VGAMR_W			(0x42)
+#define AST_IO_VGAMR_R			(0x4c)
+#define AST_IO_VGAMR_IOSEL		BIT(0)
+
 #define AST_IO_VGAER			(0x43)
+#define AST_IO_VGAER_VGA_ENABLE		BIT(0)
+
 #define AST_IO_VGASRI			(0x44)
 #define AST_IO_VGADRR			(0x47)
 #define AST_IO_VGADWR			(0x48)
@@ -21,14 +28,15 @@
 #define AST_IO_VGAGRI			(0x4E)
 
 #define AST_IO_VGACRI			(0x54)
+#define AST_IO_VGACR80_PASSWORD		(0xa8)
+#define AST_IO_VGACRA1_VGAIO_DISABLED	BIT(1)
+#define AST_IO_VGACRA1_MMIO_ENABLED	BIT(2)
 #define AST_IO_VGACRCB_HWC_16BPP	BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
 #define AST_IO_VGACRCB_HWC_ENABLED	BIT(1)
 
 #define AST_IO_VGAIR1_R			(0x5A)
 #define AST_IO_VGAIR1_VREFRESH		BIT(3)
 
-#define AST_IO_VGAMR_R			(0x4C)
-
 /*
  * Display Transmitter Type
  */
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index ba82a1142adf730e9ce8ab31b45bf0eb62405504..19d2dc05c397046f8e7a319f1c3d6ce6d40c0424 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -12,6 +12,23 @@ config DRM_PANEL_BRIDGE
 	help
 	  DRM bridge wrapper of DRM panels
 
+config DRM_AUX_BRIDGE
+	tristate
+	depends on DRM_BRIDGE && OF
+	select AUXILIARY_BUS
+	select DRM_PANEL_BRIDGE
+	help
+	  Simple transparent bridge that is used by several non-DRM drivers
+	  to build bridge chains.
+
+config DRM_AUX_HPD_BRIDGE
+	tristate
+	depends on DRM_BRIDGE && OF
+	select AUXILIARY_BUS
+	help
+	  Simple bridge that terminates the bridge chain and provides HPD
+	  support.
+
 menu "Display Interface Bridges"
 	depends on DRM && DRM_BRIDGE
 
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 2b892b7ed59e805e2b0c18960bddbb4d61530754..017b5832733b2b048dd5bd91d54991e3ec76253f 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_DRM_AUX_BRIDGE) += aux-bridge.o
+obj-$(CONFIG_DRM_AUX_HPD_BRIDGE) += aux-hpd-bridge.o
 obj-$(CONFIG_DRM_CHIPONE_ICN6211) += chipone-icn6211.o
 obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o
 obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 8f740154707dbe7651edaf8ee748e090ca34b641..ef31033439bc15a896ed8748b7a62a8b46336c13 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1298,10 +1298,32 @@ static void anx7625_config(struct anx7625_data *ctx)
 			  XTAL_FRQ_SEL, XTAL_FRQ_27M);
 }
 
+static int anx7625_hpd_timer_config(struct anx7625_data *ctx)
+{
+	int ret;
+
+	/* Set irq detect window to 2ms */
+	ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+				HPD_DET_TIMER_BIT0_7, HPD_TIME & 0xFF);
+	ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+				 HPD_DET_TIMER_BIT8_15,
+				 (HPD_TIME >> 8) & 0xFF);
+	ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+				 HPD_DET_TIMER_BIT16_23,
+				 (HPD_TIME >> 16) & 0xFF);
+
+	return ret;
+}
+
+static int anx7625_read_hpd_gpio_config_status(struct anx7625_data *ctx)
+{
+	return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, GPIO_CTRL_2);
+}
+
 static void anx7625_disable_pd_protocol(struct anx7625_data *ctx)
 {
 	struct device *dev = ctx->dev;
-	int ret;
+	int ret, val;
 
 	/* Reset main ocm */
 	ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, 0x88, 0x40);
@@ -1315,6 +1337,19 @@ static void anx7625_disable_pd_protocol(struct anx7625_data *ctx)
 		DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature fail.\n");
 	else
 		DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature succeeded.\n");
+
+	/*
+	 * Make sure the HPD GPIO already be configured after OCM release before
+	 * setting HPD detect window register. Here we poll the status register
+	 * at maximum 40ms, then config HPD irq detect window register
+	 */
+	readx_poll_timeout(anx7625_read_hpd_gpio_config_status,
+			   ctx, val,
+			   ((val & HPD_SOURCE) || (val < 0)),
+			   2000, 2000 * 20);
+
+	/* Set HPD irq detect window to 2ms */
+	anx7625_hpd_timer_config(ctx);
 }
 
 static int anx7625_ocm_loading_check(struct anx7625_data *ctx)
@@ -1437,20 +1472,6 @@ static void anx7625_start_dp_work(struct anx7625_data *ctx)
 
 static int anx7625_read_hpd_status_p0(struct anx7625_data *ctx)
 {
-	int ret;
-
-	/* Set irq detect window to 2ms */
-	ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
-				HPD_DET_TIMER_BIT0_7, HPD_TIME & 0xFF);
-	ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
-				 HPD_DET_TIMER_BIT8_15,
-				 (HPD_TIME >> 8) & 0xFF);
-	ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
-				 HPD_DET_TIMER_BIT16_23,
-				 (HPD_TIME >> 16) & 0xFF);
-	if (ret < 0)
-		return ret;
-
 	return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS);
 }
 
@@ -1464,9 +1485,6 @@ static int _anx7625_hpd_polling(struct anx7625_data *ctx,
 	if (ctx->pdata.intp_irq)
 		return 0;
 
-	/* Delay 200ms for FW HPD de-bounce */
-	msleep(200);
-
 	ret = readx_poll_timeout(anx7625_read_hpd_status_p0,
 				 ctx, val,
 				 ((val & HPD_STATUS) || (val < 0)),
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h
index 5af819611ebce8055c39664b236552fcc00fe0fb..66ebee7f3d832534ec64b780bdfa985bbfcfc896 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.h
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.h
@@ -259,6 +259,10 @@
 #define AP_MIPI_RX_EN BIT(5) /* 1: MIPI RX input in  0: no RX in */
 #define AP_DISABLE_PD BIT(6)
 #define AP_DISABLE_DISPLAY BIT(7)
+
+#define GPIO_CTRL_2   0x49
+#define HPD_SOURCE    BIT(6)
+
 /***************************************************************/
 /* Register definition of device address 0x84 */
 #define  MIPI_PHY_CONTROL_3            0x03
diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c
new file mode 100644
index 0000000000000000000000000000000000000000..49d7c2ab1ecc39a1d3a03a4c3d137bd0ce777aee
--- /dev/null
+++ b/drivers/gpu/drm/bridge/aux-bridge.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Linaro Ltd.
+ *
+ * Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+ */
+#include <linux/auxiliary_bus.h>
+#include <linux/module.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
+
+static DEFINE_IDA(drm_aux_bridge_ida);
+
+static void drm_aux_bridge_release(struct device *dev)
+{
+	struct auxiliary_device *adev = to_auxiliary_dev(dev);
+
+	ida_free(&drm_aux_bridge_ida, adev->id);
+
+	kfree(adev);
+}
+
+static void drm_aux_bridge_unregister_adev(void *_adev)
+{
+	struct auxiliary_device *adev = _adev;
+
+	auxiliary_device_delete(adev);
+	auxiliary_device_uninit(adev);
+}
+
+/**
+ * drm_aux_bridge_register - Create a simple bridge device to link the chain
+ * @parent: device instance providing this bridge
+ *
+ * Creates a simple DRM bridge that doesn't implement any drm_bridge
+ * operations. Such bridges merely fill a place in the bridge chain linking
+ * surrounding DRM bridges.
+ *
+ * Return: zero on success, negative error code on failure
+ */
+int drm_aux_bridge_register(struct device *parent)
+{
+	struct auxiliary_device *adev;
+	int ret;
+
+	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+	if (!adev)
+		return -ENOMEM;
+
+	ret = ida_alloc(&drm_aux_bridge_ida, GFP_KERNEL);
+	if (ret < 0) {
+		kfree(adev);
+		return ret;
+	}
+
+	adev->id = ret;
+	adev->name = "aux_bridge";
+	adev->dev.parent = parent;
+	adev->dev.of_node = parent->of_node;
+	adev->dev.release = drm_aux_bridge_release;
+
+	ret = auxiliary_device_init(adev);
+	if (ret) {
+		ida_free(&drm_aux_bridge_ida, adev->id);
+		kfree(adev);
+		return ret;
+	}
+
+	ret = auxiliary_device_add(adev);
+	if (ret) {
+		auxiliary_device_uninit(adev);
+		return ret;
+	}
+
+	return devm_add_action_or_reset(parent, drm_aux_bridge_unregister_adev, adev);
+}
+EXPORT_SYMBOL_GPL(drm_aux_bridge_register);
+
+struct drm_aux_bridge_data {
+	struct drm_bridge bridge;
+	struct drm_bridge *next_bridge;
+	struct device *dev;
+};
+
+static int drm_aux_bridge_attach(struct drm_bridge *bridge,
+				 enum drm_bridge_attach_flags flags)
+{
+	struct drm_aux_bridge_data *data;
+
+	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+		return -EINVAL;
+
+	data = container_of(bridge, struct drm_aux_bridge_data, bridge);
+
+	return drm_bridge_attach(bridge->encoder, data->next_bridge, bridge,
+				 DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+}
+
+static const struct drm_bridge_funcs drm_aux_bridge_funcs = {
+	.attach	= drm_aux_bridge_attach,
+};
+
+static int drm_aux_bridge_probe(struct auxiliary_device *auxdev,
+				const struct auxiliary_device_id *id)
+{
+	struct drm_aux_bridge_data *data;
+
+	data = devm_kzalloc(&auxdev->dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->dev = &auxdev->dev;
+	data->next_bridge = devm_drm_of_get_bridge(&auxdev->dev, auxdev->dev.of_node, 0, 0);
+	if (IS_ERR(data->next_bridge))
+		return dev_err_probe(&auxdev->dev, PTR_ERR(data->next_bridge),
+				     "failed to acquire drm_bridge\n");
+
+	data->bridge.funcs = &drm_aux_bridge_funcs;
+	data->bridge.of_node = data->dev->of_node;
+
+	return devm_drm_bridge_add(data->dev, &data->bridge);
+}
+
+static const struct auxiliary_device_id drm_aux_bridge_table[] = {
+	{ .name = KBUILD_MODNAME ".aux_bridge" },
+	{},
+};
+MODULE_DEVICE_TABLE(auxiliary, drm_aux_bridge_table);
+
+static struct auxiliary_driver drm_aux_bridge_drv = {
+	.name = "aux_bridge",
+	.id_table = drm_aux_bridge_table,
+	.probe = drm_aux_bridge_probe,
+};
+module_auxiliary_driver(drm_aux_bridge_drv);
+
+MODULE_AUTHOR("Dmitry Baryshkov <dmitry.baryshkov@linaro.org>");
+MODULE_DESCRIPTION("DRM transparent bridge");
+MODULE_LICENSE("GPL");
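
A hedged sketch of the intended caller side of drm_aux_bridge_register(); the foo_* platform driver is hypothetical:

#include <linux/platform_device.h>

#include <drm/bridge/aux-bridge.h>

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/*
	 * Spawns an "aux_bridge" auxiliary device. When the driver
	 * above binds it, a transparent drm_bridge is added that
	 * attaches to the next bridge found in pdev's OF graph.
	 * Teardown is devres-managed against the parent device.
	 */
	ret = drm_aux_bridge_register(&pdev->dev);
	if (ret)
		return ret;

	return 0;
}
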
diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
new file mode 100644
index 0000000000000000000000000000000000000000..5d2ab3a715f9cf07349d1d1d8888e6991d569c36
--- /dev/null
+++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2023 Linaro Ltd.
+ *
+ * Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+ */
+#include <linux/auxiliary_bus.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
+
+static DEFINE_IDA(drm_aux_hpd_bridge_ida);
+
+struct drm_aux_hpd_bridge_data {
+	struct drm_bridge bridge;
+	struct device *dev;
+};
+
+static void drm_aux_hpd_bridge_release(struct device *dev)
+{
+	struct auxiliary_device *adev = to_auxiliary_dev(dev);
+
+	ida_free(&drm_aux_hpd_bridge_ida, adev->id);
+
+	of_node_put(adev->dev.platform_data);
+
+	kfree(adev);
+}
+
+static void drm_aux_hpd_bridge_unregister_adev(void *_adev)
+{
+	struct auxiliary_device *adev = _adev;
+
+	auxiliary_device_delete(adev);
+	auxiliary_device_uninit(adev);
+}
+
+/**
+ * drm_dp_hpd_bridge_register - Create a simple HPD DisplayPort bridge
+ * @parent: device instance providing this bridge
+ * @np: device node pointer corresponding to this bridge instance
+ *
+ * Creates a simple DRM bridge with the type set to
+ * DRM_MODE_CONNECTOR_DisplayPort, which terminates the bridge chain and is
+ * able to send HPD events.
+ *
+ * Return: device instance that will handle the created bridge or an
+ * error pointer.
+ */
+struct device *drm_dp_hpd_bridge_register(struct device *parent,
+					  struct device_node *np)
+{
+	struct auxiliary_device *adev;
+	int ret;
+
+	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+	if (!adev)
+		return ERR_PTR(-ENOMEM);
+
+	ret = ida_alloc(&drm_aux_hpd_bridge_ida, GFP_KERNEL);
+	if (ret < 0) {
+		kfree(adev);
+		return ERR_PTR(ret);
+	}
+
+	adev->id = ret;
+	adev->name = "dp_hpd_bridge";
+	adev->dev.parent = parent;
+	adev->dev.of_node = parent->of_node;
+	adev->dev.release = drm_aux_hpd_bridge_release;
+	adev->dev.platform_data = np;
+
+	ret = auxiliary_device_init(adev);
+	if (ret) {
+		ida_free(&drm_aux_hpd_bridge_ida, adev->id);
+		kfree(adev);
+		return ERR_PTR(ret);
+	}
+
+	ret = auxiliary_device_add(adev);
+	if (ret) {
+		auxiliary_device_uninit(adev);
+		return ERR_PTR(ret);
+	}
+
+	ret = devm_add_action_or_reset(parent, drm_aux_hpd_bridge_unregister_adev, adev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return &adev->dev;
+}
+EXPORT_SYMBOL_GPL(drm_dp_hpd_bridge_register);
+
+/**
+ * drm_aux_hpd_bridge_notify - notify hot plug detection events
+ * @dev: device created for the HPD bridge
+ * @status: output connection status
+ *
+ * A wrapper around drm_bridge_hpd_notify() that is used to report hot plug
+ * detection events for bridges created via drm_dp_hpd_bridge_register().
+ *
+ * This function shall be called in a context that can sleep.
+ */
+void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status)
+{
+	struct auxiliary_device *adev = to_auxiliary_dev(dev);
+	struct drm_aux_hpd_bridge_data *data = auxiliary_get_drvdata(adev);
+
+	if (!data)
+		return;
+
+	drm_bridge_hpd_notify(&data->bridge, status);
+}
+EXPORT_SYMBOL_GPL(drm_aux_hpd_bridge_notify);
+
+static int drm_aux_hpd_bridge_attach(struct drm_bridge *bridge,
+				     enum drm_bridge_attach_flags flags)
+{
+	return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
+}
+
+static const struct drm_bridge_funcs drm_aux_hpd_bridge_funcs = {
+	.attach	= drm_aux_hpd_bridge_attach,
+};
+
+static int drm_aux_hpd_bridge_probe(struct auxiliary_device *auxdev,
+				    const struct auxiliary_device_id *id)
+{
+	struct drm_aux_hpd_bridge_data *data;
+
+	data = devm_kzalloc(&auxdev->dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->dev = &auxdev->dev;
+	data->bridge.funcs = &drm_aux_hpd_bridge_funcs;
+	data->bridge.of_node = dev_get_platdata(data->dev);
+	data->bridge.ops = DRM_BRIDGE_OP_HPD;
+	data->bridge.type = id->driver_data;
+
+	auxiliary_set_drvdata(auxdev, data);
+
+	return devm_drm_bridge_add(data->dev, &data->bridge);
+}
+
+static const struct auxiliary_device_id drm_aux_hpd_bridge_table[] = {
+	{ .name = KBUILD_MODNAME ".dp_hpd_bridge", .driver_data = DRM_MODE_CONNECTOR_DisplayPort, },
+	{},
+};
+MODULE_DEVICE_TABLE(auxiliary, drm_aux_hpd_bridge_table);
+
+static struct auxiliary_driver drm_aux_hpd_bridge_drv = {
+	.name = "aux_hpd_bridge",
+	.id_table = drm_aux_hpd_bridge_table,
+	.probe = drm_aux_hpd_bridge_probe,
+};
+module_auxiliary_driver(drm_aux_hpd_bridge_drv);
+
+MODULE_AUTHOR("Dmitry Baryshkov <dmitry.baryshkov@linaro.org>");
+MODULE_DESCRIPTION("DRM HPD bridge");
+MODULE_LICENSE("GPL");
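
A hedged sketch of the caller side of the HPD bridge; the foo_* driver and its event source are hypothetical:

#include <linux/err.h>

#include <drm/bridge/aux-bridge.h>
#include <drm/drm_connector.h>

static int foo_probe(struct device *dev, struct device_node *dp_node)
{
	struct device *hpd_dev;

	hpd_dev = drm_dp_hpd_bridge_register(dev, dp_node);
	if (IS_ERR(hpd_dev))
		return PTR_ERR(hpd_dev);

	/*
	 * Later, from sleepable context (e.g. a Type-C altmode handler),
	 * plug/unplug events are forwarded into the bridge chain:
	 */
	drm_aux_hpd_bridge_notify(hpd_dev, connector_status_connected);

	return 0;
}
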
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
index 946212a95598142f2375988797b724f9691f60e4..5e3b8edcf79487a4cd9c22a7dd9ef053cf739268 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
@@ -403,7 +403,8 @@ static int _cdns_mhdp_hdcp_disable(struct cdns_mhdp_device *mhdp)
 
 static int _cdns_mhdp_hdcp_enable(struct cdns_mhdp_device *mhdp, u8 content_type)
 {
-	int ret, tries = 3;
+	int ret = -EINVAL;
+	int tries = 3;
 	u32 i;
 
 	for (i = 0; i < tries; i++) {
diff --git a/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c b/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
index 3ff30ce80c5b881db41e9e65e0ff2e4ab6432d22..2347f8dd632f9e7608fd40d6ea00f278de32a545 100644
--- a/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
@@ -226,8 +226,8 @@ dphy_pll_get_configure_from_opts(struct imx93_dsi *dsi,
 	unsigned long fout;
 	unsigned long best_fout = 0;
 	unsigned int fvco_div;
-	unsigned int min_n, max_n, n, best_n;
-	unsigned long m, best_m;
+	unsigned int min_n, max_n, n, best_n = UINT_MAX;
+	unsigned long m, best_m = 0;
 	unsigned long min_delta = ULONG_MAX;
 	unsigned long delta;
 	u64 tmp;
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 03532efb893bbc913f2a31a1f491849464a986f2..273157428c8270741d5cafcac33d44695eb774a4 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -43,6 +43,8 @@ struct lt8912 {
 
 	struct videomode mode;
 
+	struct regulator_bulk_data supplies[7];
+
 	u8 data_lanes;
 	bool is_power_on;
 };
@@ -257,6 +259,12 @@ static int lt8912_free_i2c(struct lt8912 *lt)
 
 static int lt8912_hard_power_on(struct lt8912 *lt)
 {
+	int ret;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(lt->supplies), lt->supplies);
+	if (ret)
+		return ret;
+
 	gpiod_set_value_cansleep(lt->gp_reset, 0);
 	msleep(20);
 
@@ -267,6 +275,9 @@ static void lt8912_hard_power_off(struct lt8912 *lt)
 {
 	gpiod_set_value_cansleep(lt->gp_reset, 1);
 	msleep(20);
+
+	regulator_bulk_disable(ARRAY_SIZE(lt->supplies), lt->supplies);
+
 	lt->is_power_on = false;
 }
 
@@ -634,6 +645,48 @@ static const struct drm_bridge_funcs lt8912_bridge_funcs = {
 	.get_edid = lt8912_bridge_get_edid,
 };
 
+static int lt8912_bridge_resume(struct device *dev)
+{
+	struct lt8912 *lt = dev_get_drvdata(dev);
+	int ret;
+
+	ret = lt8912_hard_power_on(lt);
+	if (ret)
+		return ret;
+
+	ret = lt8912_soft_power_on(lt);
+	if (ret)
+		return ret;
+
+	return lt8912_video_on(lt);
+}
+
+static int lt8912_bridge_suspend(struct device *dev)
+{
+	struct lt8912 *lt = dev_get_drvdata(dev);
+
+	lt8912_hard_power_off(lt);
+
+	return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(lt8912_bridge_pm_ops, lt8912_bridge_suspend, lt8912_bridge_resume);
+
+static int lt8912_get_regulators(struct lt8912 *lt)
+{
+	unsigned int i;
+	const char * const supply_names[] = {
+		"vdd", "vccmipirx", "vccsysclk", "vcclvdstx",
+		"vcchdmitx", "vcclvdspll", "vcchdmipll"
+	};
+
+	for (i = 0; i < ARRAY_SIZE(lt->supplies); i++)
+		lt->supplies[i].supply = supply_names[i];
+
+	return devm_regulator_bulk_get(lt->dev, ARRAY_SIZE(lt->supplies),
+				       lt->supplies);
+}
+
 static int lt8912_parse_dt(struct lt8912 *lt)
 {
 	struct gpio_desc *gp_reset;
@@ -685,6 +738,10 @@ static int lt8912_parse_dt(struct lt8912 *lt)
 		goto err_free_host_node;
 	}
 
+	ret = lt8912_get_regulators(lt);
+	if (ret)
+		goto err_free_host_node;
+
 	of_node_put(port_node);
 	return 0;
 
@@ -770,6 +827,7 @@ static struct i2c_driver lt8912_i2c_driver = {
 	.driver = {
 		.name = "lt8912",
 		.of_match_table = lt8912_dt_match,
+		.pm = pm_sleep_ptr(&lt8912_bridge_pm_ops),
 	},
 	.probe = lt8912_probe,
 	.remove = lt8912_remove,
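
The PM wiring above relies on DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr(), so the ops reference compiles away when CONFIG_PM_SLEEP is disabled. A minimal sketch of the pattern, with hypothetical foo_* names:

#include <linux/i2c.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* power the hardware down */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* power up and reprogram the hardware */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct i2c_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm = pm_sleep_ptr(&foo_pm_ops),  /* NULL if !CONFIG_PM_SLEEP */
	},
};
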
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index d81920227a8aeb293b1dfbd20403a17545b01ba1..7c0076e499533abf041a2235a62083f0489bbf76 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -54,13 +54,13 @@ static int ptn3460_read_bytes(struct ptn3460_bridge *ptn_bridge, char addr,
 	int ret;
 
 	ret = i2c_master_send(ptn_bridge->client, &addr, 1);
-	if (ret <= 0) {
+	if (ret < 0) {
 		DRM_ERROR("Failed to send i2c command, ret=%d\n", ret);
 		return ret;
 	}
 
 	ret = i2c_master_recv(ptn_bridge->client, buf, len);
-	if (ret <= 0) {
+	if (ret < 0) {
 		DRM_ERROR("Failed to recv i2c data, ret=%d\n", ret);
 		return ret;
 	}
@@ -78,7 +78,7 @@ static int ptn3460_write_byte(struct ptn3460_bridge *ptn_bridge, char addr,
 	buf[1] = val;
 
 	ret = i2c_master_send(ptn_bridge->client, buf, ARRAY_SIZE(buf));
-	if (ret <= 0) {
+	if (ret < 0) {
 		DRM_ERROR("Failed to send i2c command, ret=%d\n", ret);
 		return ret;
 	}
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index ef2e373606ba327c5eea3bd66abb4296879d1bc0..615cc8f950d7b3e11d174f61559a13088af91179 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -2273,7 +2273,7 @@ static int tc_probe(struct i2c_client *client)
 	} else {
 		if (tc->hpd_pin < 0 || tc->hpd_pin > 1) {
 			dev_err(dev, "failed to parse HPD number\n");
-			return ret;
+			return -EINVAL;
 		}
 	}
 
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index c45c07840f645a3216e0f0a8986920f1bd17d997..5b8e1dfc458da996ef1bf4bba92c054f5a991901 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -1413,11 +1413,9 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 	int ret;
 
 	if (!pdata->pwm_enabled) {
-		ret = pm_runtime_get_sync(pdata->dev);
-		if (ret < 0) {
-			pm_runtime_put_sync(pdata->dev);
+		ret = pm_runtime_resume_and_get(pdata->dev);
+		if (ret < 0)
 			return ret;
-		}
 	}
 
 	if (state->enabled) {
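
pm_runtime_resume_and_get() drops the usage count itself on failure, which is what removes the error-path pm_runtime_put_sync() above. A hedged sketch of the pattern on any runtime-PM-enabled device:

#include <linux/pm_runtime.h>

static int foo_do_work(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* usage count already balanced */

	/* ... access the now-resumed hardware ... */

	pm_runtime_put(dev);
	return 0;
}
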
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
deleted file mode 100644
index a4ad6fd13abcf63c88e91900273acc88f861262e..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ /dev/null
@@ -1,451 +0,0 @@
-/*
- * \file drm_agpsupport.c
- * DRM support for AGP/GART backend
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-
-#if IS_ENABLED(CONFIG_AGP)
-#include <asm/agp.h>
-#endif
-
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_print.h>
-
-#include "drm_legacy.h"
-
-#if IS_ENABLED(CONFIG_AGP)
-
-/*
- * Get AGP information.
- *
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device has been initialized and acquired and fills in the
- * drm_agp_info structure with the information in drm_agp_head::agp_info.
- */
-int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info)
-{
-	struct agp_kern_info *kern;
-
-	if (!dev->agp || !dev->agp->acquired)
-		return -EINVAL;
-
-	kern = &dev->agp->agp_info;
-	info->agp_version_major = kern->version.major;
-	info->agp_version_minor = kern->version.minor;
-	info->mode = kern->mode;
-	info->aperture_base = kern->aper_base;
-	info->aperture_size = kern->aper_size * 1024 * 1024;
-	info->memory_allowed = kern->max_memory << PAGE_SHIFT;
-	info->memory_used = kern->current_memory << PAGE_SHIFT;
-	info->id_vendor = kern->device->vendor;
-	info->id_device = kern->device->device;
-
-	return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_info);
-
-int drm_legacy_agp_info_ioctl(struct drm_device *dev, void *data,
-			      struct drm_file *file_priv)
-{
-	struct drm_agp_info *info = data;
-	int err;
-
-	err = drm_legacy_agp_info(dev, info);
-	if (err)
-		return err;
-
-	return 0;
-}
-
-/*
- * Acquire the AGP device.
- *
- * \param dev DRM device that is to acquire AGP.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device hasn't been acquired before and calls
- * \c agp_backend_acquire.
- */
-int drm_legacy_agp_acquire(struct drm_device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev->dev);
-
-	if (!dev->agp)
-		return -ENODEV;
-	if (dev->agp->acquired)
-		return -EBUSY;
-	dev->agp->bridge = agp_backend_acquire(pdev);
-	if (!dev->agp->bridge)
-		return -ENODEV;
-	dev->agp->acquired = 1;
-	return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_acquire);
-
-/*
- * Acquire the AGP device (ioctl).
- *
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device hasn't been acquired before and calls
- * \c agp_backend_acquire.
- */
-int drm_legacy_agp_acquire_ioctl(struct drm_device *dev, void *data,
-				 struct drm_file *file_priv)
-{
-	return drm_legacy_agp_acquire((struct drm_device *)file_priv->minor->dev);
-}
-
-/*
- * Release the AGP device.
- *
- * \param dev DRM device that is to release AGP.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device has been acquired and calls \c agp_backend_release.
- */
-int drm_legacy_agp_release(struct drm_device *dev)
-{
-	if (!dev->agp || !dev->agp->acquired)
-		return -EINVAL;
-	agp_backend_release(dev->agp->bridge);
-	dev->agp->acquired = 0;
-	return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_release);
-
-int drm_legacy_agp_release_ioctl(struct drm_device *dev, void *data,
-				 struct drm_file *file_priv)
-{
-	return drm_legacy_agp_release(dev);
-}
-
-/*
- * Enable the AGP bus.
- *
- * \param dev DRM device that has previously acquired AGP.
- * \param mode Requested AGP mode.
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device has been acquired but not enabled, and calls
- * \c agp_enable.
- */
-int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode)
-{
-	if (!dev->agp || !dev->agp->acquired)
-		return -EINVAL;
-
-	dev->agp->mode = mode.mode;
-	agp_enable(dev->agp->bridge, mode.mode);
-	dev->agp->enabled = 1;
-	return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_enable);
-
-int drm_legacy_agp_enable_ioctl(struct drm_device *dev, void *data,
-				struct drm_file *file_priv)
-{
-	struct drm_agp_mode *mode = data;
-
-	return drm_legacy_agp_enable(dev, *mode);
-}
-
-/*
- * Allocate AGP memory.
- *
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and has been acquired, allocates the
- * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it.
- */
-int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
-{
-	struct drm_agp_mem *entry;
-	struct agp_memory *memory;
-	unsigned long pages;
-	u32 type;
-
-	if (!dev->agp || !dev->agp->acquired)
-		return -EINVAL;
-	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-	if (!entry)
-		return -ENOMEM;
-
-	pages = DIV_ROUND_UP(request->size, PAGE_SIZE);
-	type = (u32) request->type;
-	memory = agp_allocate_memory(dev->agp->bridge, pages, type);
-	if (!memory) {
-		kfree(entry);
-		return -ENOMEM;
-	}
-
-	entry->handle = (unsigned long)memory->key + 1;
-	entry->memory = memory;
-	entry->bound = 0;
-	entry->pages = pages;
-	list_add(&entry->head, &dev->agp->memory);
-
-	request->handle = entry->handle;
-	request->physical = memory->physical;
-
-	return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_alloc);
-
-
-int drm_legacy_agp_alloc_ioctl(struct drm_device *dev, void *data,
-			struct drm_file *file_priv)
-{
-	struct drm_agp_buffer *request = data;
-
-	return drm_legacy_agp_alloc(dev, request);
-}
-
-/*
- * Search for the AGP memory entry associated with a handle.
- *
- * \param dev DRM device structure.
- * \param handle AGP memory handle.
- * \return pointer to the drm_agp_mem structure associated with \p handle.
- *
- * Walks through drm_agp_head::memory until finding a matching handle.
- */
-static struct drm_agp_mem *drm_legacy_agp_lookup_entry(struct drm_device *dev,
-						       unsigned long handle)
-{
-	struct drm_agp_mem *entry;
-
-	list_for_each_entry(entry, &dev->agp->memory, head) {
-		if (entry->handle == handle)
-			return entry;
-	}
-	return NULL;
-}
-
-/*
- * Unbind AGP memory from the GATT (ioctl).
- *
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and acquired, looks-up the AGP memory
- * entry and passes it to the unbind_agp() function.
- */
-int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
-{
-	struct drm_agp_mem *entry;
-	int ret;
-
-	if (!dev->agp || !dev->agp->acquired)
-		return -EINVAL;
-	entry = drm_legacy_agp_lookup_entry(dev, request->handle);
-	if (!entry || !entry->bound)
-		return -EINVAL;
-	ret = agp_unbind_memory(entry->memory);
-	if (ret == 0)
-		entry->bound = 0;
-	return ret;
-}
-EXPORT_SYMBOL(drm_legacy_agp_unbind);
-
-
-int drm_legacy_agp_unbind_ioctl(struct drm_device *dev, void *data,
-				struct drm_file *file_priv)
-{
-	struct drm_agp_binding *request = data;
-
-	return drm_legacy_agp_unbind(dev, request);
-}
-
-/*
- * Bind AGP memory into the GATT (ioctl)
- *
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and has been acquired and that no memory
- * is currently bound into the GATT. Looks-up the AGP memory entry and passes
- * it to bind_agp() function.
- */
-int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
-{
-	struct drm_agp_mem *entry;
-	int retcode;
-	int page;
-
-	if (!dev->agp || !dev->agp->acquired)
-		return -EINVAL;
-	entry = drm_legacy_agp_lookup_entry(dev, request->handle);
-	if (!entry || entry->bound)
-		return -EINVAL;
-	page = DIV_ROUND_UP(request->offset, PAGE_SIZE);
-	retcode = agp_bind_memory(entry->memory, page);
-	if (retcode)
-		return retcode;
-	entry->bound = dev->agp->base + (page << PAGE_SHIFT);
-	DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
-		  dev->agp->base, entry->bound);
-	return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_bind);
-
-
-int drm_legacy_agp_bind_ioctl(struct drm_device *dev, void *data,
-			      struct drm_file *file_priv)
-{
-	struct drm_agp_binding *request = data;
-
-	return drm_legacy_agp_bind(dev, request);
-}
-
-/*
- * Free AGP memory (ioctl).
- *
- * \return zero on success or a negative number on failure.
- *
- * Verifies the AGP device is present and has been acquired and looks up the
- * AGP memory entry. If the memory is currently bound, unbind it via
- * unbind_agp(). Frees it via free_agp() as well as the entry itself
- * and unlinks from the doubly linked list it's inserted in.
- */
-int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
-{
-	struct drm_agp_mem *entry;
-
-	if (!dev->agp || !dev->agp->acquired)
-		return -EINVAL;
-	entry = drm_legacy_agp_lookup_entry(dev, request->handle);
-	if (!entry)
-		return -EINVAL;
-	if (entry->bound)
-		agp_unbind_memory(entry->memory);
-
-	list_del(&entry->head);
-
-	agp_free_memory(entry->memory);
-	kfree(entry);
-	return 0;
-}
-EXPORT_SYMBOL(drm_legacy_agp_free);
-
-
-int drm_legacy_agp_free_ioctl(struct drm_device *dev, void *data,
-			      struct drm_file *file_priv)
-{
-	struct drm_agp_buffer *request = data;
-
-	return drm_legacy_agp_free(dev, request);
-}
-
-/*
- * Initialize the AGP resources.
- *
- * \return pointer to a drm_agp_head structure.
- *
- * Gets the drm_agp_t structure which is made available by the agpgart module
- * via the inter_module_* functions. Creates and initializes a drm_agp_head
- * structure.
- *
- * Note that final cleanup of the kmalloced structure is directly done in
- * drm_pci_agp_destroy.
- */
-struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev->dev);
-	struct drm_agp_head *head = NULL;
-
-	head = kzalloc(sizeof(*head), GFP_KERNEL);
-	if (!head)
-		return NULL;
-	head->bridge = agp_find_bridge(pdev);
-	if (!head->bridge) {
-		head->bridge = agp_backend_acquire(pdev);
-		if (!head->bridge) {
-			kfree(head);
-			return NULL;
-		}
-		agp_copy_info(head->bridge, &head->agp_info);
-		agp_backend_release(head->bridge);
-	} else {
-		agp_copy_info(head->bridge, &head->agp_info);
-	}
-	if (head->agp_info.chipset == NOT_SUPPORTED) {
-		kfree(head);
-		return NULL;
-	}
-	INIT_LIST_HEAD(&head->memory);
-	head->cant_use_aperture = head->agp_info.cant_use_aperture;
-	head->page_mask = head->agp_info.page_mask;
-	head->base = head->agp_info.aper_base;
-	return head;
-}
-/* Only exported for i810.ko */
-EXPORT_SYMBOL(drm_legacy_agp_init);
-
-/**
- * drm_legacy_agp_clear - Clear AGP resource list
- * @dev: DRM device
- *
- * Iterate over all AGP resources and remove them, but keep the AGP head
- * intact so it can still be used. It is safe to call this if AGP is disabled or
- * was already removed.
- *
- * Cleanup is only done for drivers that have DRIVER_LEGACY set.
- */
-void drm_legacy_agp_clear(struct drm_device *dev)
-{
-	struct drm_agp_mem *entry, *tempe;
-
-	if (!dev->agp)
-		return;
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return;
-
-	list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
-		if (entry->bound)
-			agp_unbind_memory(entry->memory);
-		agp_free_memory(entry->memory);
-		kfree(entry);
-	}
-	INIT_LIST_HEAD(&dev->agp->memory);
-
-	if (dev->agp->acquired)
-		drm_legacy_agp_release(dev);
-
-	dev->agp->acquired = 0;
-	dev->agp->enabled = 0;
-}
-
-#endif
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index f1a503aafe5aa18526a5bbe92a79c475433906c4..c31fc0b48c316ae210892efbb65e2b6962595b8a 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1773,6 +1773,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
 	struct drm_crtc *crtc;
 	struct drm_connector *connector;
 	struct drm_connector_list_iter conn_iter;
+	struct drm_private_obj *obj;
 
 	if (!drm_drv_uses_atomic_modeset(dev))
 		return;
@@ -1801,6 +1802,14 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
 	if (take_locks)
 		drm_modeset_unlock(&dev->mode_config.connection_mutex);
 	drm_connector_list_iter_end(&conn_iter);
+
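+	/* Finally, dump the state of all driver-private objects. */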
+	list_for_each_entry(obj, &config->privobj_list, head) {
+		if (take_locks)
+			drm_modeset_lock(&obj->lock, NULL);
+		drm_atomic_private_obj_print_state(p, obj->state);
+		if (take_locks)
+			drm_modeset_unlock(&obj->lock);
+	}
 }
 
 /**
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 784e63d70a421ea0272e64e1abd5b4ee113b45dc..54975de44a0e34f5fb3d6f29e39d6c5bd865ace9 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -275,6 +275,20 @@ void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *plane_state,
 			plane_state->normalized_zpos = val;
 		}
 	}
+
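+	/* Reset any exposed cursor hotspot properties to their default values. */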
+	if (plane->hotspot_x_property) {
+		if (!drm_object_property_get_default_value(&plane->base,
+							   plane->hotspot_x_property,
+							   &val))
+			plane_state->hotspot_x = val;
+	}
+
+	if (plane->hotspot_y_property) {
+		if (!drm_object_property_get_default_value(&plane->base,
+							   plane->hotspot_y_property,
+							   &val))
+			plane_state->hotspot_y = val;
+	}
 }
 EXPORT_SYMBOL(__drm_atomic_helper_plane_state_reset);
 
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 98d3b10c08ae194071777b4abb2ed8c2e43286a5..aee4a65d495910081157d8f937ec86e38b5b4719 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -593,6 +593,22 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
 	} else if (plane->funcs->atomic_set_property) {
 		return plane->funcs->atomic_set_property(plane, state,
 				property, val);
+	} else if (property == plane->hotspot_x_property) {
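+		/* Hotspot coordinates are only meaningful on cursor planes. */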
+		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+			drm_dbg_atomic(plane->dev,
+				       "[PLANE:%d:%s] is not a cursor plane: 0x%llx\n",
+				       plane->base.id, plane->name, val);
+			return -EINVAL;
+		}
+		state->hotspot_x = val;
+	} else if (property == plane->hotspot_y_property) {
+		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+			drm_dbg_atomic(plane->dev,
+				       "[PLANE:%d:%s] is not a cursor plane: 0x%llx\n",
+				       plane->base.id, plane->name, val);
+			return -EINVAL;
+		}
+		state->hotspot_y = val;
 	} else {
 		drm_dbg_atomic(plane->dev,
 			       "[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
@@ -653,6 +669,10 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
 		*val = state->scaling_filter;
 	} else if (plane->funcs->atomic_get_property) {
 		return plane->funcs->atomic_get_property(plane, state, property, val);
+	} else if (property == plane->hotspot_x_property) {
+		*val = state->hotspot_x;
+	} else if (property == plane->hotspot_y_property) {
+		*val = state->hotspot_y;
 	} else {
 		drm_dbg_atomic(dev,
 			       "[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
@@ -1006,13 +1026,28 @@ int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
 	return ret;
 }
 
+static int drm_atomic_check_prop_changes(int ret, uint64_t old_val, uint64_t prop_value,
+					 struct drm_property *prop)
+{
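+	/*
+	 * During an async flip only the framebuffer may change; any other
+	 * property must keep its current value, so reject the update if the
+	 * old and requested values differ (or the lookup failed).
+	 */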
+	if (ret != 0 || old_val != prop_value) {
+		drm_dbg_atomic(prop->dev,
+			       "[PROP:%d:%s] No prop can be changed during async flip\n",
+			       prop->base.id, prop->name);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int drm_atomic_set_property(struct drm_atomic_state *state,
 			    struct drm_file *file_priv,
 			    struct drm_mode_object *obj,
 			    struct drm_property *prop,
-			    uint64_t prop_value)
+			    u64 prop_value,
+			    bool async_flip)
 {
 	struct drm_mode_object *ref;
+	u64 old_val;
 	int ret;
 
 	if (!drm_property_change_valid_get(prop, prop_value, &ref))
@@ -1029,6 +1064,13 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
 			break;
 		}
 
+		if (async_flip) {
+			ret = drm_atomic_connector_get_property(connector, connector_state,
+								prop, &old_val);
+			ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
+			break;
+		}
+
 		ret = drm_atomic_connector_set_property(connector,
 				connector_state, file_priv,
 				prop, prop_value);
@@ -1044,6 +1086,13 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
 			break;
 		}
 
+		if (async_flip) {
+			ret = drm_atomic_crtc_get_property(crtc, crtc_state,
+							   prop, &old_val);
+			ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
+			break;
+		}
+
 		ret = drm_atomic_crtc_set_property(crtc,
 				crtc_state, prop, prop_value);
 		break;
@@ -1051,6 +1100,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
 	case DRM_MODE_OBJECT_PLANE: {
 		struct drm_plane *plane = obj_to_plane(obj);
 		struct drm_plane_state *plane_state;
+		struct drm_mode_config *config = &plane->dev->mode_config;
 
 		plane_state = drm_atomic_get_plane_state(state, plane);
 		if (IS_ERR(plane_state)) {
@@ -1058,6 +1108,21 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
 			break;
 		}
 
+		if (async_flip && prop != config->prop_fb_id) {
+			ret = drm_atomic_plane_get_property(plane, plane_state,
+							    prop, &old_val);
+			ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
+			break;
+		}
+
+		if (async_flip && plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY) {
+			drm_dbg_atomic(prop->dev,
+				       "[OBJECT:%d] Only primary planes can be changed during async flip\n",
+				       obj->id);
+			ret = -EINVAL;
+			break;
+		}
+
 		ret = drm_atomic_plane_set_property(plane,
 				plane_state, file_priv,
 				prop, prop_value);
@@ -1323,6 +1388,18 @@ static void complete_signaling(struct drm_device *dev,
 	kfree(fence_state);
 }
 
+static void
+set_async_flip(struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int i;
+
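+	/* Propagate DRM_MODE_PAGE_FLIP_ASYNC to every CRTC state in the commit. */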
+	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+		crtc_state->async_flip = true;
+	}
+}
+
 int drm_mode_atomic_ioctl(struct drm_device *dev,
 			  void *data, struct drm_file *file_priv)
 {
@@ -1337,6 +1414,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
 	struct drm_out_fence_state *fence_state;
 	int ret = 0;
 	unsigned int i, j, num_fences;
+	bool async_flip = false;
 
 	/* disallow for drivers not supporting atomic: */
 	if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@@ -1363,9 +1441,13 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
 	}
 
 	if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) {
-		drm_dbg_atomic(dev,
-			       "commit failed: invalid flag DRM_MODE_PAGE_FLIP_ASYNC\n");
-		return -EINVAL;
+		if (!dev->mode_config.async_page_flip) {
+			drm_dbg_atomic(dev,
+				       "commit failed: DRM_MODE_PAGE_FLIP_ASYNC not supported\n");
+			return -EINVAL;
+		}
+
+		async_flip = true;
 	}
 
 	/* can't test and expect an event at the same time. */
@@ -1450,8 +1532,8 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
 				goto out;
 			}
 
-			ret = drm_atomic_set_property(state, file_priv,
-						      obj, prop, prop_value);
+			ret = drm_atomic_set_property(state, file_priv, obj,
+						      prop, prop_value, async_flip);
 			if (ret) {
 				drm_mode_object_put(obj);
 				goto out;
@@ -1468,6 +1550,9 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
 	if (ret)
 		goto out;
 
+	if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC)
+		set_async_flip(state);
+
 	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
 		ret = drm_atomic_check_only(state);
 	} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 2ed2585ded3784882dd90260e070e27017a6d1f2..252c105d614ff2bd8acb7e89a56dd24cac9efb5c 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -37,13 +37,12 @@
 #include <drm/drm_print.h>
 
 #include "drm_internal.h"
-#include "drm_legacy.h"
 
 /**
  * DOC: master and authentication
  *
  * &struct drm_master is used to track groups of clients with open
- * primary/legacy device nodes. For every &struct drm_file which has had at
+ * primary device nodes. For every &struct drm_file which has at
 * least once successfully become the device master (either through the
 * SET_MASTER IOCTL, or implicitly through opening the primary device node when
 * no one else is the current master at that time) there exists one &drm_master.
@@ -139,7 +138,6 @@ struct drm_master *drm_master_create(struct drm_device *dev)
 		return NULL;
 
 	kref_init(&master->refcount);
-	drm_master_legacy_init(master);
 	idr_init_base(&master->magic_map, 1);
 	master->dev = dev;
 
@@ -365,8 +363,6 @@ void drm_master_release(struct drm_file *file_priv)
 	if (!drm_is_current_master_locked(file_priv))
 		goto out;
 
-	drm_legacy_lock_master_cleanup(dev, master);
-
 	if (dev->master == file_priv->master)
 		drm_drop_master(dev, file_priv);
 out:
@@ -429,8 +425,6 @@ static void drm_master_destroy(struct kref *kref)
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		drm_lease_destroy(master);
 
-	drm_legacy_master_rmmaps(dev, master);
-
 	idr_destroy(&master->magic_map);
 	idr_destroy(&master->leases);
 	idr_destroy(&master->lessee_idr);
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 30d66bee0ec6a9d750947dd92256ad7ee5168a42..cee3188adf3d54bca2b6cb22e51329295d980589 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -1347,50 +1347,6 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np)
 EXPORT_SYMBOL(of_drm_find_bridge);
 #endif
 
-#ifdef CONFIG_DEBUG_FS
-static int drm_bridge_chains_info(struct seq_file *m, void *data)
-{
-	struct drm_debugfs_entry *entry = m->private;
-	struct drm_device *dev = entry->dev;
-	struct drm_printer p = drm_seq_file_printer(m);
-	struct drm_mode_config *config = &dev->mode_config;
-	struct drm_encoder *encoder;
-	unsigned int bridge_idx = 0;
-
-	list_for_each_entry(encoder, &config->encoder_list, head) {
-		struct drm_bridge *bridge;
-
-		drm_printf(&p, "encoder[%u]\n", encoder->base.id);
-
-		drm_for_each_bridge_in_chain(encoder, bridge) {
-			drm_printf(&p, "\tbridge[%u] type: %u, ops: %#x",
-				   bridge_idx, bridge->type, bridge->ops);
-
-#ifdef CONFIG_OF
-			if (bridge->of_node)
-				drm_printf(&p, ", OF: %pOFfc", bridge->of_node);
-#endif
-
-			drm_printf(&p, "\n");
-
-			bridge_idx++;
-		}
-	}
-
-	return 0;
-}
-
-static const struct drm_debugfs_info drm_bridge_debugfs_list[] = {
-	{ "bridge_chains", drm_bridge_chains_info, 0 },
-};
-
-void drm_bridge_debugfs_init(struct drm_device *dev)
-{
-	drm_debugfs_add_files(dev, drm_bridge_debugfs_list,
-			      ARRAY_SIZE(drm_bridge_debugfs_list));
-}
-#endif
-
 MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
 MODULE_DESCRIPTION("DRM bridge infrastructure");
 MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c
index 8239ad43aed58c4112cc4e791df3889dead6aa5c..3acd67021ec62fbb4699cd7b05e97c3632d36511 100644
--- a/drivers/gpu/drm/drm_bridge_connector.c
+++ b/drivers/gpu/drm/drm_bridge_connector.c
@@ -198,12 +198,6 @@ static void drm_bridge_connector_destroy(struct drm_connector *connector)
 	struct drm_bridge_connector *bridge_connector =
 		to_drm_bridge_connector(connector);
 
-	if (bridge_connector->bridge_hpd) {
-		struct drm_bridge *hpd = bridge_connector->bridge_hpd;
-
-		drm_bridge_hpd_disable(hpd);
-	}
-
 	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
deleted file mode 100644
index 86700560fea28d3b3d5f6bad439772797db278be..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/drm_bufs.c
+++ /dev/null
@@ -1,1627 +0,0 @@
-/*
- * Legacy: Generic DRM Buffer Management
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Author: Rickard E. (Rik) Faith <faith@valinux.com>
- * Author: Gareth Hughes <gareth@valinux.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/log2.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/nospec.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-
-#include <asm/shmparam.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_print.h>
-
-#include "drm_legacy.h"
-
-
-static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
-						  struct drm_local_map *map)
-{
-	struct drm_map_list *entry;
-
-	list_for_each_entry(entry, &dev->maplist, head) {
-		/*
-		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
-		 * while PCI resources may live above that, we only compare the
-		 * lower 32 bits of the map offset for maps of type
-		 * _DRM_FRAME_BUFFER or _DRM_REGISTERS.
-		 * It is assumed that if a driver has more than one resource
-		 * of each type, the lower 32 bits are different.
-		 */
-		if (!entry->map ||
-		    map->type != entry->map->type ||
-		    entry->master != dev->master)
-			continue;
-		switch (map->type) {
-		case _DRM_SHM:
-			if (map->flags != _DRM_CONTAINS_LOCK)
-				break;
-			return entry;
-		case _DRM_REGISTERS:
-		case _DRM_FRAME_BUFFER:
-			if ((entry->map->offset & 0xffffffff) ==
-			    (map->offset & 0xffffffff))
-				return entry;
-			break;
-		default: /* Make gcc happy */
-			break;
-		}
-		if (entry->map->offset == map->offset)
-			return entry;
-	}
-
-	return NULL;
-}
-
-static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
-			  unsigned long user_token, int hashed_handle, int shm)
-{
-	int use_hashed_handle, shift;
-	unsigned long add;
-
-#if (BITS_PER_LONG == 64)
-	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
-#elif (BITS_PER_LONG == 32)
-	use_hashed_handle = hashed_handle;
-#else
-#error Unsupported long size. Neither 64 nor 32 bits.
-#endif
-
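-	/*
-	 * Fast path: try the page number of the user token directly as the
-	 * hash key; fall back to a hashed handle if it's already taken.
-	 */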
-	if (!use_hashed_handle) {
-		int ret;
-
-		hash->key = user_token >> PAGE_SHIFT;
-		ret = drm_ht_insert_item(&dev->map_hash, hash);
-		if (ret != -EINVAL)
-			return ret;
-	}
-
-	shift = 0;
-	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
-	if (shm && (SHMLBA > PAGE_SIZE)) {
-		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;
-
-		/* For shared memory, we have to preserve the SHMLBA
-		 * bits of the eventual vma->vm_pgoff value during
-		 * mmap().  Otherwise we run into cache aliasing problems
-		 * on some platforms.  On these platforms, the pgoff of
-		 * a mmap() request is used to pick a suitable virtual
-		 * address for the mmap() region such that it will not
-		 * cause cache aliasing problems.
-		 *
-		 * Therefore, make sure the SHMLBA relevant bits of the
-		 * hash value we use are equal to those in the original
-		 * kernel virtual address.
-		 */
-		shift = bits;
-		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
-	}
-
-	return drm_ht_just_insert_please(&dev->map_hash, hash,
-					 user_token, 32 - PAGE_SHIFT - 3,
-					 shift, add);
-}
-
-/*
- * Core function to create a range of memory available for mapping by a
- * non-root process.
- *
- * Adjusts the memory offset to its absolute value according to the mapping
- * type.  Adds the map to the map list drm_device::maplist. Adds MTRR's where
- * applicable and if supported by the kernel.
- */
-static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
-			   unsigned int size, enum drm_map_type type,
-			   enum drm_map_flags flags,
-			   struct drm_map_list **maplist)
-{
-	struct drm_local_map *map;
-	struct drm_map_list *list;
-	unsigned long user_token;
-	int ret;
-
-	map = kmalloc(sizeof(*map), GFP_KERNEL);
-	if (!map)
-		return -ENOMEM;
-
-	map->offset = offset;
-	map->size = size;
-	map->flags = flags;
-	map->type = type;
-
-	/* Only allow shared memory to be removable since we only keep enough
-	 * bookkeeping information about shared memory to allow for removal
-	 * when processes fork.
-	 */
-	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
-		kfree(map);
-		return -EINVAL;
-	}
-	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
-		  (unsigned long long)map->offset, map->size, map->type);
-
-	/* Page-align _DRM_SHM maps. They are allocated here, so this creates no
-	 * security hole, and it works around various broken drivers that use
-	 * a non-aligned quantity to map the SAREA. --BenH
-	 */
-	if (map->type == _DRM_SHM)
-		map->size = PAGE_ALIGN(map->size);
-
-	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
-		kfree(map);
-		return -EINVAL;
-	}
-	map->mtrr = -1;
-	map->handle = NULL;
-
-	switch (map->type) {
-	case _DRM_REGISTERS:
-	case _DRM_FRAME_BUFFER:
-#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
-		if (map->offset + (map->size-1) < map->offset ||
-		    map->offset < virt_to_phys(high_memory)) {
-			kfree(map);
-			return -EINVAL;
-		}
-#endif
-		/* Some drivers preinitialize some maps, without the X Server
-		 * needing to be aware of it.  Therefore, we just return success
-		 * when the server tries to create a duplicate map.
-		 */
-		list = drm_find_matching_map(dev, map);
-		if (list != NULL) {
-			if (list->map->size != map->size) {
-				DRM_DEBUG("Matching maps of type %d with "
-					  "mismatched sizes, (%ld vs %ld)\n",
-					  map->type, map->size,
-					  list->map->size);
-				list->map->size = map->size;
-			}
-
-			kfree(map);
-			*maplist = list;
-			return 0;
-		}
-
-		if (map->type == _DRM_FRAME_BUFFER ||
-		    (map->flags & _DRM_WRITE_COMBINING)) {
-			map->mtrr =
-				arch_phys_wc_add(map->offset, map->size);
-		}
-		if (map->type == _DRM_REGISTERS) {
-			if (map->flags & _DRM_WRITE_COMBINING)
-				map->handle = ioremap_wc(map->offset,
-							 map->size);
-			else
-				map->handle = ioremap(map->offset, map->size);
-			if (!map->handle) {
-				kfree(map);
-				return -ENOMEM;
-			}
-		}
-
-		break;
-	case _DRM_SHM:
-		list = drm_find_matching_map(dev, map);
-		if (list != NULL) {
-			if (list->map->size != map->size) {
-				DRM_DEBUG("Matching maps of type %d with "
-					  "mismatched sizes, (%ld vs %ld)\n",
-					  map->type, map->size, list->map->size);
-				list->map->size = map->size;
-			}
-
-			kfree(map);
-			*maplist = list;
-			return 0;
-		}
-		map->handle = vmalloc_user(map->size);
-		DRM_DEBUG("%lu %d %p\n",
-			  map->size, order_base_2(map->size), map->handle);
-		if (!map->handle) {
-			kfree(map);
-			return -ENOMEM;
-		}
-		map->offset = (unsigned long)map->handle;
-		if (map->flags & _DRM_CONTAINS_LOCK) {
-			/* Prevent a 2nd X Server from creating a 2nd lock */
-			if (dev->master->lock.hw_lock != NULL) {
-				vfree(map->handle);
-				kfree(map);
-				return -EBUSY;
-			}
-			dev->sigdata.lock = dev->master->lock.hw_lock = map->handle;	/* Pointer to lock */
-		}
-		break;
-	case _DRM_AGP: {
-		struct drm_agp_mem *entry;
-		int valid = 0;
-
-		if (!dev->agp) {
-			kfree(map);
-			return -EINVAL;
-		}
-#ifdef __alpha__
-		map->offset += dev->hose->mem_space->start;
-#endif
-		/* In some cases (i810 driver), user space may have already
-		 * added the AGP base itself, because dev->agp->base previously
-		 * only got set during AGP enable.  So, only add the base
-		 * address if the map's offset isn't already within the
-		 * aperture.
-		 */
-		if (map->offset < dev->agp->base ||
-		    map->offset > dev->agp->base +
-		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
-			map->offset += dev->agp->base;
-		}
-		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */
-
-		/* This assumes the DRM is in total control of AGP space.
-		 * It's not always the case, as AGP can be in the control
-		 * of user space (e.g. the i810 driver). Then this loop gets
-		 * skipped, so we double-check that dev->agp->memory is
-		 * actually populated before returning -EPERM for an invalid range.
-		 */
-		list_for_each_entry(entry, &dev->agp->memory, head) {
-			if ((map->offset >= entry->bound) &&
-			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
-				valid = 1;
-				break;
-			}
-		}
-		if (!list_empty(&dev->agp->memory) && !valid) {
-			kfree(map);
-			return -EPERM;
-		}
-		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
-			  (unsigned long long)map->offset, map->size);
-
-		break;
-	}
-	case _DRM_SCATTER_GATHER:
-		if (!dev->sg) {
-			kfree(map);
-			return -EINVAL;
-		}
-		map->offset += (unsigned long)dev->sg->virtual;
-		break;
-	case _DRM_CONSISTENT:
-		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
-		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
-		 * As we're limiting the address to 2^32-1 (or less),
-		 * casting it down to 32 bits is no problem, but we
-		 * need to point to a 64-bit variable first.
-		map->handle = dma_alloc_coherent(dev->dev,
-						 map->size,
-						 &map->offset,
-						 GFP_KERNEL);
-		if (!map->handle) {
-			kfree(map);
-			return -ENOMEM;
-		}
-		break;
-	default:
-		kfree(map);
-		return -EINVAL;
-	}
-
-	list = kzalloc(sizeof(*list), GFP_KERNEL);
-	if (!list) {
-		if (map->type == _DRM_REGISTERS)
-			iounmap(map->handle);
-		kfree(map);
-		return -EINVAL;
-	}
-	list->map = map;
-
-	mutex_lock(&dev->struct_mutex);
-	list_add(&list->head, &dev->maplist);
-
-	/* Assign a 32-bit handle */
-	/* We do it here so that dev->struct_mutex protects the increment */
-	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
-		map->offset;
-	ret = drm_map_handle(dev, &list->hash, user_token, 0,
-			     (map->type == _DRM_SHM));
-	if (ret) {
-		if (map->type == _DRM_REGISTERS)
-			iounmap(map->handle);
-		kfree(map);
-		kfree(list);
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
-
-	list->user_token = list->hash.key << PAGE_SHIFT;
-	mutex_unlock(&dev->struct_mutex);
-
-	if (!(map->flags & _DRM_DRIVER))
-		list->master = dev->master;
-	*maplist = list;
-	return 0;
-}
-
-int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
-		      unsigned int size, enum drm_map_type type,
-		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
-{
-	struct drm_map_list *list;
-	int rc;
-
-	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
-	if (!rc)
-		*map_ptr = list->map;
-	return rc;
-}
-EXPORT_SYMBOL(drm_legacy_addmap);
-
-struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
-					 unsigned int token)
-{
-	struct drm_map_list *_entry;
-
-	list_for_each_entry(_entry, &dev->maplist, head)
-		if (_entry->user_token == token)
-			return _entry->map;
-	return NULL;
-}
-EXPORT_SYMBOL(drm_legacy_findmap);
-
-/*
- * Ioctl to specify a range of memory that is available for mapping by a
- * non-root process.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_map structure.
- * \return zero on success or a negative value on error.
- *
- */
-int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
-			    struct drm_file *file_priv)
-{
-	struct drm_map *map = data;
-	struct drm_map_list *maplist;
-	int err;
-
-	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
-		return -EPERM;
-
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EOPNOTSUPP;
-
-	err = drm_addmap_core(dev, map->offset, map->size, map->type,
-			      map->flags, &maplist);
-
-	if (err)
-		return err;
-
-	/* avoid a warning on 64-bit; this casting isn't very nice, but the API is set, so it's too late to change */
-	map->handle = (void *)(unsigned long)maplist->user_token;
-
-	/*
-	 * It appears that there are no users of this value whatsoever --
-	 * drmAddMap just discards it.  Let's not encourage its use.
-	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
-	 *  it's not a real mtrr index anymore.)
-	 */
-	map->mtrr = -1;
-
-	return 0;
-}
-
-/*
- * Get mapping information.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_map structure.
- *
- * \return zero on success or a negative number on failure.
- *
- * Searches for the mapping with the specified offset and copies its information
- * into userspace
- */
-int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
-			    struct drm_file *file_priv)
-{
-	struct drm_map *map = data;
-	struct drm_map_list *r_list = NULL;
-	struct list_head *list;
-	int idx;
-	int i;
-
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EOPNOTSUPP;
-
-	idx = map->offset;
-	if (idx < 0)
-		return -EINVAL;
-
-	i = 0;
-	mutex_lock(&dev->struct_mutex);
-	list_for_each(list, &dev->maplist) {
-		if (i == idx) {
-			r_list = list_entry(list, struct drm_map_list, head);
-			break;
-		}
-		i++;
-	}
-	if (!r_list || !r_list->map) {
-		mutex_unlock(&dev->struct_mutex);
-		return -EINVAL;
-	}
-
-	map->offset = r_list->map->offset;
-	map->size = r_list->map->size;
-	map->type = r_list->map->type;
-	map->flags = r_list->map->flags;
-	map->handle = (void *)(unsigned long) r_list->user_token;
-	map->mtrr = arch_phys_wc_index(r_list->map->mtrr);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
-/*
- * Remove a map from the list and deallocate its resources if the mapping
- * isn't in use.
- *
- * Searches for the map on drm_device::maplist, removes it from the list, checks
- * whether it's still in use, and frees any associated resources (such as MTRRs)
- * if it's not.
- *
- * \sa drm_legacy_addmap
- */
-int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
-{
-	struct drm_map_list *r_list = NULL, *list_t;
-	int found = 0;
-	struct drm_master *master;
-
-	/* Find the list entry for the map and remove it */
-	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
-		if (r_list->map == map) {
-			master = r_list->master;
-			list_del(&r_list->head);
-			drm_ht_remove_key(&dev->map_hash,
-					  r_list->user_token >> PAGE_SHIFT);
-			kfree(r_list);
-			found = 1;
-			break;
-		}
-	}
-
-	if (!found)
-		return -EINVAL;
-
-	switch (map->type) {
-	case _DRM_REGISTERS:
-		iounmap(map->handle);
-		fallthrough;
-	case _DRM_FRAME_BUFFER:
-		arch_phys_wc_del(map->mtrr);
-		break;
-	case _DRM_SHM:
-		vfree(map->handle);
-		if (master) {
-			if (dev->sigdata.lock == master->lock.hw_lock)
-				dev->sigdata.lock = NULL;
-			master->lock.hw_lock = NULL;   /* SHM removed */
-			master->lock.file_priv = NULL;
-			wake_up_interruptible_all(&master->lock.lock_queue);
-		}
-		break;
-	case _DRM_AGP:
-	case _DRM_SCATTER_GATHER:
-		break;
-	case _DRM_CONSISTENT:
-		dma_free_coherent(dev->dev,
-				  map->size,
-				  map->handle,
-				  map->offset);
-		break;
-	}
-	kfree(map);
-
-	return 0;
-}
-EXPORT_SYMBOL(drm_legacy_rmmap_locked);
-
-void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
-{
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return;
-
-	mutex_lock(&dev->struct_mutex);
-	drm_legacy_rmmap_locked(dev, map);
-	mutex_unlock(&dev->struct_mutex);
-}
-EXPORT_SYMBOL(drm_legacy_rmmap);
-
-void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
-{
-	struct drm_map_list *r_list, *list_temp;
-
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return;
-
-	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
-		if (r_list->master == master) {
-			drm_legacy_rmmap_locked(dev, r_list->map);
-			r_list = NULL;
-		}
-	}
-	mutex_unlock(&dev->struct_mutex);
-}
-
-void drm_legacy_rmmaps(struct drm_device *dev)
-{
-	struct drm_map_list *r_list, *list_temp;
-
-	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
-		drm_legacy_rmmap(dev, r_list->map);
-}
-
-/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
- * the last close of the device, and this is necessary for cleanup when things
- * exit uncleanly.  Therefore, having userland manually remove mappings seems
- * like a pointless exercise since they're going away anyway.
- *
- * One use case might be after addmap is allowed for normal users for SHM and
- * gets used by drivers that the server doesn't need to care about.  This seems
- * unlikely.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a struct drm_map structure.
- * \return zero on success or a negative value on error.
- */
-int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
-			   struct drm_file *file_priv)
-{
-	struct drm_map *request = data;
-	struct drm_local_map *map = NULL;
-	struct drm_map_list *r_list;
-	int ret;
-
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EOPNOTSUPP;
-
-	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(r_list, &dev->maplist, head) {
-		if (r_list->map &&
-		    r_list->user_token == (unsigned long)request->handle &&
-		    r_list->map->flags & _DRM_REMOVABLE) {
-			map = r_list->map;
-			break;
-		}
-	}
-
-	/* List has wrapped around to the head pointer, or it's empty and we
-	 * didn't find anything.
-	 */
-	if (list_empty(&dev->maplist) || !map) {
-		mutex_unlock(&dev->struct_mutex);
-		return -EINVAL;
-	}
-
-	/* Register and framebuffer maps are permanent */
-	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
-		mutex_unlock(&dev->struct_mutex);
-		return 0;
-	}
-
-	ret = drm_legacy_rmmap_locked(dev, map);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-/*
- * Cleanup after an error on one of the addbufs() functions.
- *
- * \param dev DRM device.
- * \param entry buffer entry where the error occurred.
- *
- * Frees any pages and buffers associated with the given entry.
- */
-static void drm_cleanup_buf_error(struct drm_device *dev,
-				  struct drm_buf_entry *entry)
-{
-	drm_dma_handle_t *dmah;
-	int i;
-
-	if (entry->seg_count) {
-		for (i = 0; i < entry->seg_count; i++) {
-			if (entry->seglist[i]) {
-				dmah = entry->seglist[i];
-				dma_free_coherent(dev->dev,
-						  dmah->size,
-						  dmah->vaddr,
-						  dmah->busaddr);
-				kfree(dmah);
-			}
-		}
-		kfree(entry->seglist);
-
-		entry->seg_count = 0;
-	}
-
-	if (entry->buf_count) {
-		for (i = 0; i < entry->buf_count; i++) {
-			kfree(entry->buflist[i].dev_private);
-		}
-		kfree(entry->buflist);
-
-		entry->buf_count = 0;
-	}
-}
-
-#if IS_ENABLED(CONFIG_AGP)
-/*
- * Add AGP buffers for DMA transfers.
- *
- * \param dev struct drm_device to which the buffers are to be added.
- * \param request pointer to a struct drm_buf_desc describing the request.
- * \return zero on success or a negative number on failure.
- *
- * After some sanity checks creates a drm_buf structure for each buffer and
- * reallocates the buffer list of the same size order to accommodate the new
- * buffers.
- */
-int drm_legacy_addbufs_agp(struct drm_device *dev,
-			   struct drm_buf_desc *request)
-{
-	struct drm_device_dma *dma = dev->dma;
-	struct drm_buf_entry *entry;
-	struct drm_agp_mem *agp_entry;
-	struct drm_buf *buf;
-	unsigned long offset;
-	unsigned long agp_offset;
-	int count;
-	int order;
-	int size;
-	int alignment;
-	int page_order;
-	int total;
-	int byte_count;
-	int i, valid;
-	struct drm_buf **temp_buflist;
-
-	if (!dma)
-		return -EINVAL;
-
-	count = request->count;
-	order = order_base_2(request->size);
-	size = 1 << order;
-
-	alignment = (request->flags & _DRM_PAGE_ALIGN)
-	    ? PAGE_ALIGN(size) : size;
-	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
-	total = PAGE_SIZE << page_order;
-
-	byte_count = 0;
-	agp_offset = dev->agp->base + request->agp_start;
-
-	DRM_DEBUG("count:      %d\n", count);
-	DRM_DEBUG("order:      %d\n", order);
-	DRM_DEBUG("size:       %d\n", size);
-	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
-	DRM_DEBUG("alignment:  %d\n", alignment);
-	DRM_DEBUG("page_order: %d\n", page_order);
-	DRM_DEBUG("total:      %d\n", total);
-
-	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
-		return -EINVAL;
-
-	/* Make sure buffers are located in AGP memory that we own */
-	valid = 0;
-	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
-		if ((agp_offset >= agp_entry->bound) &&
-		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
-			valid = 1;
-			break;
-		}
-	}
-	if (!list_empty(&dev->agp->memory) && !valid) {
-		DRM_DEBUG("zone invalid\n");
-		return -EINVAL;
-	}
-	spin_lock(&dev->buf_lock);
-	if (dev->buf_use) {
-		spin_unlock(&dev->buf_lock);
-		return -EBUSY;
-	}
-	atomic_inc(&dev->buf_alloc);
-	spin_unlock(&dev->buf_lock);
-
-	mutex_lock(&dev->struct_mutex);
-	entry = &dma->bufs[order];
-	if (entry->buf_count) {
-		mutex_unlock(&dev->struct_mutex);
-		atomic_dec(&dev->buf_alloc);
-		return -ENOMEM;	/* May only call once for each order */
-	}
-
-	if (count < 0 || count > 4096) {
-		mutex_unlock(&dev->struct_mutex);
-		atomic_dec(&dev->buf_alloc);
-		return -EINVAL;
-	}
-
-	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
-	if (!entry->buflist) {
-		mutex_unlock(&dev->struct_mutex);
-		atomic_dec(&dev->buf_alloc);
-		return -ENOMEM;
-	}
-
-	entry->buf_size = size;
-	entry->page_order = page_order;
-
-	offset = 0;
-
-	while (entry->buf_count < count) {
-		buf = &entry->buflist[entry->buf_count];
-		buf->idx = dma->buf_count + entry->buf_count;
-		buf->total = alignment;
-		buf->order = order;
-		buf->used = 0;
-
-		buf->offset = (dma->byte_count + offset);
-		buf->bus_address = agp_offset + offset;
-		buf->address = (void *)(agp_offset + offset);
-		buf->next = NULL;
-		buf->waiting = 0;
-		buf->pending = 0;
-		buf->file_priv = NULL;
-
-		buf->dev_priv_size = dev->driver->dev_priv_size;
-		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
-		if (!buf->dev_private) {
-			/* Set count correctly so we free the proper amount. */
-			entry->buf_count = count;
-			drm_cleanup_buf_error(dev, entry);
-			mutex_unlock(&dev->struct_mutex);
-			atomic_dec(&dev->buf_alloc);
-			return -ENOMEM;
-		}
-
-		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
-
-		offset += alignment;
-		entry->buf_count++;
-		byte_count += PAGE_SIZE << page_order;
-	}
-
-	DRM_DEBUG("byte_count: %d\n", byte_count);
-
-	temp_buflist = krealloc(dma->buflist,
-				(dma->buf_count + entry->buf_count) *
-				sizeof(*dma->buflist), GFP_KERNEL);
-	if (!temp_buflist) {
-		/* Free the entry because it isn't valid */
-		drm_cleanup_buf_error(dev, entry);
-		mutex_unlock(&dev->struct_mutex);
-		atomic_dec(&dev->buf_alloc);
-		return -ENOMEM;
-	}
-	dma->buflist = temp_buflist;
-
-	for (i = 0; i < entry->buf_count; i++) {
-		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
-	}
-
-	dma->buf_count += entry->buf_count;
-	dma->seg_count += entry->seg_count;
-	dma->page_count += byte_count >> PAGE_SHIFT;
-	dma->byte_count += byte_count;
-
-	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
-	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	request->count = entry->buf_count;
-	request->size = size;
-
-	dma->flags = _DRM_DMA_USE_AGP;
-
-	atomic_dec(&dev->buf_alloc);
-	return 0;
-}
-EXPORT_SYMBOL(drm_legacy_addbufs_agp);
-#endif /* CONFIG_AGP */
-
-int drm_legacy_addbufs_pci(struct drm_device *dev,
-			   struct drm_buf_desc *request)
-{
-	struct drm_device_dma *dma = dev->dma;
-	int count;
-	int order;
-	int size;
-	int total;
-	int page_order;
-	struct drm_buf_entry *entry;
-	drm_dma_handle_t *dmah;
-	struct drm_buf *buf;
-	int alignment;
-	unsigned long offset;
-	int i;
-	int byte_count;
-	int page_count;
-	unsigned long *temp_pagelist;
-	struct drm_buf **temp_buflist;
-
-	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
-		return -EOPNOTSUPP;
-
-	if (!dma)
-		return -EINVAL;
-
-	if (!capable(CAP_SYS_ADMIN))
-		return -EPERM;
-
-	count = request->count;
-	order = order_base_2(request->size);
-	size = 1 << order;
-
-	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
-		  request->count, request->size, size, order);
-
-	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
-		return -EINVAL;
-
-	alignment = (request->flags & _DRM_PAGE_ALIGN)
-	    ? PAGE_ALIGN(size) : size;
-	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
-	total = PAGE_SIZE << page_order;
-
-	spin_lock(&dev->buf_lock);
-	if (dev->buf_use) {
-		spin_unlock(&dev->buf_lock);
-		return -EBUSY;
-	}
-	atomic_inc(&dev->buf_alloc);
-	spin_unlock(&dev->buf_lock);
-
-	mutex_lock(&dev->struct_mutex);
-	entry = &dma->bufs[order];
-	if (entry->buf_count) {
-		mutex_unlock(&dev->struct_mutex);
-		atomic_dec(&dev->buf_alloc);
-		return -ENOMEM;	/* May only call once for each order */
-	}
-
-	if (count < 0 || count > 4096) {
-		mutex_unlock(&dev->struct_mutex);
-		atomic_dec(&dev->buf_alloc);
-		return -EINVAL;
-	}
-
-	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
-	if (!entry->buflist) {
-		mutex_unlock(&dev->struct_mutex);
-		atomic_dec(&dev->buf_alloc);
-		return -ENOMEM;
-	}
-
-	entry->seglist = kcalloc(count, sizeof(*entry->seglist), GFP_KERNEL);
-	if (!entry->seglist) {
-		kfree(entry->buflist);
-		mutex_unlock(&dev->struct_mutex);
-		atomic_dec(&dev->buf_alloc);
-		return -ENOMEM;
-	}
-
-	/* Keep the original pagelist until we know all the allocations
-	 * have succeeded
-	 */
-	temp_pagelist = kmalloc_array(dma->page_count + (count << page_order),
-				      sizeof(*dma->pagelist),
-				      GFP_KERNEL);
-	if (!temp_pagelist) {
-		kfree(entry->buflist);
-		kfree(entry->seglist);
-		mutex_unlock(&dev->struct_mutex);
-		atomic_dec(&dev->buf_alloc);
-		return -ENOMEM;
-	}
-	memcpy(temp_pagelist,
-	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
-	DRM_DEBUG("pagelist: %d entries\n",
-		  dma->page_count + (count << page_order));
-
-	entry->buf_size = size;
-	entry->page_order = page_order;
-	byte_count = 0;
-	page_count = 0;
-
-	while (entry->buf_count < count) {
-		dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
-		if (!dmah) {
-			/* Set count correctly so we free the proper amount. */
-			entry->buf_count = count;
-			entry->seg_count = count;
-			drm_cleanup_buf_error(dev, entry);
-			kfree(temp_pagelist);
-			mutex_unlock(&dev->struct_mutex);
-			atomic_dec(&dev->buf_alloc);
-			return -ENOMEM;
-		}
-
-		dmah->size = total;
-		dmah->vaddr = dma_alloc_coherent(dev->dev,
-						 dmah->size,
-						 &dmah->busaddr,
-						 GFP_KERNEL);
-		if (!dmah->vaddr) {
-			kfree(dmah);
-
-			/* Set count correctly so we free the proper amount. */
-			entry->buf_count = count;
-			entry->seg_count = count;
-			drm_cleanup_buf_error(dev, entry);
-			kfree(temp_pagelist);
-			mutex_unlock(&dev->struct_mutex);
-			atomic_dec(&dev->buf_alloc);
-			return -ENOMEM;
-		}
-		entry->seglist[entry->seg_count++] = dmah;
-		for (i = 0; i < (1 << page_order); i++) {
-			DRM_DEBUG("page %d @ 0x%08lx\n",
-				  dma->page_count + page_count,
-				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
-			temp_pagelist[dma->page_count + page_count++]
-				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
-		}
-		for (offset = 0;
-		     offset + size <= total && entry->buf_count < count;
-		     offset += alignment, ++entry->buf_count) {
-			buf = &entry->buflist[entry->buf_count];
-			buf->idx = dma->buf_count + entry->buf_count;
-			buf->total = alignment;
-			buf->order = order;
-			buf->used = 0;
-			buf->offset = (dma->byte_count + byte_count + offset);
-			buf->address = (void *)(dmah->vaddr + offset);
-			buf->bus_address = dmah->busaddr + offset;
-			buf->next = NULL;
-			buf->waiting = 0;
-			buf->pending = 0;
-			buf->file_priv = NULL;
-
-			buf->dev_priv_size = dev->driver->dev_priv_size;
-			buf->dev_private = kzalloc(buf->dev_priv_size,
-						GFP_KERNEL);
-			if (!buf->dev_private) {
-				/* Set count correctly so we free the proper amount. */
-				entry->buf_count = count;
-				entry->seg_count = count;
-				drm_cleanup_buf_error(dev, entry);
-				kfree(temp_pagelist);
-				mutex_unlock(&dev->struct_mutex);
-				atomic_dec(&dev->buf_alloc);
-				return -ENOMEM;
-			}
-
-			DRM_DEBUG("buffer %d @ %p\n",
-				  entry->buf_count, buf->address);
-		}
-		byte_count += PAGE_SIZE << page_order;
-	}
-
-	temp_buflist = krealloc(dma->buflist,
-				(dma->buf_count + entry->buf_count) *
-				sizeof(*dma->buflist), GFP_KERNEL);
-	if (!temp_buflist) {
-		/* Free the entry because it isn't valid */
-		drm_cleanup_buf_error(dev, entry);
-		kfree(temp_pagelist);
-		mutex_unlock(&dev->struct_mutex);
-		atomic_dec(&dev->buf_alloc);
-		return -ENOMEM;
-	}
-	dma->buflist = temp_buflist;
-
-	for (i = 0; i < entry->buf_count; i++) {
-		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
-	}
-
-	/* No allocations failed, so now we can replace the original pagelist
-	 * with the new one.
-	 */
-	if (dma->page_count) {
-		kfree(dma->pagelist);
-	}
-	dma->pagelist = temp_pagelist;
-
-	dma->buf_count += entry->buf_count;
-	dma->seg_count += entry->seg_count;
-	dma->page_count += entry->seg_count << page_order;
-	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	request->count = entry->buf_count;
-	request->size = size;
-
-	if (request->flags & _DRM_PCI_BUFFER_RO)
-		dma->flags = _DRM_DMA_USE_PCI_RO;
-
-	atomic_dec(&dev->buf_alloc);
-	return 0;
-
-}
-EXPORT_SYMBOL(drm_legacy_addbufs_pci);
-
-static int drm_legacy_addbufs_sg(struct drm_device *dev,
-				 struct drm_buf_desc *request)
-{
-	struct drm_device_dma *dma = dev->dma;
-	struct drm_buf_entry *entry;
-	struct drm_buf *buf;
-	unsigned long offset;
-	unsigned long agp_offset;
-	int count;
-	int order;
-	int size;
-	int alignment;
-	int page_order;
-	int total;
-	int byte_count;
-	int i;
-	struct drm_buf **temp_buflist;
-
-	if (!drm_core_check_feature(dev, DRIVER_SG))
-		return -EOPNOTSUPP;
-
-	if (!dma)
-		return -EINVAL;
-
-	if (!capable(CAP_SYS_ADMIN))
-		return -EPERM;
-
-	count = request->count;
-	order = order_base_2(request->size);
-	size = 1 << order;
-
-	alignment = (request->flags & _DRM_PAGE_ALIGN)
-	    ? PAGE_ALIGN(size) : size;
-	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
-	total = PAGE_SIZE << page_order;
-
-	byte_count = 0;
-	agp_offset = request->agp_start;
-
-	DRM_DEBUG("count:      %d\n", count);
-	DRM_DEBUG("order:      %d\n", order);
-	DRM_DEBUG("size:       %d\n", size);
-	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
-	DRM_DEBUG("alignment:  %d\n", alignment);
-	DRM_DEBUG("page_order: %d\n", page_order);
-	DRM_DEBUG("total:      %d\n", total);
-
-	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
-		return -EINVAL;
-
-	spin_lock(&dev->buf_lock);
-	if (dev->buf_use) {
-		spin_unlock(&dev->buf_lock);
-		return -EBUSY;
-	}
-	atomic_inc(&dev->buf_alloc);
-	spin_unlock(&dev->buf_lock);
-
-	mutex_lock(&dev->struct_mutex);
-	entry = &dma->bufs[order];
-	if (entry->buf_count) {
-		mutex_unlock(&dev->struct_mutex);
-		atomic_dec(&dev->buf_alloc);
-		return -ENOMEM;	/* May only call once for each order */
-	}
-
-	if (count < 0 || count > 4096) {
-		mutex_unlock(&dev->struct_mutex);
-		atomic_dec(&dev->buf_alloc);
-		return -EINVAL;
-	}
-
-	entry->buflist = kcalloc(count, sizeof(*entry->buflist), GFP_KERNEL);
-	if (!entry->buflist) {
-		mutex_unlock(&dev->struct_mutex);
-		atomic_dec(&dev->buf_alloc);
-		return -ENOMEM;
-	}
-
-	entry->buf_size = size;
-	entry->page_order = page_order;
-
-	offset = 0;
-
-	while (entry->buf_count < count) {
-		buf = &entry->buflist[entry->buf_count];
-		buf->idx = dma->buf_count + entry->buf_count;
-		buf->total = alignment;
-		buf->order = order;
-		buf->used = 0;
-
-		buf->offset = (dma->byte_count + offset);
-		buf->bus_address = agp_offset + offset;
-		buf->address = (void *)(agp_offset + offset
-					+ (unsigned long)dev->sg->virtual);
-		buf->next = NULL;
-		buf->waiting = 0;
-		buf->pending = 0;
-		buf->file_priv = NULL;
-
-		buf->dev_priv_size = dev->driver->dev_priv_size;
-		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
-		if (!buf->dev_private) {
-			/* Set count correctly so we free the proper amount. */
-			entry->buf_count = count;
-			drm_cleanup_buf_error(dev, entry);
-			mutex_unlock(&dev->struct_mutex);
-			atomic_dec(&dev->buf_alloc);
-			return -ENOMEM;
-		}
-
-		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
-
-		offset += alignment;
-		entry->buf_count++;
-		byte_count += PAGE_SIZE << page_order;
-	}
-
-	DRM_DEBUG("byte_count: %d\n", byte_count);
-
-	temp_buflist = krealloc(dma->buflist,
-				(dma->buf_count + entry->buf_count) *
-				sizeof(*dma->buflist), GFP_KERNEL);
-	if (!temp_buflist) {
-		/* Free the entry because it isn't valid */
-		drm_cleanup_buf_error(dev, entry);
-		mutex_unlock(&dev->struct_mutex);
-		atomic_dec(&dev->buf_alloc);
-		return -ENOMEM;
-	}
-	dma->buflist = temp_buflist;
-
-	for (i = 0; i < entry->buf_count; i++) {
-		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
-	}
-
-	dma->buf_count += entry->buf_count;
-	dma->seg_count += entry->seg_count;
-	dma->page_count += byte_count >> PAGE_SHIFT;
-	dma->byte_count += byte_count;
-
-	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
-	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	request->count = entry->buf_count;
-	request->size = size;
-
-	dma->flags = _DRM_DMA_USE_SG;
-
-	atomic_dec(&dev->buf_alloc);
-	return 0;
-}
-
-/*
- * Add buffers for DMA transfers (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a struct drm_buf_desc request.
- * \return zero on success or a negative number on failure.
- *
- * According to the memory type specified in drm_buf_desc::flags and the
- * build options, it dispatches the call either to addbufs_agp(),
- * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
- * PCI memory respectively.
- */
-int drm_legacy_addbufs(struct drm_device *dev, void *data,
-		       struct drm_file *file_priv)
-{
-	struct drm_buf_desc *request = data;
-	int ret;
-
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EOPNOTSUPP;
-
-	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
-		return -EOPNOTSUPP;
-
-#if IS_ENABLED(CONFIG_AGP)
-	if (request->flags & _DRM_AGP_BUFFER)
-		ret = drm_legacy_addbufs_agp(dev, request);
-	else
-#endif
-	if (request->flags & _DRM_SG_BUFFER)
-		ret = drm_legacy_addbufs_sg(dev, request);
-	else if (request->flags & _DRM_FB_BUFFER)
-		ret = -EINVAL;
-	else
-		ret = drm_legacy_addbufs_pci(dev, request);
-
-	return ret;
-}
-
-/*
- * Get information about the buffer mappings.
- *
- * This was originally meant for debugging purposes, or for use by a sophisticated
- * client library to determine how best to use the available buffers (e.g.,
- * large buffers can be used for image transfer).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_buf_info structure.
- * \return zero on success or a negative number on failure.
- *
- * Increments drm_device::buf_use while holding the drm_device::buf_lock
- * lock, preventing further buffer allocations after this call. Information
- * about each requested buffer is then copied into user space.
- */
-int __drm_legacy_infobufs(struct drm_device *dev,
-			void *data, int *p,
-			int (*f)(void *, int, struct drm_buf_entry *))
-{
-	struct drm_device_dma *dma = dev->dma;
-	int i;
-	int count;
-
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EOPNOTSUPP;
-
-	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
-		return -EOPNOTSUPP;
-
-	if (!dma)
-		return -EINVAL;
-
-	spin_lock(&dev->buf_lock);
-	if (atomic_read(&dev->buf_alloc)) {
-		spin_unlock(&dev->buf_lock);
-		return -EBUSY;
-	}
-	++dev->buf_use;		/* Can't allocate more after this call */
-	spin_unlock(&dev->buf_lock);
-
-	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
-		if (dma->bufs[i].buf_count)
-			++count;
-	}
-
-	DRM_DEBUG("count = %d\n", count);
-
-	if (*p >= count) {
-		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
-			struct drm_buf_entry *from = &dma->bufs[i];
-
-			if (from->buf_count) {
-				if (f(data, count, from) < 0)
-					return -EFAULT;
-				DRM_DEBUG("%d %d %d %d %d\n",
-					  i,
-					  dma->bufs[i].buf_count,
-					  dma->bufs[i].buf_size,
-					  dma->bufs[i].low_mark,
-					  dma->bufs[i].high_mark);
-				++count;
-			}
-		}
-	}
-	*p = count;
-
-	return 0;
-}
-
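-/* Copy one buffer entry's counts and watermarks out to the userspace list. */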
-static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
-{
-	struct drm_buf_info *request = data;
-	struct drm_buf_desc __user *to = &request->list[count];
-	struct drm_buf_desc v = {.count = from->buf_count,
-				 .size = from->buf_size,
-				 .low_mark = from->low_mark,
-				 .high_mark = from->high_mark};
-
-	if (copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)))
-		return -EFAULT;
-	return 0;
-}
-
-int drm_legacy_infobufs(struct drm_device *dev, void *data,
-			struct drm_file *file_priv)
-{
-	struct drm_buf_info *request = data;
-
-	return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
-}
-
-/*
- * Specifies a low and high water mark for buffer allocation
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg a pointer to a drm_buf_desc structure.
- * \return zero on success or a negative number on failure.
- *
- * Verifies that the size order is bounded between the admissible orders and
- * updates the respective drm_device_dma::bufs entry low and high water mark.
- *
- * \note This ioctl is deprecated and mostly never used.
- */
-int drm_legacy_markbufs(struct drm_device *dev, void *data,
-			struct drm_file *file_priv)
-{
-	struct drm_device_dma *dma = dev->dma;
-	struct drm_buf_desc *request = data;
-	int order;
-	struct drm_buf_entry *entry;
-
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EOPNOTSUPP;
-
-	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
-		return -EOPNOTSUPP;
-
-	if (!dma)
-		return -EINVAL;
-
-	DRM_DEBUG("%d, %d, %d\n",
-		  request->size, request->low_mark, request->high_mark);
-	order = order_base_2(request->size);
-	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
-		return -EINVAL;
-	entry = &dma->bufs[order];
-
-	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
-		return -EINVAL;
-	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
-		return -EINVAL;
-
-	entry->low_mark = request->low_mark;
-	entry->high_mark = request->high_mark;
-
-	return 0;
-}
-
-/*
- * Unreserve the buffers in the list, previously reserved using drmDMA.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_buf_free structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls free_buffer() for each used buffer.
- * This function is primarily used for debugging.
- */
-int drm_legacy_freebufs(struct drm_device *dev, void *data,
-			struct drm_file *file_priv)
-{
-	struct drm_device_dma *dma = dev->dma;
-	struct drm_buf_free *request = data;
-	int i;
-	int idx;
-	struct drm_buf *buf;
-
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EOPNOTSUPP;
-
-	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
-		return -EOPNOTSUPP;
-
-	if (!dma)
-		return -EINVAL;
-
-	DRM_DEBUG("%d\n", request->count);
-	for (i = 0; i < request->count; i++) {
-		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
-			return -EFAULT;
-		if (idx < 0 || idx >= dma->buf_count) {
-			DRM_ERROR("Index %d (of %d max)\n",
-				  idx, dma->buf_count - 1);
-			return -EINVAL;
-		}
-		idx = array_index_nospec(idx, dma->buf_count);
-		buf = dma->buflist[idx];
-		if (buf->file_priv != file_priv) {
-			DRM_ERROR("Process %d freeing buffer not owned\n",
-				  task_pid_nr(current));
-			return -EINVAL;
-		}
-		drm_legacy_free_buffer(dev, buf);
-	}
-
-	return 0;
-}
-
-/*
- * Maps all of the DMA buffers into client-virtual space (ioctl).
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg pointer to a drm_buf_map structure.
- * \return zero on success or a negative number on failure.
- *
- * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
- * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
- * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
- * drm_mmap_dma().
- */
-int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
-			 void __user **v,
-			 int (*f)(void *, int, unsigned long,
-				 struct drm_buf *),
-				 struct drm_file *file_priv)
-{
-	struct drm_device_dma *dma = dev->dma;
-	int retcode = 0;
-	unsigned long virtual;
-	int i;
-
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EOPNOTSUPP;
-
-	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
-		return -EOPNOTSUPP;
-
-	if (!dma)
-		return -EINVAL;
-
-	spin_lock(&dev->buf_lock);
-	if (atomic_read(&dev->buf_alloc)) {
-		spin_unlock(&dev->buf_lock);
-		return -EBUSY;
-	}
-	dev->buf_use++;		/* Can't allocate more after this call */
-	spin_unlock(&dev->buf_lock);
-
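-	/* Only perform the mapping if the caller's list has room for every buffer. */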
-	if (*p >= dma->buf_count) {
-		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
-		    || (drm_core_check_feature(dev, DRIVER_SG)
-			&& (dma->flags & _DRM_DMA_USE_SG))) {
-			struct drm_local_map *map = dev->agp_buffer_map;
-			unsigned long token = dev->agp_buffer_token;
-
-			if (!map) {
-				retcode = -EINVAL;
-				goto done;
-			}
-			virtual = vm_mmap(file_priv->filp, 0, map->size,
-					  PROT_READ | PROT_WRITE,
-					  MAP_SHARED,
-					  token);
-		} else {
-			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
-					  PROT_READ | PROT_WRITE,
-					  MAP_SHARED, 0);
-		}
-		if (virtual > -1024UL) {
-			/* Real error */
-			retcode = (signed long)virtual;
-			goto done;
-		}
-		*v = (void __user *)virtual;
-
-		for (i = 0; i < dma->buf_count; i++) {
-			if (f(data, i, virtual, dma->buflist[i]) < 0) {
-				retcode = -EFAULT;
-				goto done;
-			}
-		}
-	}
-      done:
-	*p = dma->buf_count;
-	DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode);
-
-	return retcode;
-}
-
-static int map_one_buf(void *data, int idx, unsigned long virtual,
-			struct drm_buf *buf)
-{
-	struct drm_buf_map *request = data;
-	unsigned long address = virtual + buf->offset;	/* *** */
-
-	if (copy_to_user(&request->list[idx].idx, &buf->idx,
-			 sizeof(request->list[0].idx)))
-		return -EFAULT;
-	if (copy_to_user(&request->list[idx].total, &buf->total,
-			 sizeof(request->list[0].total)))
-		return -EFAULT;
-	if (clear_user(&request->list[idx].used, sizeof(int)))
-		return -EFAULT;
-	if (copy_to_user(&request->list[idx].address, &address,
-			 sizeof(address)))
-		return -EFAULT;
-	return 0;
-}
-
-int drm_legacy_mapbufs(struct drm_device *dev, void *data,
-		       struct drm_file *file_priv)
-{
-	struct drm_buf_map *request = data;
-
-	return __drm_legacy_mapbufs(dev, data, &request->count,
-				    &request->virtual, map_one_buf,
-				    file_priv);
-}
-
-int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
-		  struct drm_file *file_priv)
-{
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EOPNOTSUPP;
-
-	if (dev->driver->dma_ioctl)
-		return dev->driver->dma_ioctl(dev, data, file_priv);
-	else
-		return -EINVAL;
-}
-
-struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
-{
-	struct drm_map_list *entry;
-
-	list_for_each_entry(entry, &dev->maplist, head) {
-		if (entry->map && entry->map->type == _DRM_SHM &&
-		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
-			return entry->map;
-		}
-	}
-	return NULL;
-}
-EXPORT_SYMBOL(drm_legacy_getsarea);
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
deleted file mode 100644
index a0fc779e5e1e8a14ab48f9c7f43ff9419b022e2b..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/drm_context.c
+++ /dev/null
@@ -1,513 +0,0 @@
-/*
- * Legacy: Generic DRM Contexts
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Author: Rickard E. (Rik) Faith <faith@valinux.com>
- * Author: Gareth Hughes <gareth@valinux.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_print.h>
-
-#include "drm_legacy.h"
-
-struct drm_ctx_list {
-	struct list_head head;
-	drm_context_t handle;
-	struct drm_file *tag;
-};
-
-/******************************************************************/
-/** \name Context bitmap support */
-/*@{*/
-
-/*
- * Free a handle from the context bitmap.
- *
- * \param dev DRM device.
- * \param ctx_handle context handle.
- *
- * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
- * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
- * lock.
- */
-void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
-{
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return;
-
-	mutex_lock(&dev->struct_mutex);
-	idr_remove(&dev->ctx_idr, ctx_handle);
-	mutex_unlock(&dev->struct_mutex);
-}
-
-/*
- * Context bitmap allocation.
- *
- * \param dev DRM device.
- * \return (non-negative) context handle on success or a negative number on failure.
- *
- * Allocate a new idr from drm_device::ctx_idr while holding the
- * drm_device::struct_mutex lock.
- */
-static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
-{
-	int ret;
-
-	mutex_lock(&dev->struct_mutex);
-	ret = idr_alloc(&dev->ctx_idr, NULL, DRM_RESERVED_CONTEXTS, 0,
-			GFP_KERNEL);
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-
-/*
- * Context bitmap initialization.
- *
- * \param dev DRM device.
- *
- * Initialise the drm_device::ctx_idr
- */
-void drm_legacy_ctxbitmap_init(struct drm_device * dev)
-{
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return;
-
-	idr_init(&dev->ctx_idr);
-}
-
-/*
- * Context bitmap cleanup.
- *
- * \param dev DRM device.
- *
- * Free all idr members using drm_ctx_sarea_free helper function
- * while holding the drm_device::struct_mutex lock.
- */
-void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
-{
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return;
-
-	mutex_lock(&dev->struct_mutex);
-	idr_destroy(&dev->ctx_idr);
-	mutex_unlock(&dev->struct_mutex);
-}
-
-/**
- * drm_legacy_ctxbitmap_flush() - Flush all contexts owned by a file
- * @dev: DRM device to operate on
- * @file: Open file to flush contexts for
- *
- * This iterates over all contexts on @dev and drops them if they're owned by
- * @file. Note that after this call returns, new contexts might be added if
- * the file is still alive.
- */
-void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
-{
-	struct drm_ctx_list *pos, *tmp;
-
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return;
-
-	mutex_lock(&dev->ctxlist_mutex);
-
-	list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
-		if (pos->tag == file &&
-		    pos->handle != DRM_KERNEL_CONTEXT) {
-			if (dev->driver->context_dtor)
-				dev->driver->context_dtor(dev, pos->handle);
-
-			drm_legacy_ctxbitmap_free(dev, pos->handle);
-			list_del(&pos->head);
-			kfree(pos);
-		}
-	}
-
-	mutex_unlock(&dev->ctxlist_mutex);
-}
-
-/*@}*/
-
-/******************************************************************/
-/** \name Per Context SAREA Support */
-/*@{*/
-
-/*
- * Get per-context SAREA.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx_priv_map structure.
- * \return zero on success or a negative number on failure.
- *
- * Gets the map from drm_device::ctx_idr with the handle specified and
- * returns its handle.
- */
-int drm_legacy_getsareactx(struct drm_device *dev, void *data,
-			   struct drm_file *file_priv)
-{
-	struct drm_ctx_priv_map *request = data;
-	struct drm_local_map *map;
-	struct drm_map_list *_entry;
-
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EOPNOTSUPP;
-
-	mutex_lock(&dev->struct_mutex);
-
-	map = idr_find(&dev->ctx_idr, request->ctx_id);
-	if (!map) {
-		mutex_unlock(&dev->struct_mutex);
-		return -EINVAL;
-	}
-
-	request->handle = NULL;
-	list_for_each_entry(_entry, &dev->maplist, head) {
-		if (_entry->map == map) {
-			request->handle =
-			    (void *)(unsigned long)_entry->user_token;
-			break;
-		}
-	}
-
-	mutex_unlock(&dev->struct_mutex);
-
-	if (request->handle == NULL)
-		return -EINVAL;
-
-	return 0;
-}
-
-/*
- * Set per-context SAREA.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx_priv_map structure.
- * \return zero on success or a negative number on failure.
- *
- * Searches the mapping specified in \p arg and update the entry in
- * drm_device::ctx_idr with it.
- */
-int drm_legacy_setsareactx(struct drm_device *dev, void *data,
-			   struct drm_file *file_priv)
-{
-	struct drm_ctx_priv_map *request = data;
-	struct drm_local_map *map = NULL;
-	struct drm_map_list *r_list = NULL;
-
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EOPNOTSUPP;
-
-	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(r_list, &dev->maplist, head) {
-		if (r_list->map
-		    && r_list->user_token == (unsigned long) request->handle)
-			goto found;
-	}
-      bad:
-	mutex_unlock(&dev->struct_mutex);
-	return -EINVAL;
-
-      found:
-	map = r_list->map;
-	if (!map)
-		goto bad;
-
-	if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
-		goto bad;
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
-/*@}*/
-
-/******************************************************************/
-/** \name The actual DRM context handling routines */
-/*@{*/
-
-/*
- * Switch context.
- *
- * \param dev DRM device.
- * \param old old context handle.
- * \param new new context handle.
- * \return zero on success or a negative number on failure.
- *
- * Attempt to set drm_device::context_flag.
- */
-static int drm_context_switch(struct drm_device * dev, int old, int new)
-{
-	if (test_and_set_bit(0, &dev->context_flag)) {
-		DRM_ERROR("Reentering -- FIXME\n");
-		return -EBUSY;
-	}
-
-	DRM_DEBUG("Context switch from %d to %d\n", old, new);
-
-	if (new == dev->last_context) {
-		clear_bit(0, &dev->context_flag);
-		return 0;
-	}
-
-	return 0;
-}
-
-/*
- * Complete context switch.
- *
- * \param dev DRM device.
- * \param new new context handle.
- * \return zero on success or a negative number on failure.
- *
- * Updates drm_device::last_context and drm_device::last_switch. Verifies the
- * hardware lock is held, clears the drm_device::context_flag and wakes up
- * drm_device::context_wait.
- */
-static int drm_context_switch_complete(struct drm_device *dev,
-				       struct drm_file *file_priv, int new)
-{
-	dev->last_context = new;	/* PRE/POST: This is the _only_ writer. */
-
-	if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
-		DRM_ERROR("Lock isn't held after context switch\n");
-	}
-
-	/* If a context switch is ever initiated
-	   when the kernel holds the lock, release
-	   that lock here.
-	 */
-	clear_bit(0, &dev->context_flag);
-
-	return 0;
-}
-
-/*
- * Reserve contexts.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx_res structure.
- * \return zero on success or a negative number on failure.
- */
-int drm_legacy_resctx(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv)
-{
-	struct drm_ctx_res *res = data;
-	struct drm_ctx ctx;
-	int i;
-
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EOPNOTSUPP;
-
-	if (res->count >= DRM_RESERVED_CONTEXTS) {
-		memset(&ctx, 0, sizeof(ctx));
-		for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
-			ctx.handle = i;
-			if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
-				return -EFAULT;
-		}
-	}
-	res->count = DRM_RESERVED_CONTEXTS;
-
-	return 0;
-}
-
-/*
- * Add context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * Get a new handle for the context and copy to userspace.
- */
-int drm_legacy_addctx(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv)
-{
-	struct drm_ctx_list *ctx_entry;
-	struct drm_ctx *ctx = data;
-	int tmp_handle;
-
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EOPNOTSUPP;
-
-	tmp_handle = drm_legacy_ctxbitmap_next(dev);
-	if (tmp_handle == DRM_KERNEL_CONTEXT) {
-		/* Skip kernel's context and get a new one. */
-		tmp_handle = drm_legacy_ctxbitmap_next(dev);
-	}
-	DRM_DEBUG("%d\n", tmp_handle);
-	if (tmp_handle < 0) {
-		DRM_DEBUG("Not enough free contexts.\n");
-		/* Should this return -EBUSY instead? */
-		return tmp_handle;
-	}
-
-	ctx->handle = tmp_handle;
-
-	ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
-	if (!ctx_entry) {
-		DRM_DEBUG("out of memory\n");
-		return -ENOMEM;
-	}
-
-	INIT_LIST_HEAD(&ctx_entry->head);
-	ctx_entry->handle = ctx->handle;
-	ctx_entry->tag = file_priv;
-
-	mutex_lock(&dev->ctxlist_mutex);
-	list_add(&ctx_entry->head, &dev->ctxlist);
-	mutex_unlock(&dev->ctxlist_mutex);
-
-	return 0;
-}
-
-/*
- * Get context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- */
-int drm_legacy_getctx(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv)
-{
-	struct drm_ctx *ctx = data;
-
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EOPNOTSUPP;
-
-	/* This is 0, because we don't handle any context flags */
-	ctx->flags = 0;
-
-	return 0;
-}
-
-/*
- * Switch context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls context_switch().
- */
-int drm_legacy_switchctx(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv)
-{
-	struct drm_ctx *ctx = data;
-
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EOPNOTSUPP;
-
-	DRM_DEBUG("%d\n", ctx->handle);
-	return drm_context_switch(dev, dev->last_context, ctx->handle);
-}
-
-/*
- * New context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * Calls context_switch_complete().
- */
-int drm_legacy_newctx(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv)
-{
-	struct drm_ctx *ctx = data;
-
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EOPNOTSUPP;
-
-	DRM_DEBUG("%d\n", ctx->handle);
-	drm_context_switch_complete(dev, file_priv, ctx->handle);
-
-	return 0;
-}
-
-/*
- * Remove context.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument pointing to a drm_ctx structure.
- * \return zero on success or a negative number on failure.
- *
- * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
- */
-int drm_legacy_rmctx(struct drm_device *dev, void *data,
-		     struct drm_file *file_priv)
-{
-	struct drm_ctx *ctx = data;
-
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EOPNOTSUPP;
-
-	DRM_DEBUG("%d\n", ctx->handle);
-	if (ctx->handle != DRM_KERNEL_CONTEXT) {
-		if (dev->driver->context_dtor)
-			dev->driver->context_dtor(dev, ctx->handle);
-		drm_legacy_ctxbitmap_free(dev, ctx->handle);
-	}
-
-	mutex_lock(&dev->ctxlist_mutex);
-	if (!list_empty(&dev->ctxlist)) {
-		struct drm_ctx_list *pos, *n;
-
-		list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
-			if (pos->handle == ctx->handle) {
-				list_del(&pos->head);
-				kfree(pos);
-			}
-		}
-	}
-	mutex_unlock(&dev->ctxlist_mutex);
-
-	return 0;
-}
-
-/*@}*/
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index a209659a996c74b9d4beeb98e8590bd95573872b..2dafc39a27cb93b3bdd7f8e082396ae1b4e08d55 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -439,11 +439,8 @@ EXPORT_SYMBOL(drm_crtc_helper_set_mode);
  * @state: atomic state object
  *
  * Provides a default CRTC-state check handler for CRTCs that only have
- * one primary plane attached to it.
- *
- * This is often the case for the CRTC of simple framebuffers. See also
- * drm_plane_helper_atomic_check() for the respective plane-state check
- * helper function.
+ * one primary plane attached to it. This is often the case for the CRTC
+ * of simple framebuffers.
  *
  * RETURNS:
  * Zero on success, or an errno code otherwise.
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 6b646e0783be9626dbbde60900b2bd2b94621d18..a514d5207e4197ac6434fbfeb9fb3a4bfe0d771e 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -253,7 +253,7 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
 			    struct drm_file *file_priv,
 			    struct drm_mode_object *obj,
 			    struct drm_property *prop,
-			    uint64_t prop_value);
+			    u64 prop_value, bool async_flip);
 int drm_atomic_get_property(struct drm_mode_object *obj,
 			    struct drm_property *property, uint64_t *val);
 
diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
index d8b2955e88fd0a606c37e74ed454f4a27874a26d..afb02aae707b4f98f4084d65436166c56b7b5006 100644
--- a/drivers/gpu/drm/drm_damage_helper.c
+++ b/drivers/gpu/drm/drm_damage_helper.c
@@ -241,7 +241,8 @@ drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
 	iter->plane_src.x2 = (src.x2 >> 16) + !!(src.x2 & 0xFFFF);
 	iter->plane_src.y2 = (src.y2 >> 16) + !!(src.y2 & 0xFFFF);
 
-	if (!iter->clips || !drm_rect_equals(&state->src, &old_state->src)) {
+	if (!iter->clips || state->ignore_damage_clips ||
+	    !drm_rect_equals(&state->src, &old_state->src)) {
 		iter->clips = NULL;
 		iter->num_clips = 0;
 		iter->full_update = true;
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index f291fb4b359fdf0d918ddaff713861ffa2424e3b..02e7481758c0afac6f0cbef9e45d24b5f78206c4 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -314,10 +314,8 @@ void drm_debugfs_dev_register(struct drm_device *dev)
 		drm_framebuffer_debugfs_init(dev);
 		drm_client_debugfs_init(dev);
 	}
-	if (drm_drv_uses_atomic_modeset(dev)) {
+	if (drm_drv_uses_atomic_modeset(dev))
 		drm_atomic_debugfs_init(dev);
-		drm_bridge_debugfs_init(dev);
-	}
 }
 
 int drm_debugfs_register(struct drm_minor *minor, int minor_id,
@@ -589,4 +587,65 @@ void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
 	crtc->debugfs_entry = NULL;
 }
 
+static int bridges_show(struct seq_file *m, void *data)
+{
+	struct drm_encoder *encoder = m->private;
+	struct drm_printer p = drm_seq_file_printer(m);
+	struct drm_bridge *bridge;
+	unsigned int idx = 0;
+
+	drm_for_each_bridge_in_chain(encoder, bridge) {
+		drm_printf(&p, "bridge[%d]: %ps\n", idx++, bridge->funcs);
+		drm_printf(&p, "\ttype: [%d] %s\n",
+			   bridge->type,
+			   drm_get_connector_type_name(bridge->type));
+#ifdef CONFIG_OF
+		if (bridge->of_node)
+			drm_printf(&p, "\tOF: %pOFfc\n", bridge->of_node);
+#endif
+		drm_printf(&p, "\tops: [0x%x]", bridge->ops);
+		if (bridge->ops & DRM_BRIDGE_OP_DETECT)
+			drm_puts(&p, " detect");
+		if (bridge->ops & DRM_BRIDGE_OP_EDID)
+			drm_puts(&p, " edid");
+		if (bridge->ops & DRM_BRIDGE_OP_HPD)
+			drm_puts(&p, " hpd");
+		if (bridge->ops & DRM_BRIDGE_OP_MODES)
+			drm_puts(&p, " modes");
+		drm_puts(&p, "\n");
+	}
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(bridges);
+
+void drm_debugfs_encoder_add(struct drm_encoder *encoder)
+{
+	struct drm_minor *minor = encoder->dev->primary;
+	struct dentry *root;
+	char *name;
+
+	name = kasprintf(GFP_KERNEL, "encoder-%d", encoder->index);
+	if (!name)
+		return;
+
+	root = debugfs_create_dir(name, minor->debugfs_root);
+	kfree(name);
+
+	encoder->debugfs_entry = root;
+
+	/* bridges list */
+	debugfs_create_file("bridges", 0444, root, encoder,
+			    &bridges_fops);
+
+	if (encoder->funcs->debugfs_init)
+		encoder->funcs->debugfs_init(encoder, root);
+}
+
+void drm_debugfs_encoder_remove(struct drm_encoder *encoder)
+{
+	debugfs_remove_recursive(encoder->debugfs_entry);
+	encoder->debugfs_entry = NULL;
+}
+
 #endif /* CONFIG_DEBUG_FS */
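
Note: the new per-encoder "bridges" debugfs file above prints one stanza per
bridge found by drm_for_each_bridge_in_chain(). As a purely illustrative
example (hypothetical driver symbol, OF path, and ops mask), reading
/sys/kernel/debug/dri/0/encoder-0/bridges could produce:

	bridge[0]: my_dsi_bridge_funcs
		type: [14] eDP
		OF: /soc@0/i2c@c175000/bridge@2d:my,hypothetical-bridge
		ops: [0x7] detect edid hpd

Here 0x7 decodes to DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HPD, matching the flag tests in bridges_show().
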
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
deleted file mode 100644
index eb6b741a6f995de0633aff13660989a60bc9a8e6..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/drm_dma.c
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * \file drm_dma.c
- * DMA IOCTL and function support
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/pci.h>
-
-#include <drm/drm_drv.h>
-#include <drm/drm_print.h>
-
-#include "drm_legacy.h"
-
-/**
- * drm_legacy_dma_setup() - Initialize the DMA data.
- *
- * @dev: DRM device.
- * Return: zero on success or a negative value on failure.
- *
- * Allocate and initialize a drm_device_dma structure.
- */
-int drm_legacy_dma_setup(struct drm_device *dev)
-{
-	int i;
-
-	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
-	    !drm_core_check_feature(dev, DRIVER_LEGACY))
-		return 0;
-
-	dev->buf_use = 0;
-	atomic_set(&dev->buf_alloc, 0);
-
-	dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
-	if (!dev->dma)
-		return -ENOMEM;
-
-	for (i = 0; i <= DRM_MAX_ORDER; i++)
-		memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
-
-	return 0;
-}
-
-/**
- * drm_legacy_dma_takedown() - Cleanup the DMA resources.
- *
- * @dev: DRM device.
- *
- * Free all pages associated with DMA buffers, the buffers and pages lists, and
- * finally the drm_device::dma structure itself.
- */
-void drm_legacy_dma_takedown(struct drm_device *dev)
-{
-	struct drm_device_dma *dma = dev->dma;
-	drm_dma_handle_t *dmah;
-	int i, j;
-
-	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
-	    !drm_core_check_feature(dev, DRIVER_LEGACY))
-		return;
-
-	if (!dma)
-		return;
-
-	/* Clear dma buffers */
-	for (i = 0; i <= DRM_MAX_ORDER; i++) {
-		if (dma->bufs[i].seg_count) {
-			DRM_DEBUG("order %d: buf_count = %d,"
-				  " seg_count = %d\n",
-				  i,
-				  dma->bufs[i].buf_count,
-				  dma->bufs[i].seg_count);
-			for (j = 0; j < dma->bufs[i].seg_count; j++) {
-				if (dma->bufs[i].seglist[j]) {
-					dmah = dma->bufs[i].seglist[j];
-					dma_free_coherent(dev->dev,
-							  dmah->size,
-							  dmah->vaddr,
-							  dmah->busaddr);
-					kfree(dmah);
-				}
-			}
-			kfree(dma->bufs[i].seglist);
-		}
-		if (dma->bufs[i].buf_count) {
-			for (j = 0; j < dma->bufs[i].buf_count; j++) {
-				kfree(dma->bufs[i].buflist[j].dev_private);
-			}
-			kfree(dma->bufs[i].buflist);
-		}
-	}
-
-	kfree(dma->buflist);
-	kfree(dma->pagelist);
-	kfree(dev->dma);
-	dev->dma = NULL;
-}
-
-/**
- * drm_legacy_free_buffer() - Free a buffer.
- *
- * @dev: DRM device.
- * @buf: buffer to free.
- *
- * Resets the fields of \p buf.
- */
-void drm_legacy_free_buffer(struct drm_device *dev, struct drm_buf * buf)
-{
-	if (!buf)
-		return;
-
-	buf->waiting = 0;
-	buf->pending = 0;
-	buf->file_priv = NULL;
-	buf->used = 0;
-}
-
-/**
- * drm_legacy_reclaim_buffers() - Reclaim the buffers.
- *
- * @dev: DRM device.
- * @file_priv: DRM file private.
- *
- * Frees each buffer associated with \p file_priv not already on the hardware.
- */
-void drm_legacy_reclaim_buffers(struct drm_device *dev,
-				struct drm_file *file_priv)
-{
-	struct drm_device_dma *dma = dev->dma;
-	int i;
-
-	if (!dma)
-		return;
-	for (i = 0; i < dma->buf_count; i++) {
-		if (dma->buflist[i]->file_priv == file_priv) {
-			switch (dma->buflist[i]->list) {
-			case DRM_LIST_NONE:
-				drm_legacy_free_buffer(dev, dma->buflist[i]);
-				break;
-			case DRM_LIST_WAIT:
-				dma->buflist[i]->list = DRM_LIST_RECLAIM;
-				break;
-			default:
-				/* Buffer already on hardware. */
-				break;
-			}
-		}
-	}
-}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 535f16e7882e706b1e0573345b58711b7f6f2d94..243cacb3575c088ac1f378c48e2c72e8a465cc0e 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -48,7 +48,6 @@
 
 #include "drm_crtc_internal.h"
 #include "drm_internal.h"
-#include "drm_legacy.h"
 
 MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
 MODULE_DESCRIPTION("DRM shared core routines");
@@ -585,8 +584,6 @@ static void drm_fs_inode_free(struct inode *inode)
 
 static void drm_dev_init_release(struct drm_device *dev, void *res)
 {
-	drm_legacy_ctxbitmap_cleanup(dev);
-	drm_legacy_remove_map_hash(dev);
 	drm_fs_inode_free(dev->anon_inode);
 
 	put_device(dev->dev);
@@ -597,7 +594,6 @@ static void drm_dev_init_release(struct drm_device *dev, void *res)
 	mutex_destroy(&dev->clientlist_mutex);
 	mutex_destroy(&dev->filelist_mutex);
 	mutex_destroy(&dev->struct_mutex);
-	drm_legacy_destroy_members(dev);
 }
 
 static int drm_dev_init(struct drm_device *dev,
@@ -632,7 +628,6 @@ static int drm_dev_init(struct drm_device *dev,
 		return -EINVAL;
 	}
 
-	drm_legacy_init_members(dev);
 	INIT_LIST_HEAD(&dev->filelist);
 	INIT_LIST_HEAD(&dev->filelist_internal);
 	INIT_LIST_HEAD(&dev->clientlist);
@@ -673,12 +668,6 @@ static int drm_dev_init(struct drm_device *dev,
 			goto err;
 	}
 
-	ret = drm_legacy_create_map_hash(dev);
-	if (ret)
-		goto err;
-
-	drm_legacy_ctxbitmap_init(dev);
-
 	if (drm_core_check_feature(dev, DRIVER_GEM)) {
 		ret = drm_gem_init(dev);
 		if (ret) {
@@ -949,8 +938,11 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
 			goto err_minors;
 	}
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		drm_modeset_register_all(dev);
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		ret = drm_modeset_register_all(dev);
+		if (ret)
+			goto err_unload;
+	}
 
 	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
 		 driver->name, driver->major, driver->minor,
@@ -960,6 +952,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
 
 	goto out_unlock;
 
+err_unload:
+	if (dev->driver->unload)
+		dev->driver->unload(dev);
 err_minors:
 	remove_compat_control_link(dev);
 	drm_minor_unregister(dev, DRM_MINOR_ACCEL);
@@ -990,9 +985,6 @@ EXPORT_SYMBOL(drm_dev_register);
  */
 void drm_dev_unregister(struct drm_device *dev)
 {
-	if (drm_core_check_feature(dev, DRIVER_LEGACY))
-		drm_lastclose(dev);
-
 	dev->registered = false;
 
 	drm_client_dev_unregister(dev);
@@ -1003,9 +995,6 @@ void drm_dev_unregister(struct drm_device *dev)
 	if (dev->driver->unload)
 		dev->driver->unload(dev);
 
-	drm_legacy_pci_agp_destroy(dev);
-	drm_legacy_rmmaps(dev);
-
 	remove_compat_control_link(dev);
 	drm_minor_unregister(dev, DRM_MINOR_ACCEL);
 	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index 1143bc7f325227ee252f6d593ffea55d0daa9761..8f2bc6a28482229fd0b030a1958f87753ad7885f 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -30,6 +30,7 @@
 #include <drm/drm_print.h>
 
 #include "drm_crtc_internal.h"
+#include "drm_internal.h"
 
 /**
  * DOC: overview
@@ -74,6 +75,8 @@ int drm_encoder_register_all(struct drm_device *dev)
 	int ret = 0;
 
 	drm_for_each_encoder(encoder, dev) {
+		drm_debugfs_encoder_add(encoder);
+
 		if (encoder->funcs && encoder->funcs->late_register)
 			ret = encoder->funcs->late_register(encoder);
 		if (ret)
@@ -90,6 +93,7 @@ void drm_encoder_unregister_all(struct drm_device *dev)
 	drm_for_each_encoder(encoder, dev) {
 		if (encoder->funcs && encoder->funcs->early_unregister)
 			encoder->funcs->early_unregister(encoder);
+		drm_debugfs_encoder_remove(encoder);
 	}
 }
 
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 5ddaffd325865fa813e301974b33a1122cc49a21..8c87287c3e16800369d6a5e79acde5d9168c146c 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -47,21 +47,12 @@
 
 #include "drm_crtc_internal.h"
 #include "drm_internal.h"
-#include "drm_legacy.h"
 
 /* from BKL pushdown */
 DEFINE_MUTEX(drm_global_mutex);
 
 bool drm_dev_needs_global_mutex(struct drm_device *dev)
 {
-	/*
-	 * Legacy drivers rely on all kinds of BKL locking semantics, don't
-	 * bother. They also still need BKL locking for their ioctls, so better
-	 * safe than sorry.
-	 */
-	if (drm_core_check_feature(dev, DRIVER_LEGACY))
-		return true;
-
 	/*
 	 * The deprecated ->load callback must be called after the driver is
 	 * already registered. This means such drivers rely on the BKL to make
@@ -107,9 +98,7 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev)
  * drm_send_event() as the main starting points.
  *
  * The memory mapping implementation will vary depending on how the driver
- * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
- * function, modern drivers should use one of the provided memory-manager
- * specific implementations. For GEM-based drivers this is drm_gem_mmap().
+ * manages memory. For GEM-based drivers this is drm_gem_mmap().
  *
  * No other file operations are supported by the DRM userspace API. Overall the
  * following is an example &file_operations structure::
@@ -254,18 +243,6 @@ void drm_file_free(struct drm_file *file)
 		     (long)old_encode_dev(file->minor->kdev->devt),
 		     atomic_read(&dev->open_count));
 
-#ifdef CONFIG_DRM_LEGACY
-	if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
-	    dev->driver->preclose)
-		dev->driver->preclose(dev, file);
-#endif
-
-	if (drm_core_check_feature(dev, DRIVER_LEGACY))
-		drm_legacy_lock_release(dev, file->filp);
-
-	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
-		drm_legacy_reclaim_buffers(dev, file);
-
 	drm_events_release(file);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -279,8 +256,6 @@ void drm_file_free(struct drm_file *file)
 	if (drm_core_check_feature(dev, DRIVER_GEM))
 		drm_gem_release(dev, file);
 
-	drm_legacy_ctxbitmap_flush(dev, file);
-
 	if (drm_is_primary_client(file))
 		drm_master_release(file);
 
@@ -367,29 +342,6 @@ int drm_open_helper(struct file *filp, struct drm_minor *minor)
 	list_add(&priv->lhead, &dev->filelist);
 	mutex_unlock(&dev->filelist_mutex);
 
-#ifdef CONFIG_DRM_LEGACY
-#ifdef __alpha__
-	/*
-	 * Default the hose
-	 */
-	if (!dev->hose) {
-		struct pci_dev *pci_dev;
-
-		pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
-		if (pci_dev) {
-			dev->hose = pci_dev->sysdata;
-			pci_dev_put(pci_dev);
-		}
-		if (!dev->hose) {
-			struct pci_bus *b = list_entry(pci_root_buses.next,
-				struct pci_bus, node);
-			if (b)
-				dev->hose = b->sysdata;
-		}
-	}
-#endif
-#endif
-
 	return 0;
 }
 
@@ -411,7 +363,6 @@ int drm_open(struct inode *inode, struct file *filp)
 	struct drm_device *dev;
 	struct drm_minor *minor;
 	int retcode;
-	int need_setup = 0;
 
 	minor = drm_minor_acquire(iminor(inode));
 	if (IS_ERR(minor))
@@ -421,8 +372,7 @@ int drm_open(struct inode *inode, struct file *filp)
 	if (drm_dev_needs_global_mutex(dev))
 		mutex_lock(&drm_global_mutex);
 
-	if (!atomic_fetch_inc(&dev->open_count))
-		need_setup = 1;
+	atomic_fetch_inc(&dev->open_count);
 
 	/* share address_space across all char-devs of a single device */
 	filp->f_mapping = dev->anon_inode->i_mapping;
@@ -430,13 +380,6 @@ int drm_open(struct inode *inode, struct file *filp)
 	retcode = drm_open_helper(filp, minor);
 	if (retcode)
 		goto err_undo;
-	if (need_setup) {
-		retcode = drm_legacy_setup(dev);
-		if (retcode) {
-			drm_close_helper(filp);
-			goto err_undo;
-		}
-	}
 
 	if (drm_dev_needs_global_mutex(dev))
 		mutex_unlock(&drm_global_mutex);
@@ -460,9 +403,6 @@ void drm_lastclose(struct drm_device * dev)
 		dev->driver->lastclose(dev);
 	drm_dbg_core(dev, "driver lastclose completed\n");
 
-	if (drm_core_check_feature(dev, DRIVER_LEGACY))
-		drm_legacy_dev_reinit(dev);
-
 	drm_client_dev_restore(dev);
 }
 
@@ -958,7 +898,7 @@ void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file)
 {
 	struct drm_gem_object *obj;
 	struct drm_memory_stats status = {};
-	enum drm_gem_object_status supported_status;
+	enum drm_gem_object_status supported_status = 0;
 	int id;
 
 	spin_lock(&file->table_lock);
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 09e289fca5c3121562353a51851fd4bdf4706050..3cc0ffc28e862158d16a26255efa8d87302283f8 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -583,7 +583,7 @@ int drm_mode_getfb2_ioctl(struct drm_device *dev,
 	struct drm_mode_fb_cmd2 *r = data;
 	struct drm_framebuffer *fb;
 	unsigned int i;
-	int ret;
+	int ret = 0;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index 54f5e8851de5ddb79b4bb933a0e3d901b9021f43..9c0922c1a5a27ccb52e13308e1ef0913c4b83cb9 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -1085,6 +1085,37 @@ drm_gpuvm_put(struct drm_gpuvm *gpuvm)
 }
 EXPORT_SYMBOL_GPL(drm_gpuvm_put);
 
+static int
+exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj,
+		 unsigned int num_fences)
+{
+	return num_fences ? drm_exec_prepare_obj(exec, obj, num_fences) :
+			    drm_exec_lock_obj(exec, obj);
+}
+
+/**
+ * drm_gpuvm_prepare_vm() - prepare the GPUVM's common dma-resv
+ * @gpuvm: the &drm_gpuvm
+ * @exec: the &drm_exec context
+ * @num_fences: the amount of &dma_fences to reserve
+ *
+ * Calls drm_exec_prepare_obj() for the GPUVM's dummy &drm_gem_object; if
+ * @num_fences is zero drm_exec_lock_obj() is called instead.
+ *
+ * When using this function directly, it is the driver's responsibility to call
+ * drm_exec_init() and drm_exec_fini() accordingly.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int
+drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
+		     struct drm_exec *exec,
+		     unsigned int num_fences)
+{
+	return exec_prepare_obj(exec, gpuvm->r_obj, num_fences);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_vm);
+
 static int
 __drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
 			    struct drm_exec *exec,
@@ -1095,7 +1126,7 @@ __drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
 	int ret = 0;
 
 	for_each_vm_bo_in_list(gpuvm, extobj, &extobjs, vm_bo) {
-		ret = drm_exec_prepare_obj(exec, vm_bo->obj, num_fences);
+		ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
 		if (ret)
 			break;
 	}
@@ -1116,7 +1147,7 @@ drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm,
 
 	drm_gpuvm_resv_assert_held(gpuvm);
 	list_for_each_entry(vm_bo, &gpuvm->extobj.list, list.entry.extobj) {
-		ret = drm_exec_prepare_obj(exec, vm_bo->obj, num_fences);
+		ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
 		if (ret)
 			break;
 
@@ -1134,7 +1165,8 @@ drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm,
  * @num_fences: the amount of &dma_fences to reserve
  *
  * Calls drm_exec_prepare_obj() for all &drm_gem_objects the given
- * &drm_gpuvm contains mappings of.
+ * &drm_gpuvm contains mappings of; if @num_fences is zero drm_exec_lock_obj()
+ * is called instead.
  *
  * Using this function directly, it is the drivers responsibility to call
  * drm_exec_init() and drm_exec_fini() accordingly.
@@ -1171,7 +1203,8 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_objects);
  * @num_fences: the amount of &dma_fences to reserve
  *
  * Calls drm_exec_prepare_obj() for all &drm_gem_objects mapped between @addr
- * and @addr + @range.
+ * and @addr + @range; if @num_fences is zero drm_exec_lock_obj() is called
+ * instead.
  *
  * Returns: 0 on success, negative error code on failure.
  */
@@ -1186,7 +1219,7 @@ drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
 	drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) {
 		struct drm_gem_object *obj = va->gem.obj;
 
-		ret = drm_exec_prepare_obj(exec, obj, num_fences);
+		ret = exec_prepare_obj(exec, obj, num_fences);
 		if (ret)
 			return ret;
 	}
@@ -1502,14 +1535,18 @@ drm_gpuvm_bo_destroy(struct kref *kref)
  * hold the dma-resv or driver specific GEM gpuva lock.
  *
  * This function may only be called from non-atomic context.
+ *
+ * Returns: true if vm_bo was destroyed, false otherwise.
  */
-void
+bool
 drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo)
 {
 	might_sleep();
 
 	if (vm_bo)
-		kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy);
+		return !!kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy);
+
+	return false;
 }
 EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put);
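
Note: together with the zero-fence fallback introduced above, the new
drm_gpuvm_prepare_vm() lets a driver's lock-everything path be written
directly against drm_exec. A minimal, hypothetical sketch follows; the
function name is made up, and the two-argument drm_exec_init() form is
assumed (its signature has varied across kernel versions):

	#include <drm/drm_exec.h>
	#include <drm/drm_gpuvm.h>

	static int lock_vm_for_update(struct drm_gpuvm *gpuvm)
	{
		struct drm_exec exec;
		int ret;

		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
		drm_exec_until_all_locked(&exec) {
			/* num_fences == 0: the helpers fall back to
			 * drm_exec_lock_obj(), so the objects are locked
			 * without reserving dma-resv fence slots.
			 */
			ret = drm_gpuvm_prepare_vm(gpuvm, &exec, 0);
			drm_exec_retry_on_contention(&exec);
			if (ret)
				goto out;

			ret = drm_gpuvm_prepare_objects(gpuvm, &exec, 0);
			drm_exec_retry_on_contention(&exec);
			if (ret)
				goto out;
		}

		/* ... perform the VM update while the locks are held ... */

	out:
		drm_exec_fini(&exec);
		return ret;
	}

Relatedly, drm_gpuvm_bo_put() now returns whether the final reference was
dropped, so callers can perform teardown bookkeeping only when the
&drm_gpuvm_bo was actually destroyed.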
 
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
deleted file mode 100644
index 60afa18655599483ba9d3e4f05792ead5f16fe8f..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/drm_hashtab.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- **************************************************************************/
-/*
- * Simple open hash tab implementation.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include <linux/hash.h>
-#include <linux/mm.h>
-#include <linux/rculist.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include <drm/drm_print.h>
-
-#include "drm_legacy.h"
-
-int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
-{
-	unsigned int size = 1 << order;
-
-	ht->order = order;
-	ht->table = NULL;
-	if (size <= PAGE_SIZE / sizeof(*ht->table))
-		ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
-	else
-		ht->table = vzalloc(array_size(size, sizeof(*ht->table)));
-	if (!ht->table) {
-		DRM_ERROR("Out of memory for hash table\n");
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
-{
-	struct drm_hash_item *entry;
-	struct hlist_head *h_list;
-	unsigned int hashed_key;
-	int count = 0;
-
-	hashed_key = hash_long(key, ht->order);
-	DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
-	h_list = &ht->table[hashed_key];
-	hlist_for_each_entry(entry, h_list, head)
-		DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
-}
-
-static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
-					  unsigned long key)
-{
-	struct drm_hash_item *entry;
-	struct hlist_head *h_list;
-	unsigned int hashed_key;
-
-	hashed_key = hash_long(key, ht->order);
-	h_list = &ht->table[hashed_key];
-	hlist_for_each_entry(entry, h_list, head) {
-		if (entry->key == key)
-			return &entry->head;
-		if (entry->key > key)
-			break;
-	}
-	return NULL;
-}
-
-static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
-					      unsigned long key)
-{
-	struct drm_hash_item *entry;
-	struct hlist_head *h_list;
-	unsigned int hashed_key;
-
-	hashed_key = hash_long(key, ht->order);
-	h_list = &ht->table[hashed_key];
-	hlist_for_each_entry_rcu(entry, h_list, head) {
-		if (entry->key == key)
-			return &entry->head;
-		if (entry->key > key)
-			break;
-	}
-	return NULL;
-}
-
-int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
-{
-	struct drm_hash_item *entry;
-	struct hlist_head *h_list;
-	struct hlist_node *parent;
-	unsigned int hashed_key;
-	unsigned long key = item->key;
-
-	hashed_key = hash_long(key, ht->order);
-	h_list = &ht->table[hashed_key];
-	parent = NULL;
-	hlist_for_each_entry(entry, h_list, head) {
-		if (entry->key == key)
-			return -EINVAL;
-		if (entry->key > key)
-			break;
-		parent = &entry->head;
-	}
-	if (parent) {
-		hlist_add_behind_rcu(&item->head, parent);
-	} else {
-		hlist_add_head_rcu(&item->head, h_list);
-	}
-	return 0;
-}
-
-/*
- * Just insert an item and return any "bits" bit key that hasn't been
- * used before.
- */
-int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
-			      unsigned long seed, int bits, int shift,
-			      unsigned long add)
-{
-	int ret;
-	unsigned long mask = (1UL << bits) - 1;
-	unsigned long first, unshifted_key;
-
-	unshifted_key = hash_long(seed, bits);
-	first = unshifted_key;
-	do {
-		item->key = (unshifted_key << shift) + add;
-		ret = drm_ht_insert_item(ht, item);
-		if (ret)
-			unshifted_key = (unshifted_key + 1) & mask;
-	} while(ret && (unshifted_key != first));
-
-	if (ret) {
-		DRM_ERROR("Available key bit space exhausted\n");
-		return -EINVAL;
-	}
-	return 0;
-}
-
-int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
-		     struct drm_hash_item **item)
-{
-	struct hlist_node *list;
-
-	list = drm_ht_find_key_rcu(ht, key);
-	if (!list)
-		return -EINVAL;
-
-	*item = hlist_entry(list, struct drm_hash_item, head);
-	return 0;
-}
-
-int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
-{
-	struct hlist_node *list;
-
-	list = drm_ht_find_key(ht, key);
-	if (list) {
-		hlist_del_init_rcu(list);
-		return 0;
-	}
-	return -EINVAL;
-}
-
-int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
-{
-	hlist_del_init_rcu(&item->head);
-	return 0;
-}
-
-void drm_ht_remove(struct drm_open_hash *ht)
-{
-	if (ht->table) {
-		kvfree(ht->table);
-		ht->table = NULL;
-	}
-}
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index b12c463bc46050608d4d47211a9f81644c9568c2..8e4faf0a28e6cf77b4df8c182a2aa14e544a6458 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -117,17 +117,10 @@ void drm_handle_vblank_works(struct drm_vblank_crtc *vblank);
 /* IOCTLS */
 int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *filp);
-int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data,
-				 struct drm_file *file_priv);
 
 /* drm_irq.c */
 
 /* IOCTLS */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int drm_legacy_irq_control(struct drm_device *dev, void *data,
-			   struct drm_file *file_priv);
-#endif
-
 int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *filp);
 
@@ -194,6 +187,8 @@ void drm_debugfs_connector_remove(struct drm_connector *connector);
 void drm_debugfs_crtc_add(struct drm_crtc *crtc);
 void drm_debugfs_crtc_remove(struct drm_crtc *crtc);
 void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc);
+void drm_debugfs_encoder_add(struct drm_encoder *encoder);
+void drm_debugfs_encoder_remove(struct drm_encoder *encoder);
 #else
 static inline void drm_debugfs_dev_fini(struct drm_device *dev)
 {
@@ -231,6 +226,14 @@ static inline void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
 {
 }
 
+static inline void drm_debugfs_encoder_add(struct drm_encoder *encoder)
+{
+}
+
+static inline void drm_debugfs_encoder_remove(struct drm_encoder *encoder)
+{
+}
+
 #endif
 
 drm_ioctl_t drm_version;
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 025dc558c94ee1aafd598e138534a9b441711bcb..129e2b91dbfe7f785a821c165db0c0b88ff4b1c9 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -31,12 +31,12 @@
 #include <linux/ratelimit.h>
 #include <linux/export.h>
 
+#include <drm/drm_device.h>
 #include <drm/drm_file.h>
 #include <drm/drm_print.h>
 
 #include "drm_crtc_internal.h"
 #include "drm_internal.h"
-#include "drm_legacy.h"
 
 #define DRM_IOCTL_VERSION32		DRM_IOWR(0x00, drm_version32_t)
 #define DRM_IOCTL_GET_UNIQUE32		DRM_IOWR(0x01, drm_unique32_t)
@@ -163,92 +163,6 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd,
 	return -EINVAL;
 }
 
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-typedef struct drm_map32 {
-	u32 offset;		/* Requested physical address (0 for SAREA) */
-	u32 size;		/* Requested physical size (bytes) */
-	enum drm_map_type type;	/* Type of memory to map */
-	enum drm_map_flags flags;	/* Flags */
-	u32 handle;		/* User-space: "Handle" to pass to mmap() */
-	int mtrr;		/* MTRR slot used */
-} drm_map32_t;
-
-static int compat_drm_getmap(struct file *file, unsigned int cmd,
-			     unsigned long arg)
-{
-	drm_map32_t __user *argp = (void __user *)arg;
-	drm_map32_t m32;
-	struct drm_map map;
-	int err;
-
-	if (copy_from_user(&m32, argp, sizeof(m32)))
-		return -EFAULT;
-
-	map.offset = m32.offset;
-	err = drm_ioctl_kernel(file, drm_legacy_getmap_ioctl, &map, 0);
-	if (err)
-		return err;
-
-	m32.offset = map.offset;
-	m32.size = map.size;
-	m32.type = map.type;
-	m32.flags = map.flags;
-	m32.handle = ptr_to_compat((void __user *)map.handle);
-	m32.mtrr = map.mtrr;
-	if (copy_to_user(argp, &m32, sizeof(m32)))
-		return -EFAULT;
-	return 0;
-
-}
-
-static int compat_drm_addmap(struct file *file, unsigned int cmd,
-			     unsigned long arg)
-{
-	drm_map32_t __user *argp = (void __user *)arg;
-	drm_map32_t m32;
-	struct drm_map map;
-	int err;
-
-	if (copy_from_user(&m32, argp, sizeof(m32)))
-		return -EFAULT;
-
-	map.offset = m32.offset;
-	map.size = m32.size;
-	map.type = m32.type;
-	map.flags = m32.flags;
-
-	err = drm_ioctl_kernel(file, drm_legacy_addmap_ioctl, &map,
-				DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-	if (err)
-		return err;
-
-	m32.offset = map.offset;
-	m32.mtrr = map.mtrr;
-	m32.handle = ptr_to_compat((void __user *)map.handle);
-	if (map.handle != compat_ptr(m32.handle))
-		pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n",
-				   map.handle, m32.type, m32.offset);
-
-	if (copy_to_user(argp, &m32, sizeof(m32)))
-		return -EFAULT;
-
-	return 0;
-}
-
-static int compat_drm_rmmap(struct file *file, unsigned int cmd,
-			    unsigned long arg)
-{
-	drm_map32_t __user *argp = (void __user *)arg;
-	struct drm_map map;
-	u32 handle;
-
-	if (get_user(handle, &argp->handle))
-		return -EFAULT;
-	map.handle = compat_ptr(handle);
-	return drm_ioctl_kernel(file, drm_legacy_rmmap_ioctl, &map, DRM_AUTH);
-}
-#endif
-
 typedef struct drm_client32 {
 	int idx;	/* Which client desired? */
 	int auth;	/* Is client authenticated? */
@@ -308,501 +222,6 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
 	return 0;
 }
 
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-typedef struct drm_buf_desc32 {
-	int count;		 /* Number of buffers of this size */
-	int size;		 /* Size in bytes */
-	int low_mark;		 /* Low water mark */
-	int high_mark;		 /* High water mark */
-	int flags;
-	u32 agp_start;		 /* Start address in the AGP aperture */
-} drm_buf_desc32_t;
-
-static int compat_drm_addbufs(struct file *file, unsigned int cmd,
-			      unsigned long arg)
-{
-	drm_buf_desc32_t __user *argp = (void __user *)arg;
-	drm_buf_desc32_t desc32;
-	struct drm_buf_desc desc;
-	int err;
-
-	if (copy_from_user(&desc32, argp, sizeof(drm_buf_desc32_t)))
-		return -EFAULT;
-
-	desc = (struct drm_buf_desc){
-		desc32.count, desc32.size, desc32.low_mark, desc32.high_mark,
-		desc32.flags, desc32.agp_start
-	};
-
-	err = drm_ioctl_kernel(file, drm_legacy_addbufs, &desc,
-				   DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-	if (err)
-		return err;
-
-	desc32 = (drm_buf_desc32_t){
-		desc.count, desc.size, desc.low_mark, desc.high_mark,
-		desc.flags, desc.agp_start
-	};
-	if (copy_to_user(argp, &desc32, sizeof(drm_buf_desc32_t)))
-		return -EFAULT;
-
-	return 0;
-}
-
-static int compat_drm_markbufs(struct file *file, unsigned int cmd,
-			       unsigned long arg)
-{
-	drm_buf_desc32_t b32;
-	drm_buf_desc32_t __user *argp = (void __user *)arg;
-	struct drm_buf_desc buf;
-
-	if (copy_from_user(&b32, argp, sizeof(b32)))
-		return -EFAULT;
-
-	buf.size = b32.size;
-	buf.low_mark = b32.low_mark;
-	buf.high_mark = b32.high_mark;
-
-	return drm_ioctl_kernel(file, drm_legacy_markbufs, &buf,
-				DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-
-typedef struct drm_buf_info32 {
-	int count;		/**< Entries in list */
-	u32 list;
-} drm_buf_info32_t;
-
-static int copy_one_buf32(void *data, int count, struct drm_buf_entry *from)
-{
-	drm_buf_info32_t *request = data;
-	drm_buf_desc32_t __user *to = compat_ptr(request->list);
-	drm_buf_desc32_t v = {.count = from->buf_count,
-			      .size = from->buf_size,
-			      .low_mark = from->low_mark,
-			      .high_mark = from->high_mark};
-
-	if (copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags)))
-		return -EFAULT;
-	return 0;
-}
-
-static int drm_legacy_infobufs32(struct drm_device *dev, void *data,
-			struct drm_file *file_priv)
-{
-	drm_buf_info32_t *request = data;
-
-	return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf32);
-}
-
-static int compat_drm_infobufs(struct file *file, unsigned int cmd,
-			       unsigned long arg)
-{
-	drm_buf_info32_t req32;
-	drm_buf_info32_t __user *argp = (void __user *)arg;
-	int err;
-
-	if (copy_from_user(&req32, argp, sizeof(req32)))
-		return -EFAULT;
-
-	if (req32.count < 0)
-		req32.count = 0;
-
-	err = drm_ioctl_kernel(file, drm_legacy_infobufs32, &req32, DRM_AUTH);
-	if (err)
-		return err;
-
-	if (put_user(req32.count, &argp->count))
-		return -EFAULT;
-
-	return 0;
-}
-
-typedef struct drm_buf_pub32 {
-	int idx;		/**< Index into the master buffer list */
-	int total;		/**< Buffer size */
-	int used;		/**< Amount of buffer in use (for DMA) */
-	u32 address;		/**< Address of buffer */
-} drm_buf_pub32_t;
-
-typedef struct drm_buf_map32 {
-	int count;		/**< Length of the buffer list */
-	u32 virtual;		/**< Mmap'd area in user-virtual */
-	u32 list;		/**< Buffer information */
-} drm_buf_map32_t;
-
-static int map_one_buf32(void *data, int idx, unsigned long virtual,
-			struct drm_buf *buf)
-{
-	drm_buf_map32_t *request = data;
-	drm_buf_pub32_t __user *to = compat_ptr(request->list) + idx;
-	drm_buf_pub32_t v;
-
-	v.idx = buf->idx;
-	v.total = buf->total;
-	v.used = 0;
-	v.address = virtual + buf->offset;
-	if (copy_to_user(to, &v, sizeof(v)))
-		return -EFAULT;
-	return 0;
-}
-
-static int drm_legacy_mapbufs32(struct drm_device *dev, void *data,
-		       struct drm_file *file_priv)
-{
-	drm_buf_map32_t *request = data;
-	void __user *v;
-	int err = __drm_legacy_mapbufs(dev, data, &request->count,
-				    &v, map_one_buf32,
-				    file_priv);
-	request->virtual = ptr_to_compat(v);
-	return err;
-}
-
-static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
-			      unsigned long arg)
-{
-	drm_buf_map32_t __user *argp = (void __user *)arg;
-	drm_buf_map32_t req32;
-	int err;
-
-	if (copy_from_user(&req32, argp, sizeof(req32)))
-		return -EFAULT;
-	if (req32.count < 0)
-		return -EINVAL;
-
-	err = drm_ioctl_kernel(file, drm_legacy_mapbufs32, &req32, DRM_AUTH);
-	if (err)
-		return err;
-
-	if (put_user(req32.count, &argp->count)
-	    || put_user(req32.virtual, &argp->virtual))
-		return -EFAULT;
-
-	return 0;
-}
-
-typedef struct drm_buf_free32 {
-	int count;
-	u32 list;
-} drm_buf_free32_t;
-
-static int compat_drm_freebufs(struct file *file, unsigned int cmd,
-			       unsigned long arg)
-{
-	drm_buf_free32_t req32;
-	struct drm_buf_free request;
-	drm_buf_free32_t __user *argp = (void __user *)arg;
-
-	if (copy_from_user(&req32, argp, sizeof(req32)))
-		return -EFAULT;
-
-	request.count = req32.count;
-	request.list = compat_ptr(req32.list);
-	return drm_ioctl_kernel(file, drm_legacy_freebufs, &request, DRM_AUTH);
-}
-
-typedef struct drm_ctx_priv_map32 {
-	unsigned int ctx_id;	 /**< Context requesting private mapping */
-	u32 handle;		/**< Handle of map */
-} drm_ctx_priv_map32_t;
-
-static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
-				  unsigned long arg)
-{
-	drm_ctx_priv_map32_t req32;
-	struct drm_ctx_priv_map request;
-	drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
-
-	if (copy_from_user(&req32, argp, sizeof(req32)))
-		return -EFAULT;
-
-	request.ctx_id = req32.ctx_id;
-	request.handle = compat_ptr(req32.handle);
-	return drm_ioctl_kernel(file, drm_legacy_setsareactx, &request,
-				DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-
-static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
-				  unsigned long arg)
-{
-	struct drm_ctx_priv_map req;
-	drm_ctx_priv_map32_t req32;
-	drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
-	int err;
-
-	if (copy_from_user(&req32, argp, sizeof(req32)))
-		return -EFAULT;
-
-	req.ctx_id = req32.ctx_id;
-	err = drm_ioctl_kernel(file, drm_legacy_getsareactx, &req, DRM_AUTH);
-	if (err)
-		return err;
-
-	req32.handle = ptr_to_compat((void __user *)req.handle);
-	if (copy_to_user(argp, &req32, sizeof(req32)))
-		return -EFAULT;
-
-	return 0;
-}
-
-typedef struct drm_ctx_res32 {
-	int count;
-	u32 contexts;
-} drm_ctx_res32_t;
-
-static int compat_drm_resctx(struct file *file, unsigned int cmd,
-			     unsigned long arg)
-{
-	drm_ctx_res32_t __user *argp = (void __user *)arg;
-	drm_ctx_res32_t res32;
-	struct drm_ctx_res res;
-	int err;
-
-	if (copy_from_user(&res32, argp, sizeof(res32)))
-		return -EFAULT;
-
-	res.count = res32.count;
-	res.contexts = compat_ptr(res32.contexts);
-	err = drm_ioctl_kernel(file, drm_legacy_resctx, &res, DRM_AUTH);
-	if (err)
-		return err;
-
-	res32.count = res.count;
-	if (copy_to_user(argp, &res32, sizeof(res32)))
-		return -EFAULT;
-
-	return 0;
-}
-
-typedef struct drm_dma32 {
-	int context;		  /**< Context handle */
-	int send_count;		  /**< Number of buffers to send */
-	u32 send_indices;	  /**< List of handles to buffers */
-	u32 send_sizes;		  /**< Lengths of data to send */
-	enum drm_dma_flags flags;		  /**< Flags */
-	int request_count;	  /**< Number of buffers requested */
-	int request_size;	  /**< Desired size for buffers */
-	u32 request_indices;	  /**< Buffer information */
-	u32 request_sizes;
-	int granted_count;	  /**< Number of buffers granted */
-} drm_dma32_t;
-
-static int compat_drm_dma(struct file *file, unsigned int cmd,
-			  unsigned long arg)
-{
-	drm_dma32_t d32;
-	drm_dma32_t __user *argp = (void __user *)arg;
-	struct drm_dma d;
-	int err;
-
-	if (copy_from_user(&d32, argp, sizeof(d32)))
-		return -EFAULT;
-
-	d.context = d32.context;
-	d.send_count = d32.send_count;
-	d.send_indices = compat_ptr(d32.send_indices);
-	d.send_sizes = compat_ptr(d32.send_sizes);
-	d.flags = d32.flags;
-	d.request_count = d32.request_count;
-	d.request_indices = compat_ptr(d32.request_indices);
-	d.request_sizes = compat_ptr(d32.request_sizes);
-	err = drm_ioctl_kernel(file, drm_legacy_dma_ioctl, &d, DRM_AUTH);
-	if (err)
-		return err;
-
-	if (put_user(d.request_size, &argp->request_size)
-	    || put_user(d.granted_count, &argp->granted_count))
-		return -EFAULT;
-
-	return 0;
-}
-#endif
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-#if IS_ENABLED(CONFIG_AGP)
-typedef struct drm_agp_mode32 {
-	u32 mode;	/**< AGP mode */
-} drm_agp_mode32_t;
-
-static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
-				 unsigned long arg)
-{
-	drm_agp_mode32_t __user *argp = (void __user *)arg;
-	struct drm_agp_mode mode;
-
-	if (get_user(mode.mode, &argp->mode))
-		return -EFAULT;
-
-	return drm_ioctl_kernel(file,  drm_legacy_agp_enable_ioctl, &mode,
-				DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-
-typedef struct drm_agp_info32 {
-	int agp_version_major;
-	int agp_version_minor;
-	u32 mode;
-	u32 aperture_base;	/* physical address */
-	u32 aperture_size;	/* bytes */
-	u32 memory_allowed;	/* bytes */
-	u32 memory_used;
-
-	/* PCI information */
-	unsigned short id_vendor;
-	unsigned short id_device;
-} drm_agp_info32_t;
-
-static int compat_drm_agp_info(struct file *file, unsigned int cmd,
-			       unsigned long arg)
-{
-	drm_agp_info32_t __user *argp = (void __user *)arg;
-	drm_agp_info32_t i32;
-	struct drm_agp_info info;
-	int err;
-
-	err = drm_ioctl_kernel(file, drm_legacy_agp_info_ioctl, &info, DRM_AUTH);
-	if (err)
-		return err;
-
-	i32.agp_version_major = info.agp_version_major;
-	i32.agp_version_minor = info.agp_version_minor;
-	i32.mode = info.mode;
-	i32.aperture_base = info.aperture_base;
-	i32.aperture_size = info.aperture_size;
-	i32.memory_allowed = info.memory_allowed;
-	i32.memory_used = info.memory_used;
-	i32.id_vendor = info.id_vendor;
-	i32.id_device = info.id_device;
-	if (copy_to_user(argp, &i32, sizeof(i32)))
-		return -EFAULT;
-
-	return 0;
-}
-
-typedef struct drm_agp_buffer32 {
-	u32 size;	/**< In bytes -- will round to page boundary */
-	u32 handle;	/**< Used for binding / unbinding */
-	u32 type;	/**< Type of memory to allocate */
-	u32 physical;	/**< Physical used by i810 */
-} drm_agp_buffer32_t;
-
-static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
-				unsigned long arg)
-{
-	drm_agp_buffer32_t __user *argp = (void __user *)arg;
-	drm_agp_buffer32_t req32;
-	struct drm_agp_buffer request;
-	int err;
-
-	if (copy_from_user(&req32, argp, sizeof(req32)))
-		return -EFAULT;
-
-	request.size = req32.size;
-	request.type = req32.type;
-	err = drm_ioctl_kernel(file, drm_legacy_agp_alloc_ioctl, &request,
-				DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-	if (err)
-		return err;
-
-	req32.handle = request.handle;
-	req32.physical = request.physical;
-	if (copy_to_user(argp, &req32, sizeof(req32))) {
-		drm_ioctl_kernel(file, drm_legacy_agp_free_ioctl, &request,
-				DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-		return -EFAULT;
-	}
-
-	return 0;
-}
-
-static int compat_drm_agp_free(struct file *file, unsigned int cmd,
-			       unsigned long arg)
-{
-	drm_agp_buffer32_t __user *argp = (void __user *)arg;
-	struct drm_agp_buffer request;
-
-	if (get_user(request.handle, &argp->handle))
-		return -EFAULT;
-
-	return drm_ioctl_kernel(file, drm_legacy_agp_free_ioctl, &request,
-				DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-
-typedef struct drm_agp_binding32 {
-	u32 handle;	/**< From drm_agp_buffer */
-	u32 offset;	/**< In bytes -- will round to page boundary */
-} drm_agp_binding32_t;
-
-static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
-			       unsigned long arg)
-{
-	drm_agp_binding32_t __user *argp = (void __user *)arg;
-	drm_agp_binding32_t req32;
-	struct drm_agp_binding request;
-
-	if (copy_from_user(&req32, argp, sizeof(req32)))
-		return -EFAULT;
-
-	request.handle = req32.handle;
-	request.offset = req32.offset;
-	return drm_ioctl_kernel(file, drm_legacy_agp_bind_ioctl, &request,
-				DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-
-static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
-				 unsigned long arg)
-{
-	drm_agp_binding32_t __user *argp = (void __user *)arg;
-	struct drm_agp_binding request;
-
-	if (get_user(request.handle, &argp->handle))
-		return -EFAULT;
-
-	return drm_ioctl_kernel(file, drm_legacy_agp_unbind_ioctl, &request,
-				DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-#endif /* CONFIG_AGP */
-
-typedef struct drm_scatter_gather32 {
-	u32 size;	/**< In bytes -- will round to page boundary */
-	u32 handle;	/**< Used for mapping / unmapping */
-} drm_scatter_gather32_t;
-
-static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
-			       unsigned long arg)
-{
-	drm_scatter_gather32_t __user *argp = (void __user *)arg;
-	struct drm_scatter_gather request;
-	int err;
-
-	if (get_user(request.size, &argp->size))
-		return -EFAULT;
-
-	err = drm_ioctl_kernel(file, drm_legacy_sg_alloc, &request,
-				DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-	if (err)
-		return err;
-
-	/* XXX not sure about the handle conversion here... */
-	if (put_user(request.handle >> PAGE_SHIFT, &argp->handle))
-		return -EFAULT;
-
-	return 0;
-}
-
-static int compat_drm_sg_free(struct file *file, unsigned int cmd,
-			      unsigned long arg)
-{
-	drm_scatter_gather32_t __user *argp = (void __user *)arg;
-	struct drm_scatter_gather request;
-	unsigned long x;
-
-	if (get_user(x, &argp->handle))
-		return -EFAULT;
-	request.handle = x << PAGE_SHIFT;
-	return drm_ioctl_kernel(file, drm_legacy_sg_free, &request,
-				DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-#endif
 #if defined(CONFIG_X86)
 typedef struct drm_update_draw32 {
 	drm_drawable_t handle;
@@ -854,7 +273,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
 	req.request.type = req32.request.type;
 	req.request.sequence = req32.request.sequence;
 	req.request.signal = req32.request.signal;
-	err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED);
+	err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, 0);
 
 	req32.reply.type = req.reply.type;
 	req32.reply.sequence = req.reply.sequence;
@@ -914,37 +333,9 @@ static struct {
 #define DRM_IOCTL32_DEF(n, f) [DRM_IOCTL_NR(n##32)] = {.fn = f, .name = #n}
 	DRM_IOCTL32_DEF(DRM_IOCTL_VERSION, compat_drm_version),
 	DRM_IOCTL32_DEF(DRM_IOCTL_GET_UNIQUE, compat_drm_getunique),
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-	DRM_IOCTL32_DEF(DRM_IOCTL_GET_MAP, compat_drm_getmap),
-#endif
 	DRM_IOCTL32_DEF(DRM_IOCTL_GET_CLIENT, compat_drm_getclient),
 	DRM_IOCTL32_DEF(DRM_IOCTL_GET_STATS, compat_drm_getstats),
 	DRM_IOCTL32_DEF(DRM_IOCTL_SET_UNIQUE, compat_drm_setunique),
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-	DRM_IOCTL32_DEF(DRM_IOCTL_ADD_MAP, compat_drm_addmap),
-	DRM_IOCTL32_DEF(DRM_IOCTL_ADD_BUFS, compat_drm_addbufs),
-	DRM_IOCTL32_DEF(DRM_IOCTL_MARK_BUFS, compat_drm_markbufs),
-	DRM_IOCTL32_DEF(DRM_IOCTL_INFO_BUFS, compat_drm_infobufs),
-	DRM_IOCTL32_DEF(DRM_IOCTL_MAP_BUFS, compat_drm_mapbufs),
-	DRM_IOCTL32_DEF(DRM_IOCTL_FREE_BUFS, compat_drm_freebufs),
-	DRM_IOCTL32_DEF(DRM_IOCTL_RM_MAP, compat_drm_rmmap),
-	DRM_IOCTL32_DEF(DRM_IOCTL_SET_SAREA_CTX, compat_drm_setsareactx),
-	DRM_IOCTL32_DEF(DRM_IOCTL_GET_SAREA_CTX, compat_drm_getsareactx),
-	DRM_IOCTL32_DEF(DRM_IOCTL_RES_CTX, compat_drm_resctx),
-	DRM_IOCTL32_DEF(DRM_IOCTL_DMA, compat_drm_dma),
-#if IS_ENABLED(CONFIG_AGP)
-	DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ENABLE, compat_drm_agp_enable),
-	DRM_IOCTL32_DEF(DRM_IOCTL_AGP_INFO, compat_drm_agp_info),
-	DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ALLOC, compat_drm_agp_alloc),
-	DRM_IOCTL32_DEF(DRM_IOCTL_AGP_FREE, compat_drm_agp_free),
-	DRM_IOCTL32_DEF(DRM_IOCTL_AGP_BIND, compat_drm_agp_bind),
-	DRM_IOCTL32_DEF(DRM_IOCTL_AGP_UNBIND, compat_drm_agp_unbind),
-#endif
-#endif
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-	DRM_IOCTL32_DEF(DRM_IOCTL_SG_ALLOC, compat_drm_sg_alloc),
-	DRM_IOCTL32_DEF(DRM_IOCTL_SG_FREE, compat_drm_sg_free),
-#endif
 #if defined(CONFIG_X86)
 	DRM_IOCTL32_DEF(DRM_IOCTL_UPDATE_DRAW, compat_drm_update_draw),
 #endif
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 44fda68c28aebf973039fd4cec9d32adcc33dda0..e368fc084c77d4e22d9e5bc42ef823d733409728 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -42,7 +42,6 @@
 
 #include "drm_crtc_internal.h"
 #include "drm_internal.h"
-#include "drm_legacy.h"
 
 /**
  * DOC: getunique and setversion story
@@ -301,6 +300,10 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
 	case DRM_CAP_CRTC_IN_VBLANK_EVENT:
 		req->value = 1;
 		break;
+	case DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP:
+		req->value = drm_core_check_feature(dev, DRIVER_ATOMIC) &&
+			     dev->mode_config.async_page_flip;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -361,6 +364,15 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
 			return -EINVAL;
 		file_priv->writeback_connectors = req->value;
 		break;
+	case DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT:
+		if (!drm_core_check_feature(dev, DRIVER_CURSOR_HOTSPOT))
+			return -EOPNOTSUPP;
+		if (!file_priv->atomic)
+			return -EINVAL;
+		if (req->value > 1)
+			return -EINVAL;
+		file_priv->supports_virtualized_cursor_plane = req->value;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -559,21 +571,11 @@ static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
 		.name = #ioctl			\
 	}
 
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags)  DRM_IOCTL_DEF(ioctl, _func, _flags)
-#else
-#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) DRM_IOCTL_DEF(ioctl, drm_invalid_op, _flags)
-#endif
-
 /* Ioctl table */
 static const struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_legacy_irq_by_busid,
-			     DRM_MASTER|DRM_ROOT_ONLY),
-
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, 0),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
@@ -586,63 +588,15 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_MASTER),
 
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
-
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
-
 	DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, 0),
 
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH),
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH),
-
 	DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_LOCK, drm_legacy_lock, DRM_AUTH),
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_legacy_unlock, DRM_AUTH),
-
 	DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
 
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_legacy_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_legacy_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_legacy_infobufs, DRM_AUTH),
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_legacy_mapbufs, DRM_AUTH),
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH),
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH),
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_legacy_irq_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
-#if IS_ENABLED(CONFIG_AGP)
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_legacy_agp_acquire_ioctl,
-			     DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_legacy_agp_release_ioctl,
-			     DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_legacy_agp_enable_ioctl,
-			     DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_legacy_agp_info_ioctl, DRM_AUTH),
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_legacy_agp_alloc_ioctl,
-			     DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_legacy_agp_free_ioctl,
-			     DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_legacy_agp_bind_ioctl,
-			     DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_legacy_agp_unbind_ioctl,
-			     DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-#endif
-
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_legacy_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_legacy_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
-	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank_ioctl, DRM_UNLOCKED),
-
-	DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_legacy_modeset_ctl_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank_ioctl, 0),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
@@ -775,7 +729,7 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata,
 {
 	struct drm_file *file_priv = file->private_data;
 	struct drm_device *dev = file_priv->minor->dev;
-	int retcode;
+	int ret;
 
 	/* Update drm_file owner if fd was passed along. */
 	drm_file_update_pid(file_priv);
@@ -783,20 +737,11 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata,
 	if (drm_dev_is_unplugged(dev))
 		return -ENODEV;
 
-	retcode = drm_ioctl_permit(flags, file_priv);
-	if (unlikely(retcode))
-		return retcode;
-
-	/* Enforce sane locking for modern driver ioctls. */
-	if (likely(!drm_core_check_feature(dev, DRIVER_LEGACY)) ||
-	    (flags & DRM_UNLOCKED))
-		retcode = func(dev, kdata, file_priv);
-	else {
-		mutex_lock(&drm_global_mutex);
-		retcode = func(dev, kdata, file_priv);
-		mutex_unlock(&drm_global_mutex);
-	}
-	return retcode;
+	ret = drm_ioctl_permit(flags, file_priv);
+	if (unlikely(ret))
+		return ret;
+
+	return func(dev, kdata, file_priv);
 }
 EXPORT_SYMBOL(drm_ioctl_kernel);
 
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
deleted file mode 100644
index d327638e15ee74b7eda309608a1630ffd0f9c9d8..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/drm_irq.c
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * drm_irq.c IRQ and vblank support
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-
-#include <linux/export.h>
-#include <linux/interrupt.h>	/* For task queue support */
-#include <linux/pci.h>
-#include <linux/vgaarb.h>
-
-#include <drm/drm.h>
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_legacy.h>
-#include <drm/drm_print.h>
-#include <drm/drm_vblank.h>
-
-#include "drm_internal.h"
-
-static int drm_legacy_irq_install(struct drm_device *dev, int irq)
-{
-	int ret;
-	unsigned long sh_flags = 0;
-
-	if (irq == 0)
-		return -EINVAL;
-
-	if (dev->irq_enabled)
-		return -EBUSY;
-	dev->irq_enabled = true;
-
-	DRM_DEBUG("irq=%d\n", irq);
-
-	/* Before installing handler */
-	if (dev->driver->irq_preinstall)
-		dev->driver->irq_preinstall(dev);
-
-	/* PCI devices require shared interrupts. */
-	if (dev_is_pci(dev->dev))
-		sh_flags = IRQF_SHARED;
-
-	ret = request_irq(irq, dev->driver->irq_handler,
-			  sh_flags, dev->driver->name, dev);
-
-	if (ret < 0) {
-		dev->irq_enabled = false;
-		return ret;
-	}
-
-	/* After installing handler */
-	if (dev->driver->irq_postinstall)
-		ret = dev->driver->irq_postinstall(dev);
-
-	if (ret < 0) {
-		dev->irq_enabled = false;
-		if (drm_core_check_feature(dev, DRIVER_LEGACY))
-			vga_client_unregister(to_pci_dev(dev->dev));
-		free_irq(irq, dev);
-	} else {
-		dev->irq = irq;
-	}
-
-	return ret;
-}
-
-int drm_legacy_irq_uninstall(struct drm_device *dev)
-{
-	unsigned long irqflags;
-	bool irq_enabled;
-	int i;
-
-	irq_enabled = dev->irq_enabled;
-	dev->irq_enabled = false;
-
-	/*
-	 * Wake up any waiters so they don't hang. This is just to paper over
-	 * issues for UMS drivers which aren't in full control of their
-	 * vblank/irq handling. KMS drivers must ensure that vblanks are all
-	 * disabled when uninstalling the irq handler.
-	 */
-	if (drm_dev_has_vblank(dev)) {
-		spin_lock_irqsave(&dev->vbl_lock, irqflags);
-		for (i = 0; i < dev->num_crtcs; i++) {
-			struct drm_vblank_crtc *vblank = &dev->vblank[i];
-
-			if (!vblank->enabled)
-				continue;
-
-			WARN_ON(drm_core_check_feature(dev, DRIVER_MODESET));
-
-			drm_vblank_disable_and_save(dev, i);
-			wake_up(&vblank->queue);
-		}
-		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-	}
-
-	if (!irq_enabled)
-		return -EINVAL;
-
-	DRM_DEBUG("irq=%d\n", dev->irq);
-
-	if (drm_core_check_feature(dev, DRIVER_LEGACY))
-		vga_client_unregister(to_pci_dev(dev->dev));
-
-	if (dev->driver->irq_uninstall)
-		dev->driver->irq_uninstall(dev);
-
-	free_irq(dev->irq, dev);
-
-	return 0;
-}
-EXPORT_SYMBOL(drm_legacy_irq_uninstall);
-
-int drm_legacy_irq_control(struct drm_device *dev, void *data,
-			   struct drm_file *file_priv)
-{
-	struct drm_control *ctl = data;
-	int ret = 0, irq;
-	struct pci_dev *pdev;
-
-	/* if we don't have an IRQ we fall back for compatibility reasons -
-	 * this used to be a separate function in drm_dma.h
-	 */
-
-	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
-		return 0;
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return 0;
-	/* UMS was only ever supported on pci devices. */
-	if (WARN_ON(!dev_is_pci(dev->dev)))
-		return -EINVAL;
-
-	switch (ctl->func) {
-	case DRM_INST_HANDLER:
-		pdev = to_pci_dev(dev->dev);
-		irq = pdev->irq;
-
-		if (dev->if_version < DRM_IF_VERSION(1, 2) &&
-		    ctl->irq != irq)
-			return -EINVAL;
-		mutex_lock(&dev->struct_mutex);
-		ret = drm_legacy_irq_install(dev, irq);
-		mutex_unlock(&dev->struct_mutex);
-
-		return ret;
-	case DRM_UNINST_HANDLER:
-		mutex_lock(&dev->struct_mutex);
-		ret = drm_legacy_irq_uninstall(dev);
-		mutex_unlock(&dev->struct_mutex);
-
-		return ret;
-	default:
-		return -EINVAL;
-	}
-}
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
deleted file mode 100644
index 70c9dba114a61ac20bf43d048c1b87b271be1e6f..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/drm_legacy.h
+++ /dev/null
@@ -1,290 +0,0 @@
-#ifndef __DRM_LEGACY_H__
-#define __DRM_LEGACY_H__
-
-/*
- * Copyright (c) 2014 David Herrmann <dh.herrmann@gmail.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * This file contains legacy interfaces that modern drm drivers
- * should no longer be using. They cannot be removed as legacy
- * drivers use them, and removing them would be an API break.
- */
-#include <linux/list.h>
-
-#include <drm/drm.h>
-#include <drm/drm_device.h>
-#include <drm/drm_legacy.h>
-
-struct agp_memory;
-struct drm_buf_desc;
-struct drm_device;
-struct drm_file;
-struct drm_hash_item;
-struct drm_open_hash;
-
-/*
- * Hash-table Support
- */
-
-#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
-
-/* drm_hashtab.c */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
-int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
-int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
-			      unsigned long seed, int bits, int shift,
-			      unsigned long add);
-int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
-
-void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
-int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
-int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
-void drm_ht_remove(struct drm_open_hash *ht);
-#endif
-
-/*
- * RCU-safe interface
- *
- * The user of this API needs to make sure that two or more instances of the
- * hash table manipulation functions are never run simultaneously.
- * The lookup function drm_ht_find_item_rcu may, however, run simultaneously
- * with any of the manipulation functions as long as it's called from within
- * an RCU read-locked section.
- */
-#define drm_ht_insert_item_rcu drm_ht_insert_item
-#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please
-#define drm_ht_remove_key_rcu drm_ht_remove_key
-#define drm_ht_remove_item_rcu drm_ht_remove_item
-#define drm_ht_find_item_rcu drm_ht_find_item
-
-/*
- * Generic DRM Contexts
- */
-
-#define DRM_KERNEL_CONTEXT		0
-#define DRM_RESERVED_CONTEXTS		1
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_ctxbitmap_init(struct drm_device *dev);
-void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
-void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file);
-#else
-static inline void drm_legacy_ctxbitmap_init(struct drm_device *dev) {}
-static inline void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev) {}
-static inline void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file) {}
-#endif
-
-void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int drm_legacy_resctx(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_addctx(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_getctx(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_switchctx(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_newctx(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_rmctx(struct drm_device *d, void *v, struct drm_file *f);
-
-int drm_legacy_setsareactx(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f);
-#endif
-
-/*
- * Generic Buffer Management
- */
-
-#define DRM_MAP_HASH_OFFSET 0x10000000
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-static inline int drm_legacy_create_map_hash(struct drm_device *dev)
-{
-	return drm_ht_create(&dev->map_hash, 12);
-}
-
-static inline void drm_legacy_remove_map_hash(struct drm_device *dev)
-{
-	drm_ht_remove(&dev->map_hash);
-}
-#else
-static inline int drm_legacy_create_map_hash(struct drm_device *dev)
-{
-	return 0;
-}
-
-static inline void drm_legacy_remove_map_hash(struct drm_device *dev) {}
-#endif
-
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
-			    struct drm_file *file_priv);
-int drm_legacy_addmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_rmmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
-
-int drm_legacy_addbufs(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_infobufs(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_markbufs(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_freebufs(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_mapbufs(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_dma_ioctl(struct drm_device *d, void *v, struct drm_file *f);
-#endif
-
-int __drm_legacy_infobufs(struct drm_device *, void *, int *,
-			  int (*)(void *, int, struct drm_buf_entry *));
-int __drm_legacy_mapbufs(struct drm_device *, void *, int *,
-			  void __user **,
-			  int (*)(void *, int, unsigned long, struct drm_buf *),
-			  struct drm_file *);
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_master_rmmaps(struct drm_device *dev,
-			      struct drm_master *master);
-void drm_legacy_rmmaps(struct drm_device *dev);
-#else
-static inline void drm_legacy_master_rmmaps(struct drm_device *dev,
-					    struct drm_master *master) {}
-static inline void drm_legacy_rmmaps(struct drm_device *dev) {}
-#endif
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_vma_flush(struct drm_device *d);
-#else
-static inline void drm_legacy_vma_flush(struct drm_device *d)
-{
-	/* do nothing */
-}
-#endif
-
-/*
- * AGP Support
- */
-
-struct drm_agp_mem {
-	unsigned long handle;
-	struct agp_memory *memory;
-	unsigned long bound;
-	int pages;
-	struct list_head head;
-};
-
-/* drm_agpsupport.c */
-#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP)
-void drm_legacy_agp_clear(struct drm_device *dev);
-
-int drm_legacy_agp_acquire_ioctl(struct drm_device *dev, void *data,
-				 struct drm_file *file_priv);
-int drm_legacy_agp_release_ioctl(struct drm_device *dev, void *data,
-				 struct drm_file *file_priv);
-int drm_legacy_agp_enable_ioctl(struct drm_device *dev, void *data,
-				struct drm_file *file_priv);
-int drm_legacy_agp_info_ioctl(struct drm_device *dev, void *data,
-			      struct drm_file *file_priv);
-int drm_legacy_agp_alloc_ioctl(struct drm_device *dev, void *data,
-			       struct drm_file *file_priv);
-int drm_legacy_agp_free_ioctl(struct drm_device *dev, void *data,
-			      struct drm_file *file_priv);
-int drm_legacy_agp_unbind_ioctl(struct drm_device *dev, void *data,
-				struct drm_file *file_priv);
-int drm_legacy_agp_bind_ioctl(struct drm_device *dev, void *data,
-			      struct drm_file *file_priv);
-#else
-static inline void drm_legacy_agp_clear(struct drm_device *dev) {}
-#endif
-
-/* drm_lock.c */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f);
-void drm_legacy_lock_release(struct drm_device *dev, struct file *filp);
-#else
-static inline void drm_legacy_lock_release(struct drm_device *dev, struct file *filp) {}
-#endif
-
-/* DMA support */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-int drm_legacy_dma_setup(struct drm_device *dev);
-void drm_legacy_dma_takedown(struct drm_device *dev);
-#else
-static inline int drm_legacy_dma_setup(struct drm_device *dev)
-{
-	return 0;
-}
-#endif
-
-void drm_legacy_free_buffer(struct drm_device *dev,
-			    struct drm_buf * buf);
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_reclaim_buffers(struct drm_device *dev,
-				struct drm_file *filp);
-#else
-static inline void drm_legacy_reclaim_buffers(struct drm_device *dev,
-					      struct drm_file *filp) {}
-#endif
-
-/* Scatter Gather Support */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_sg_cleanup(struct drm_device *dev);
-int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
-			struct drm_file *file_priv);
-int drm_legacy_sg_free(struct drm_device *dev, void *data,
-		       struct drm_file *file_priv);
-#endif
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_init_members(struct drm_device *dev);
-void drm_legacy_destroy_members(struct drm_device *dev);
-void drm_legacy_dev_reinit(struct drm_device *dev);
-int drm_legacy_setup(struct drm_device * dev);
-#else
-static inline void drm_legacy_init_members(struct drm_device *dev) {}
-static inline void drm_legacy_destroy_members(struct drm_device *dev) {}
-static inline void drm_legacy_dev_reinit(struct drm_device *dev) {}
-static inline int drm_legacy_setup(struct drm_device * dev) { return 0; }
-#endif
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master);
-#else
-static inline void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master) {}
-#endif
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_master_legacy_init(struct drm_master *master);
-#else
-static inline void drm_master_legacy_init(struct drm_master *master) {}
-#endif
-
-/* drm_pci.c */
-#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_PCI)
-int drm_legacy_irq_by_busid(struct drm_device *dev, void *data, struct drm_file *file_priv);
-void drm_legacy_pci_agp_destroy(struct drm_device *dev);
-#else
-static inline int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
-					  struct drm_file *file_priv)
-{
-	return -EINVAL;
-}
-
-static inline void drm_legacy_pci_agp_destroy(struct drm_device *dev) {}
-#endif
-
-#endif /* __DRM_LEGACY_H__ */
diff --git a/drivers/gpu/drm/drm_legacy_misc.c b/drivers/gpu/drm/drm_legacy_misc.c
deleted file mode 100644
index d4c5434062d7c35164ef0561f5a15933980d2977..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/drm_legacy_misc.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * \file drm_legacy_misc.c
- * Misc legacy support functions.
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_print.h>
-
-#include "drm_internal.h"
-#include "drm_legacy.h"
-
-void drm_legacy_init_members(struct drm_device *dev)
-{
-	INIT_LIST_HEAD(&dev->ctxlist);
-	INIT_LIST_HEAD(&dev->vmalist);
-	INIT_LIST_HEAD(&dev->maplist);
-	spin_lock_init(&dev->buf_lock);
-	mutex_init(&dev->ctxlist_mutex);
-}
-
-void drm_legacy_destroy_members(struct drm_device *dev)
-{
-	mutex_destroy(&dev->ctxlist_mutex);
-}
-
-int drm_legacy_setup(struct drm_device * dev)
-{
-	int ret;
-
-	if (dev->driver->firstopen &&
-	    drm_core_check_feature(dev, DRIVER_LEGACY)) {
-		ret = dev->driver->firstopen(dev);
-		if (ret != 0)
-			return ret;
-	}
-
-	ret = drm_legacy_dma_setup(dev);
-	if (ret < 0)
-		return ret;
-
-
-	DRM_DEBUG("\n");
-	return 0;
-}
-
-void drm_legacy_dev_reinit(struct drm_device *dev)
-{
-	if (dev->irq_enabled)
-		drm_legacy_irq_uninstall(dev);
-
-	mutex_lock(&dev->struct_mutex);
-
-	drm_legacy_agp_clear(dev);
-
-	drm_legacy_sg_cleanup(dev);
-	drm_legacy_vma_flush(dev);
-	drm_legacy_dma_takedown(dev);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	dev->sigdata.lock = NULL;
-
-	dev->context_flag = 0;
-	dev->last_context = 0;
-	dev->if_version = 0;
-
-	DRM_DEBUG("lastclose completed\n");
-}
-
-void drm_master_legacy_init(struct drm_master *master)
-{
-	spin_lock_init(&master->lock.spinlock);
-	init_waitqueue_head(&master->lock.lock_queue);
-}
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
deleted file mode 100644
index 1efbd5389d89301718c831ffdf8ab80eb4682906..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/drm_lock.c
+++ /dev/null
@@ -1,373 +0,0 @@
-/*
- * \file drm_lock.c
- * IOCTLs for locking
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/sched/signal.h>
-
-#include <drm/drm.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_print.h>
-
-#include "drm_internal.h"
-#include "drm_legacy.h"
-
-static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
-
-/*
- * Take the heavyweight lock.
- *
- * \param lock lock pointer.
- * \param context locking context.
- * \return one if the lock is held, or zero otherwise.
- *
- * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
- */
-static
-int drm_lock_take(struct drm_lock_data *lock_data,
-		  unsigned int context)
-{
-	unsigned int old, new, prev;
-	volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
-	spin_lock_bh(&lock_data->spinlock);
-	do {
-		old = *lock;
-		if (old & _DRM_LOCK_HELD)
-			new = old | _DRM_LOCK_CONT;
-		else {
-			new = context | _DRM_LOCK_HELD |
-				((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
-				 _DRM_LOCK_CONT : 0);
-		}
-		prev = cmpxchg(lock, old, new);
-	} while (prev != old);
-	spin_unlock_bh(&lock_data->spinlock);
-
-	if (_DRM_LOCKING_CONTEXT(old) == context) {
-		if (old & _DRM_LOCK_HELD) {
-			if (context != DRM_KERNEL_CONTEXT) {
-				DRM_ERROR("%d holds heavyweight lock\n",
-					  context);
-			}
-			return 0;
-		}
-	}
-
-	if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
-		/* Have lock */
-		return 1;
-	}
-	return 0;
-}
-
-/*
- * This takes a lock forcibly and hands it to context.	Should ONLY be used
- * inside *_unlock to give lock to kernel before calling *_dma_schedule.
- *
- * \param dev DRM device.
- * \param lock lock pointer.
- * \param context locking context.
- * \return always one.
- *
- * Resets the lock file pointer.
- * Marks the lock as held by the given context, via the \p cmpxchg instruction.
- */
-static int drm_lock_transfer(struct drm_lock_data *lock_data,
-			     unsigned int context)
-{
-	unsigned int old, new, prev;
-	volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
-	lock_data->file_priv = NULL;
-	do {
-		old = *lock;
-		new = context | _DRM_LOCK_HELD;
-		prev = cmpxchg(lock, old, new);
-	} while (prev != old);
-	return 1;
-}
-
-static int drm_legacy_lock_free(struct drm_lock_data *lock_data,
-				unsigned int context)
-{
-	unsigned int old, new, prev;
-	volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
-	spin_lock_bh(&lock_data->spinlock);
-	if (lock_data->kernel_waiters != 0) {
-		drm_lock_transfer(lock_data, 0);
-		lock_data->idle_has_lock = 1;
-		spin_unlock_bh(&lock_data->spinlock);
-		return 1;
-	}
-	spin_unlock_bh(&lock_data->spinlock);
-
-	do {
-		old = *lock;
-		new = _DRM_LOCKING_CONTEXT(old);
-		prev = cmpxchg(lock, old, new);
-	} while (prev != old);
-
-	if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
-		DRM_ERROR("%d freed heavyweight lock held by %d\n",
-			  context, _DRM_LOCKING_CONTEXT(old));
-		return 1;
-	}
-	wake_up_interruptible(&lock_data->lock_queue);
-	return 0;
-}
-
-/*
- * Lock ioctl.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_lock structure.
- * \return zero on success or negative number on failure.
- *
- * Add the current task to the lock wait queue, and attempt to take the lock.
- */
-int drm_legacy_lock(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv)
-{
-	DECLARE_WAITQUEUE(entry, current);
-	struct drm_lock *lock = data;
-	struct drm_master *master = file_priv->master;
-	int ret = 0;
-
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EOPNOTSUPP;
-
-	++file_priv->lock_count;
-
-	if (lock->context == DRM_KERNEL_CONTEXT) {
-		DRM_ERROR("Process %d using kernel context %d\n",
-			  task_pid_nr(current), lock->context);
-		return -EINVAL;
-	}
-
-	DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
-		  lock->context, task_pid_nr(current),
-		  master->lock.hw_lock ? master->lock.hw_lock->lock : -1,
-		  lock->flags);
-
-	add_wait_queue(&master->lock.lock_queue, &entry);
-	spin_lock_bh(&master->lock.spinlock);
-	master->lock.user_waiters++;
-	spin_unlock_bh(&master->lock.spinlock);
-
-	for (;;) {
-		__set_current_state(TASK_INTERRUPTIBLE);
-		if (!master->lock.hw_lock) {
-			/* Device has been unregistered */
-			send_sig(SIGTERM, current, 0);
-			ret = -EINTR;
-			break;
-		}
-		if (drm_lock_take(&master->lock, lock->context)) {
-			master->lock.file_priv = file_priv;
-			master->lock.lock_time = jiffies;
-			break;	/* Got lock */
-		}
-
-		/* Contention */
-		mutex_unlock(&drm_global_mutex);
-		schedule();
-		mutex_lock(&drm_global_mutex);
-		if (signal_pending(current)) {
-			ret = -EINTR;
-			break;
-		}
-	}
-	spin_lock_bh(&master->lock.spinlock);
-	master->lock.user_waiters--;
-	spin_unlock_bh(&master->lock.spinlock);
-	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(&master->lock.lock_queue, &entry);
-
-	DRM_DEBUG("%d %s\n", lock->context,
-		  ret ? "interrupted" : "has lock");
-	if (ret) return ret;
-
-	/* don't block all signals on the master process for now;
-	 * probably not the correct answer, but it lets us debug the xkb
-	 * xserver for now */
-	if (!drm_is_current_master(file_priv)) {
-		dev->sigdata.context = lock->context;
-		dev->sigdata.lock = master->lock.hw_lock;
-	}
-
-	if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
-	{
-		if (dev->driver->dma_quiescent(dev)) {
-			DRM_DEBUG("%d waiting for DMA quiescent\n",
-				  lock->context);
-			return -EBUSY;
-		}
-	}
-
-	return 0;
-}
-
-/*
- * Unlock ioctl.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_lock structure.
- * \return zero on success or negative number on failure.
- *
- * Transfer and free the lock.
- */
-int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_lock *lock = data;
-	struct drm_master *master = file_priv->master;
-
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EOPNOTSUPP;
-
-	if (lock->context == DRM_KERNEL_CONTEXT) {
-		DRM_ERROR("Process %d using kernel context %d\n",
-			  task_pid_nr(current), lock->context);
-		return -EINVAL;
-	}
-
-	if (drm_legacy_lock_free(&master->lock, lock->context)) {
-		/* FIXME: Should really bail out here. */
-	}
-
-	return 0;
-}
-
-/*
- * This function returns immediately and takes the hw lock
- * with the kernel context if it is free, otherwise it gets the highest priority when and if
- * it is eventually released.
- *
- * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
- * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
- * a deadlock, which is why the "idlelock" was invented).
- *
- * This should be sufficient to wait for GPU idle without
- * having to worry about starvation.
- */
-void drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
-{
-	int ret;
-
-	spin_lock_bh(&lock_data->spinlock);
-	lock_data->kernel_waiters++;
-	if (!lock_data->idle_has_lock) {
-
-		spin_unlock_bh(&lock_data->spinlock);
-		ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
-		spin_lock_bh(&lock_data->spinlock);
-
-		if (ret == 1)
-			lock_data->idle_has_lock = 1;
-	}
-	spin_unlock_bh(&lock_data->spinlock);
-}
-EXPORT_SYMBOL(drm_legacy_idlelock_take);
-
-void drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
-{
-	unsigned int old, prev;
-	volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
-	spin_lock_bh(&lock_data->spinlock);
-	if (--lock_data->kernel_waiters == 0) {
-		if (lock_data->idle_has_lock) {
-			do {
-				old = *lock;
-				prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
-			} while (prev != old);
-			wake_up_interruptible(&lock_data->lock_queue);
-			lock_data->idle_has_lock = 0;
-		}
-	}
-	spin_unlock_bh(&lock_data->spinlock);
-}
-EXPORT_SYMBOL(drm_legacy_idlelock_release);
-
-static int drm_legacy_i_have_hw_lock(struct drm_device *dev,
-				     struct drm_file *file_priv)
-{
-	struct drm_master *master = file_priv->master;
-
-	return (file_priv->lock_count && master->lock.hw_lock &&
-		_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
-		master->lock.file_priv == file_priv);
-}
-
-void drm_legacy_lock_release(struct drm_device *dev, struct file *filp)
-{
-	struct drm_file *file_priv = filp->private_data;
-
-	/* if the master has gone away we can't do anything with the lock */
-	if (!dev->master)
-		return;
-
-	if (drm_legacy_i_have_hw_lock(dev, file_priv)) {
-		DRM_DEBUG("File %p released, freeing lock for context %d\n",
-			  filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
-		drm_legacy_lock_free(&file_priv->master->lock,
-				     _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
-	}
-}
-
-void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master)
-{
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return;
-
-	/*
-	 * Since the master is disappearing, so is the
-	 * possibility to lock.
-	 */
-	mutex_lock(&dev->struct_mutex);
-	if (master->lock.hw_lock) {
-		if (dev->sigdata.lock == master->lock.hw_lock)
-			dev->sigdata.lock = NULL;
-		master->lock.hw_lock = NULL;
-		master->lock.file_priv = NULL;
-		wake_up_interruptible_all(&master->lock.lock_queue);
-	}
-	mutex_unlock(&dev->struct_mutex);
-}
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
deleted file mode 100644
index d2e1dccd8113e0dd98e5b506c6c4ec0da073badc..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/drm_memory.c
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * \file drm_memory.c
- * Memory management wrappers for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Thu Feb  4 14:00:34 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/highmem.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-
-#include <drm/drm_cache.h>
-#include <drm/drm_device.h>
-
-#include "drm_legacy.h"
-
-#if IS_ENABLED(CONFIG_AGP)
-
-#ifdef HAVE_PAGE_AGP
-# include <asm/agp.h>
-#else
-# ifdef __powerpc__
-#  define PAGE_AGP	pgprot_noncached_wc(PAGE_KERNEL)
-# else
-#  define PAGE_AGP	PAGE_KERNEL
-# endif
-#endif
-
-static void *agp_remap(unsigned long offset, unsigned long size,
-		       struct drm_device *dev)
-{
-	unsigned long i, num_pages =
-	    PAGE_ALIGN(size) / PAGE_SIZE;
-	struct drm_agp_mem *agpmem;
-	struct page **page_map;
-	struct page **phys_page_map;
-	void *addr;
-
-	size = PAGE_ALIGN(size);
-
-#ifdef __alpha__
-	offset -= dev->hose->mem_space->start;
-#endif
-
-	list_for_each_entry(agpmem, &dev->agp->memory, head)
-		if (agpmem->bound <= offset
-		    && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
-		    (offset + size))
-			break;
-	if (&agpmem->head == &dev->agp->memory)
-		return NULL;
-
-	/*
-	 * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
-	 * the CPU do not get remapped by the GART.  We fix this by using the kernel's
-	 * page-table instead (that's probably faster anyhow...).
-	 */
-	/* note: use vmalloc() because num_pages could be large... */
-	page_map = vmalloc(array_size(num_pages, sizeof(struct page *)));
-	if (!page_map)
-		return NULL;
-
-	phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE);
-	for (i = 0; i < num_pages; ++i)
-		page_map[i] = phys_page_map[i];
-	addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
-	vfree(page_map);
-
-	return addr;
-}
-
-#else /*  CONFIG_AGP  */
-static inline void *agp_remap(unsigned long offset, unsigned long size,
-			      struct drm_device *dev)
-{
-	return NULL;
-}
-
-#endif /* CONFIG_AGP */
-
-void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev)
-{
-	if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
-		map->handle = agp_remap(map->offset, map->size, dev);
-	else
-		map->handle = ioremap(map->offset, map->size);
-}
-EXPORT_SYMBOL(drm_legacy_ioremap);
-
-void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
-{
-	if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
-		map->handle = agp_remap(map->offset, map->size, dev);
-	else
-		map->handle = ioremap_wc(map->offset, map->size);
-}
-EXPORT_SYMBOL(drm_legacy_ioremap_wc);
-
-void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
-{
-	if (!map->handle || !map->size)
-		return;
-
-	if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
-		vunmap(map->handle);
-	else
-		iounmap(map->handle);
-}
-EXPORT_SYMBOL(drm_legacy_ioremapfree);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 14201f73aab1340ac8cc8f050f742a419588e369..843a6dbda93a07724ce3271584746c34ffd29747 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -347,7 +347,8 @@ static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
 {
 	struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
 
-	mipi_dsi_detach(dsi);
+	if (dsi->attached)
+		mipi_dsi_detach(dsi);
 	mipi_dsi_device_unregister(dsi);
 
 	return 0;
@@ -370,11 +371,18 @@ EXPORT_SYMBOL(mipi_dsi_host_unregister);
 int mipi_dsi_attach(struct mipi_dsi_device *dsi)
 {
 	const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+	int ret;
 
 	if (!ops || !ops->attach)
 		return -ENOSYS;
 
-	return ops->attach(dsi->host, dsi);
+	ret = ops->attach(dsi->host, dsi);
+	if (ret)
+		return ret;
+
+	dsi->attached = true;
+
+	return 0;
 }
 EXPORT_SYMBOL(mipi_dsi_attach);
 
@@ -386,9 +394,14 @@ int mipi_dsi_detach(struct mipi_dsi_device *dsi)
 {
 	const struct mipi_dsi_host_ops *ops = dsi->host->ops;
 
+	if (WARN_ON(!dsi->attached))
+		return -EINVAL;
+
 	if (!ops || !ops->detach)
 		return -ENOSYS;
 
+	dsi->attached = false;
+
 	return ops->detach(dsi->host, dsi);
 }
 EXPORT_SYMBOL(mipi_dsi_detach);
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index ac0d2ce3f870411b3a31495bfdcfa16f7e82705c..0e8355063eee36a7d9166fc6c5ccc26a279dcb86 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -538,7 +538,7 @@ static int set_property_atomic(struct drm_mode_object *obj,
 						       obj_to_connector(obj),
 						       prop_value);
 	} else {
-		ret = drm_atomic_set_property(state, file_priv, obj, prop, prop_value);
+		ret = drm_atomic_set_property(state, file_priv, obj, prop, prop_value, false);
 		if (ret)
 			goto out;
 		ret = drm_atomic_commit(state);
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 39d35fc3a43bcfaeb89ab55b6df67d79e666088e..c585f1e8803ec23ddbfd51dcd90fcca75b826689 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -29,18 +29,12 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 
+#include <drm/drm_auth.h>
 #include <drm/drm.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_print.h>
 
 #include "drm_internal.h"
-#include "drm_legacy.h"
-
-#ifdef CONFIG_DRM_LEGACY
-/* List of devices hanging off drivers with stealth attach. */
-static LIST_HEAD(legacy_dev_list);
-static DEFINE_MUTEX(legacy_dev_list_lock);
-#endif
 
 static int drm_get_pci_domain(struct drm_device *dev)
 {
@@ -71,199 +65,3 @@ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
 	master->unique_len = strlen(master->unique);
 	return 0;
 }
-
-#ifdef CONFIG_DRM_LEGACY
-
-static int drm_legacy_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
-{
-	struct pci_dev *pdev = to_pci_dev(dev->dev);
-
-	if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
-	    (p->busnum & 0xff) != pdev->bus->number ||
-	    p->devnum != PCI_SLOT(pdev->devfn) || p->funcnum != PCI_FUNC(pdev->devfn))
-		return -EINVAL;
-
-	p->irq = pdev->irq;
-
-	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
-		  p->irq);
-	return 0;
-}
-
-/**
- * drm_legacy_irq_by_busid - Get interrupt from bus ID
- * @dev: DRM device
- * @data: IOCTL parameter pointing to a drm_irq_busid structure
- * @file_priv: DRM file private.
- *
- * Finds the PCI device with the specified bus id and gets its IRQ number.
- * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
- * to that of the device that this DRM instance attached to.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
-			    struct drm_file *file_priv)
-{
-	struct drm_irq_busid *p = data;
-
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EOPNOTSUPP;
-
-	/* UMS was only ever supported on PCI devices. */
-	if (WARN_ON(!dev_is_pci(dev->dev)))
-		return -EINVAL;
-
-	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
-		return -EOPNOTSUPP;
-
-	return drm_legacy_pci_irq_by_busid(dev, p);
-}
-
-void drm_legacy_pci_agp_destroy(struct drm_device *dev)
-{
-	if (dev->agp) {
-		arch_phys_wc_del(dev->agp->agp_mtrr);
-		drm_legacy_agp_clear(dev);
-		kfree(dev->agp);
-		dev->agp = NULL;
-	}
-}
-
-static void drm_legacy_pci_agp_init(struct drm_device *dev)
-{
-	if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
-		if (pci_find_capability(to_pci_dev(dev->dev), PCI_CAP_ID_AGP))
-			dev->agp = drm_legacy_agp_init(dev);
-		if (dev->agp) {
-			dev->agp->agp_mtrr = arch_phys_wc_add(
-				dev->agp->agp_info.aper_base,
-				dev->agp->agp_info.aper_size *
-				1024 * 1024);
-		}
-	}
-}
-
-static int drm_legacy_get_pci_dev(struct pci_dev *pdev,
-				  const struct pci_device_id *ent,
-				  const struct drm_driver *driver)
-{
-	struct drm_device *dev;
-	int ret;
-
-	DRM_DEBUG("\n");
-
-	dev = drm_dev_alloc(driver, &pdev->dev);
-	if (IS_ERR(dev))
-		return PTR_ERR(dev);
-
-	ret = pci_enable_device(pdev);
-	if (ret)
-		goto err_free;
-
-#ifdef __alpha__
-	dev->hose = pdev->sysdata;
-#endif
-
-	drm_legacy_pci_agp_init(dev);
-
-	ret = drm_dev_register(dev, ent->driver_data);
-	if (ret)
-		goto err_agp;
-
-	if (drm_core_check_feature(dev, DRIVER_LEGACY)) {
-		mutex_lock(&legacy_dev_list_lock);
-		list_add_tail(&dev->legacy_dev_list, &legacy_dev_list);
-		mutex_unlock(&legacy_dev_list_lock);
-	}
-
-	return 0;
-
-err_agp:
-	drm_legacy_pci_agp_destroy(dev);
-	pci_disable_device(pdev);
-err_free:
-	drm_dev_put(dev);
-	return ret;
-}
-
-/**
- * drm_legacy_pci_init - shadow-attach a legacy DRM PCI driver
- * @driver: DRM device driver
- * @pdriver: PCI device driver
- *
- * This is only used by legacy dri1 drivers and deprecated.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int drm_legacy_pci_init(const struct drm_driver *driver,
-			struct pci_driver *pdriver)
-{
-	struct pci_dev *pdev = NULL;
-	const struct pci_device_id *pid;
-	int i;
-
-	DRM_DEBUG("\n");
-
-	if (WARN_ON(!(driver->driver_features & DRIVER_LEGACY)))
-		return -EINVAL;
-
-	/* If not using KMS, fall back to stealth mode manual scanning. */
-	for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
-		pid = &pdriver->id_table[i];
-
-		/* Loop around setting up a DRM device for each PCI device
-		 * matching our ID and device class.  If we had the internal
-		 * function that pci_get_subsys and pci_get_class used, we'd
-		 * be able to just pass pid in instead of doing a two-stage
-		 * thing.
-		 */
-		pdev = NULL;
-		while ((pdev =
-			pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
-				       pid->subdevice, pdev)) != NULL) {
-			if ((pdev->class & pid->class_mask) != pid->class)
-				continue;
-
-			/* stealth mode requires a manual probe */
-			pci_dev_get(pdev);
-			drm_legacy_get_pci_dev(pdev, pid, driver);
-		}
-	}
-	return 0;
-}
-EXPORT_SYMBOL(drm_legacy_pci_init);
-
-/**
- * drm_legacy_pci_exit - unregister shadow-attach legacy DRM driver
- * @driver: DRM device driver
- * @pdriver: PCI device driver
- *
- * Unregister a DRM driver shadow-attached through drm_legacy_pci_init(). This
- * is deprecated and only used by dri1 drivers.
- */
-void drm_legacy_pci_exit(const struct drm_driver *driver,
-			 struct pci_driver *pdriver)
-{
-	struct drm_device *dev, *tmp;
-
-	DRM_DEBUG("\n");
-
-	if (!(driver->driver_features & DRIVER_LEGACY)) {
-		WARN_ON(1);
-	} else {
-		mutex_lock(&legacy_dev_list_lock);
-		list_for_each_entry_safe(dev, tmp, &legacy_dev_list,
-					 legacy_dev_list) {
-			if (dev->driver == driver) {
-				list_del(&dev->legacy_dev_list);
-				drm_put_dev(dev);
-			}
-		}
-		mutex_unlock(&legacy_dev_list_lock);
-	}
-	DRM_INFO("Module unloaded\n");
-}
-EXPORT_SYMBOL(drm_legacy_pci_exit);
-
-#endif
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 24e7998d17313e1b2a19045fe589c00a3a1e9cdc..9e8e4c60983d60fb07bdf6b314db7cfc23ac38c2 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -230,6 +230,103 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
 	return 0;
 }
 
+/**
+ * DOC: hotspot properties
+ *
+ * HOTSPOT_X: property to set mouse hotspot x offset.
+ * HOTSPOT_Y: property to set mouse hotspot y offset.
+ *
+ * When the plane is being used as a cursor image to display a mouse pointer,
+ * the "hotspot" is the offset within the cursor image where mouse events
+ * are expected to go.
+ *
+ * Positive values move the hotspot from the top-left corner of the cursor
+ * plane towards the right and bottom.
+ *
+ * Most display drivers do not need this information because the
+ * hotspot is not actually connected to anything visible on screen.
+ * However, this is necessary for display drivers like the para-virtualized
+ * drivers (e.g. qxl, vbox, virtio, vmwgfx) that are attached to a user console
+ * with a mouse pointer.  Since these consoles are often being remoted over a
+ * network, they would otherwise have to wait to display the pointer movement to
+ * the user until a full network round-trip has occurred.  New mouse events have
+ * to be sent from the user's console, over the network to the virtual input
+ * devices, forwarded to the desktop for processing, and then the cursor plane's
+ * position can be updated and sent back to the user's console over the network.
+ * Instead, with the hotspot information, the console can anticipate the new
+ * location, and draw the mouse cursor there before the confirmation comes in.
+ * To do that correctly, the user's console must be able to predict how the
+ * desktop will process mouse events, which normally requires the desktop's
+ * mouse topology information, i.e. where each CRTC sits in the mouse coordinate
+ * space.  This is typically sent to the para-virtualized drivers using some
+ * driver-specific method, and the driver then forwards it to the console by
+ * way of the virtual display device or hypervisor.
+ *
+ * The assumption is generally made that there is only one cursor plane being
+ * used this way at a time, and that the desktop is feeding all mouse devices
+ * into the same global pointer.  Para-virtualized drivers that require this
+ * should expose only a single cursor plane, or find some other way
+ * to coordinate with a userspace desktop that supports multiple pointers.
+ * If the hotspot properties are set, the cursor plane is therefore assumed to
+ * be used only for displaying a mouse cursor image, and the position of the
+ * combined cursor plane + offset can be used for coordinating with input from
+ * a mouse device.
+ *
+ * The cursor will then be drawn either at the location of the plane in the CRTC
+ * console, or as a free-floating cursor plane on the user's console
+ * corresponding to their desktop mouse position.
+ *
+ * DRM clients which would like to work correctly on drivers which expose
+ * hotspot properties should advertise DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT.
+ * Setting this cap on drivers which do not special-case cursor planes will
+ * return EOPNOTSUPP, which userspace can use to gauge the requirements of the
+ * hardware/drivers it is running on. Advertising
+ * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT implies that the userspace client will
+ * correctly set the hotspot properties.
+ */
+
+/**
+ * drm_plane_create_hotspot_properties - creates the mouse hotspot
+ * properties and attaches them to the given cursor plane
+ *
+ * @plane: drm cursor plane
+ *
+ * This function enables the mouse hotspot properties on a given
+ * cursor plane. Look at the documentation for hotspot properties
+ * to get a better understanding of what they're used for.
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+static int drm_plane_create_hotspot_properties(struct drm_plane *plane)
+{
+	struct drm_property *prop_x;
+	struct drm_property *prop_y;
+
+	drm_WARN_ON(plane->dev,
+		    !drm_core_check_feature(plane->dev,
+					    DRIVER_CURSOR_HOTSPOT));
+
+	prop_x = drm_property_create_signed_range(plane->dev, 0, "HOTSPOT_X",
+						  INT_MIN, INT_MAX);
+	if (IS_ERR(prop_x))
+		return PTR_ERR(prop_x);
+
+	prop_y = drm_property_create_signed_range(plane->dev, 0, "HOTSPOT_Y",
+						  INT_MIN, INT_MAX);
+	if (IS_ERR(prop_y)) {
+		drm_property_destroy(plane->dev, prop_x);
+		return PTR_ERR(prop_y);
+	}
+
+	drm_object_attach_property(&plane->base, prop_x, 0);
+	drm_object_attach_property(&plane->base, prop_y, 0);
+	plane->hotspot_x_property = prop_x;
+	plane->hotspot_y_property = prop_y;
+
+	return 0;
+}
+
 __printf(9, 0)
 static int __drm_universal_plane_init(struct drm_device *dev,
 				      struct drm_plane *plane,
@@ -348,6 +445,10 @@ static int __drm_universal_plane_init(struct drm_device *dev,
 		drm_object_attach_property(&plane->base, config->prop_src_w, 0);
 		drm_object_attach_property(&plane->base, config->prop_src_h, 0);
 	}
+	if (drm_core_check_feature(dev, DRIVER_CURSOR_HOTSPOT) &&
+	    type == DRM_PLANE_TYPE_CURSOR) {
+		drm_plane_create_hotspot_properties(plane);
+	}
 
 	if (format_modifier_count)
 		create_in_format_blob(dev, plane);
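
For illustration, here is a minimal userspace sketch (not part of this patch) of
how a compositor could opt into hotspot-aware cursor planes and set the
properties created above through the atomic API. The plane and property IDs
(`cursor_plane_id`, `hotspot_x_prop`, `hotspot_y_prop`) are hypothetical and
assumed to have been discovered via drmModeObjectGetProperties():

```c
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Sketch: advertise hotspot support, then set HOTSPOT_X/Y on a cursor
 * plane in an atomic commit. Property IDs are assumed to have been
 * looked up by the caller (hypothetical parameters below).
 */
static int set_cursor_hotspot(int fd, uint32_t cursor_plane_id,
			      uint32_t hotspot_x_prop, uint32_t hotspot_y_prop,
			      int32_t hot_x, int32_t hot_y)
{
	drmModeAtomicReq *req;
	int ret;

	/* Without this cap, drivers with DRIVER_CURSOR_HOTSPOT hide their
	 * cursor planes from atomic clients (see drm_mode_getplane_res()).
	 */
	ret = drmSetClientCap(fd, DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT, 1);
	if (ret)
		return ret; /* fails with EOPNOTSUPP if unsupported */

	req = drmModeAtomicAlloc();
	if (!req)
		return -1;

	drmModeAtomicAddProperty(req, cursor_plane_id, hotspot_x_prop, hot_x);
	drmModeAtomicAddProperty(req, cursor_plane_id, hotspot_y_prop, hot_y);
	ret = drmModeAtomicCommit(fd, req, 0, NULL);
	drmModeAtomicFree(req);
	return ret;
}
```

If the cap cannot be set, the client can fall back to the legacy cursor ioctls,
which carry the hotspot implicitly (see the drm_mode_cursor_universal() change
below).
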
@@ -678,6 +779,19 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
 		    !file_priv->universal_planes)
 			continue;
 
+		/*
+		 * If we're running on a virtualized driver, then
+		 * unless userspace advertises support for the
+		 * virtualized cursor plane, hide cursor planes
+		 * because they'll be broken due to missing cursor
+		 * hotspot info.
+		 */
+		if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+		    drm_core_check_feature(dev, DRIVER_CURSOR_HOTSPOT) &&
+		    file_priv->atomic &&
+		    !file_priv->supports_virtualized_cursor_plane)
+			continue;
+
 		if (drm_lease_held(file_priv, plane->base.id)) {
 			if (count < plane_resp->count_planes &&
 			    put_user(plane->base.id, plane_ptr + count))
@@ -1052,8 +1166,10 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
 				return PTR_ERR(fb);
 			}
 
-			fb->hot_x = req->hot_x;
-			fb->hot_y = req->hot_y;
+			if (plane->hotspot_x_property && plane->state)
+				plane->state->hotspot_x = req->hot_x;
+			if (plane->hotspot_y_property && plane->state)
+				plane->state->hotspot_y = req->hot_y;
 		} else {
 			fb = NULL;
 		}
@@ -1442,6 +1558,36 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
  * Drivers implementing damage can use drm_atomic_helper_damage_iter_init() and
  * drm_atomic_helper_damage_iter_next() helper iterator function to get damage
  * rectangles clipped to &drm_plane_state.src.
+ *
+ * Note that there are two types of damage handling: frame damage and buffer
+ * damage; the type of damage handling implemented depends on a driver's upload
+ * target. Drivers implementing a per-plane or per-CRTC upload target need to
+ * handle frame damage, while drivers implementing a per-buffer upload target
+ * need to handle buffer damage.
+ *
+ * The existing damage helpers only support the frame damage type; there is no
+ * buffer age support or similar damage accumulation algorithm implemented yet.
+ *
+ * Only drivers handling frame damage can use the mentioned damage helpers to
+ * iterate over the damaged regions. Drivers that handle buffer damage must set
+ * &drm_plane_state.ignore_damage_clips for drm_atomic_helper_damage_iter_init()
+ * to know that damage clips should be ignored and return &drm_plane_state.src
+ * as the damage rectangle, to force a full plane update.
+ *
+ * Drivers with a per-buffer upload target could compare the &drm_plane_state.fb
+ * of the old and new plane states to determine if the framebuffer attached to a
+ * plane has changed or not since the last plane update. If &drm_plane_state.fb
+ * has changed, then &drm_plane_state.ignore_damage_clips must be set to true.
+ *
+ * That is because drivers with a per-buffer upload target expect the backing
+ * storage buffer to not change for a given plane. If the upload buffer changes
+ * between page flips, the new upload buffer has to be updated as a whole. This
+ * can be improved in the future if support for buffer damage is added to the
+ * DRM damage helpers, similarly to how user-space already handles this case as
+ * it is explained in the following documents:
+ *
+ *     https://registry.khronos.org/EGL/extensions/KHR/EGL_KHR_swap_buffers_with_damage.txt
+ *     https://emersion.fr/blog/2019/intro-to-damage-tracking/
  */
 
 /**
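
To make the buffer-damage rule above concrete, a hedged driver-side sketch
follows; foo_plane_atomic_check() is a hypothetical driver callback, while the
state accessors and the ignore_damage_clips field are the ones this series
documents:

```c
#include <drm/drm_atomic.h>
#include <drm/drm_plane.h>

/* Sketch (assumed driver code, not part of this patch): a driver with a
 * per-buffer upload target cannot trust frame damage clips when the
 * framebuffer backing the plane has changed, so it forces a full update.
 */
static int foo_plane_atomic_check(struct drm_plane *plane,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(state, plane);
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);

	/* Frame damage is relative to the previously displayed frame and
	 * says nothing about the stale contents of a different buffer.
	 */
	if (old_state->fb != new_state->fb)
		new_state->ignore_damage_clips = true;

	return 0;
}
```

Drivers handling frame damage would instead iterate the clips with
drm_atomic_helper_damage_iter_init()/_next() as described above.
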
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 5e95089676ff81ed70562d6828d786603742e6bf..7982be4b0306d54e152c27cb87c7be82234feca1 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -279,35 +279,3 @@ void drm_plane_helper_destroy(struct drm_plane *plane)
 	kfree(plane);
 }
 EXPORT_SYMBOL(drm_plane_helper_destroy);
-
-/**
- * drm_plane_helper_atomic_check() - Helper to check plane atomic-state
- * @plane: plane to check
- * @state: atomic state object
- *
- * Provides a default plane-state check handler for planes whose atomic-state
- * scale and positioning are not expected to change since the plane is always
- * a fullscreen scanout buffer.
- *
- * This is often the case for the primary plane of simple framebuffers. See
- * also drm_crtc_helper_atomic_check() for the respective CRTC-state check
- * helper function.
- *
- * RETURNS:
- * Zero on success, or an errno code otherwise.
- */
-int drm_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state)
-{
-	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
-	struct drm_crtc *new_crtc = new_plane_state->crtc;
-	struct drm_crtc_state *new_crtc_state = NULL;
-
-	if (new_crtc)
-		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
-
-	return drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
-						   DRM_PLANE_NO_SCALING,
-						   DRM_PLANE_NO_SCALING,
-						   false, false);
-}
-EXPORT_SYMBOL(drm_plane_helper_atomic_check);
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
deleted file mode 100644
index f4e6184d187721bad4c96c05481d18774ed2791d..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/drm_scatter.c
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * \file drm_scatter.c
- * IOCTLs to manage scatter/gather memory
- *
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com
- *
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include <drm/drm.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_print.h>
-
-#include "drm_legacy.h"
-
-#define DEBUG_SCATTER 0
-
-static void drm_sg_cleanup(struct drm_sg_mem * entry)
-{
-	struct page *page;
-	int i;
-
-	for (i = 0; i < entry->pages; i++) {
-		page = entry->pagelist[i];
-		if (page)
-			ClearPageReserved(page);
-	}
-
-	vfree(entry->virtual);
-
-	kfree(entry->busaddr);
-	kfree(entry->pagelist);
-	kfree(entry);
-}
-
-void drm_legacy_sg_cleanup(struct drm_device *dev)
-{
-	if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
-	    drm_core_check_feature(dev, DRIVER_LEGACY)) {
-		drm_sg_cleanup(dev->sg);
-		dev->sg = NULL;
-	}
-}
-#ifdef _LP64
-# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
-#else
-# define ScatterHandle(x) (unsigned int)(x)
-#endif
-
-int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
-			struct drm_file *file_priv)
-{
-	struct drm_scatter_gather *request = data;
-	struct drm_sg_mem *entry;
-	unsigned long pages, i, j;
-
-	DRM_DEBUG("\n");
-
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EOPNOTSUPP;
-
-	if (!drm_core_check_feature(dev, DRIVER_SG))
-		return -EOPNOTSUPP;
-
-	if (request->size > SIZE_MAX - PAGE_SIZE)
-		return -EINVAL;
-
-	if (dev->sg)
-		return -EINVAL;
-
-	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-	if (!entry)
-		return -ENOMEM;
-
-	pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
-	DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
-
-	entry->pages = pages;
-	entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL);
-	if (!entry->pagelist) {
-		kfree(entry);
-		return -ENOMEM;
-	}
-
-	entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL);
-	if (!entry->busaddr) {
-		kfree(entry->pagelist);
-		kfree(entry);
-		return -ENOMEM;
-	}
-
-	entry->virtual = vmalloc_32(pages << PAGE_SHIFT);
-	if (!entry->virtual) {
-		kfree(entry->busaddr);
-		kfree(entry->pagelist);
-		kfree(entry);
-		return -ENOMEM;
-	}
-
-	/* This also forces the mapping of COW pages, so our page list
-	 * will be valid.  Please don't remove it...
-	 */
-	memset(entry->virtual, 0, pages << PAGE_SHIFT);
-
-	entry->handle = ScatterHandle((unsigned long)entry->virtual);
-
-	DRM_DEBUG("handle  = %08lx\n", entry->handle);
-	DRM_DEBUG("virtual = %p\n", entry->virtual);
-
-	for (i = (unsigned long)entry->virtual, j = 0; j < pages;
-	     i += PAGE_SIZE, j++) {
-		entry->pagelist[j] = vmalloc_to_page((void *)i);
-		if (!entry->pagelist[j])
-			goto failed;
-		SetPageReserved(entry->pagelist[j]);
-	}
-
-	request->handle = entry->handle;
-
-	dev->sg = entry;
-
-#if DEBUG_SCATTER
-	/* Verify that each page points to its virtual address, and vice
-	 * versa.
-	 */
-	{
-		int error = 0;
-
-		for (i = 0; i < pages; i++) {
-			unsigned long *tmp;
-
-			tmp = page_address(entry->pagelist[i]);
-			for (j = 0;
-			     j < PAGE_SIZE / sizeof(unsigned long);
-			     j++, tmp++) {
-				*tmp = 0xcafebabe;
-			}
-			tmp = (unsigned long *)((u8 *) entry->virtual +
-						(PAGE_SIZE * i));
-			for (j = 0;
-			     j < PAGE_SIZE / sizeof(unsigned long);
-			     j++, tmp++) {
-				if (*tmp != 0xcafebabe && error == 0) {
-					error = 1;
-					DRM_ERROR("Scatter allocation error, "
-						  "pagelist does not match "
-						  "virtual mapping\n");
-				}
-			}
-			tmp = page_address(entry->pagelist[i]);
-			for (j = 0;
-			     j < PAGE_SIZE / sizeof(unsigned long);
-			     j++, tmp++) {
-				*tmp = 0;
-			}
-		}
-		if (error == 0)
-			DRM_ERROR("Scatter allocation matches pagelist\n");
-	}
-#endif
-
-	return 0;
-
-      failed:
-	drm_sg_cleanup(entry);
-	return -ENOMEM;
-}
-
-int drm_legacy_sg_free(struct drm_device *dev, void *data,
-		       struct drm_file *file_priv)
-{
-	struct drm_scatter_gather *request = data;
-	struct drm_sg_mem *entry;
-
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return -EOPNOTSUPP;
-
-	if (!drm_core_check_feature(dev, DRIVER_SG))
-		return -EOPNOTSUPP;
-
-	entry = dev->sg;
-	dev->sg = NULL;
-
-	if (!entry || entry->handle != request->handle)
-		return -EINVAL;
-
-	DRM_DEBUG("virtual  = %p\n", entry->virtual);
-
-	drm_sg_cleanup(entry);
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 01da6789d0440940c7e754d16e6866746a5614ff..cbb65b7ba4259967745291b2b64884eed056ffec 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -126,6 +126,11 @@
  * synchronize between the two.
  * This requirement is inherited from the Vulkan fence API.
  *
+ * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE is set, the ioctl will also set
+ * a fence deadline hint on the backing fences before waiting, to provide the
+ * fence signaler with an appropriate sense of urgency.  The deadline is
+ * specified as an absolute &CLOCK_MONOTONIC value in units of ns.
+ *
  * Similarly, &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT takes an array of syncobj
  * handles as well as an array of u64 points and does a host-side wait on all
  * of syncobj fences at the given points simultaneously.
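
As a usage illustration for the new flag, a hedged userspace sketch of a
deadline-hinted wait; it assumes struct drm_syncobj_wait gained the
deadline_nsec field this series documents, and the 16 ms budget is an
arbitrary example value:

```c
#include <stdint.h>
#include <string.h>
#include <time.h>
#include <xf86drm.h>
#include <drm/drm.h>

/* Sketch: wait on one syncobj handle, hinting that the fence should
 * signal within ~16 ms (one 60 Hz frame). The deadline is an absolute
 * CLOCK_MONOTONIC value in nanoseconds, per the documentation above.
 */
static int wait_with_deadline(int fd, uint32_t handle)
{
	struct drm_syncobj_wait wait;
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);

	memset(&wait, 0, sizeof(wait));
	wait.handles = (uintptr_t)&handle;
	wait.count_handles = 1;
	wait.timeout_nsec = INT64_MAX;
	wait.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE;
	wait.deadline_nsec = now.tv_sec * 1000000000ull +
			     now.tv_nsec + 16000000ull;

	return drmIoctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
}
```

The same pattern applies to DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, whose handler
gains the identical deadline plumbing below.
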
@@ -1027,7 +1032,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 						  uint32_t count,
 						  uint32_t flags,
 						  signed long timeout,
-						  uint32_t *idx)
+						  uint32_t *idx,
+						  ktime_t *deadline)
 {
 	struct syncobj_wait_entry *entries;
 	struct dma_fence *fence;
@@ -1108,6 +1114,15 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 			drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
 	}
 
+	if (deadline) {
+		for (i = 0; i < count; ++i) {
+			fence = entries[i].fence;
+			if (!fence)
+				continue;
+			dma_fence_set_deadline(fence, *deadline);
+		}
+	}
+
 	do {
 		set_current_state(TASK_INTERRUPTIBLE);
 
@@ -1206,7 +1221,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev,
 				  struct drm_file *file_private,
 				  struct drm_syncobj_wait *wait,
 				  struct drm_syncobj_timeline_wait *timeline_wait,
-				  struct drm_syncobj **syncobjs, bool timeline)
+				  struct drm_syncobj **syncobjs, bool timeline,
+				  ktime_t *deadline)
 {
 	signed long timeout = 0;
 	uint32_t first = ~0;
@@ -1217,7 +1233,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev,
 							 NULL,
 							 wait->count_handles,
 							 wait->flags,
-							 timeout, &first);
+							 timeout, &first,
+							 deadline);
 		if (timeout < 0)
 			return timeout;
 		wait->first_signaled = first;
@@ -1227,7 +1244,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev,
 							 u64_to_user_ptr(timeline_wait->points),
 							 timeline_wait->count_handles,
 							 timeline_wait->flags,
-							 timeout, &first);
+							 timeout, &first,
+							 deadline);
 		if (timeout < 0)
 			return timeout;
 		timeline_wait->first_signaled = first;
@@ -1298,17 +1316,22 @@ drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_syncobj_wait *args = data;
 	struct drm_syncobj **syncobjs;
+	unsigned int possible_flags;
+	ktime_t t, *tp = NULL;
 	int ret = 0;
 
 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
 		return -EOPNOTSUPP;
 
-	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
-			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
+	possible_flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
+			 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+			 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE;
+
+	if (args->flags & ~possible_flags)
 		return -EINVAL;
 
 	if (args->count_handles == 0)
-		return -EINVAL;
+		return 0;
 
 	ret = drm_syncobj_array_find(file_private,
 				     u64_to_user_ptr(args->handles),
@@ -1317,8 +1340,13 @@ drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
 	if (ret < 0)
 		return ret;
 
+	if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE) {
+		t = ns_to_ktime(args->deadline_nsec);
+		tp = &t;
+	}
+
 	ret = drm_syncobj_array_wait(dev, file_private,
-				     args, NULL, syncobjs, false);
+				     args, NULL, syncobjs, false, tp);
 
 	drm_syncobj_array_free(syncobjs, args->count_handles);
 
@@ -1331,18 +1359,23 @@ drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_syncobj_timeline_wait *args = data;
 	struct drm_syncobj **syncobjs;
+	unsigned int possible_flags;
+	ktime_t t, *tp = NULL;
 	int ret = 0;
 
 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
 		return -EOPNOTSUPP;
 
-	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
-			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
-			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
+	possible_flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
+			 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+			 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE |
+			 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE;
+
+	if (args->flags & ~possible_flags)
 		return -EINVAL;
 
 	if (args->count_handles == 0)
-		return -EINVAL;
+		return 0;
 
 	ret = drm_syncobj_array_find(file_private,
 				     u64_to_user_ptr(args->handles),
@@ -1351,8 +1384,13 @@ drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
 	if (ret < 0)
 		return ret;
 
+	if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE) {
+		t = ns_to_ktime(args->deadline_nsec);
+		tp = &t;
+	}
+
 	ret = drm_syncobj_array_wait(dev, file_private,
-				     NULL, args, syncobjs, true);
+				     NULL, args, syncobjs, true, tp);
 
 	drm_syncobj_array_free(syncobjs, args->count_handles);
 
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 877e2067534fa215ba16fe16363b04e84e0ad7e1..702a12bc93bd9b145044ac84bd464d03bfea170f 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -210,11 +210,6 @@ static u32 __get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 		if (crtc->funcs->get_vblank_counter)
 			return crtc->funcs->get_vblank_counter(crtc);
 	}
-#ifdef CONFIG_DRM_LEGACY
-	else if (dev->driver->get_vblank_counter) {
-		return dev->driver->get_vblank_counter(dev, pipe);
-	}
-#endif
 
 	return drm_vblank_no_hw_counter(dev, pipe);
 }
@@ -433,11 +428,6 @@ static void __disable_vblank(struct drm_device *dev, unsigned int pipe)
 		if (crtc->funcs->disable_vblank)
 			crtc->funcs->disable_vblank(crtc);
 	}
-#ifdef CONFIG_DRM_LEGACY
-	else {
-		dev->driver->disable_vblank(dev, pipe);
-	}
-#endif
 }
 
 /*
@@ -1151,11 +1141,6 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe)
 		if (crtc->funcs->enable_vblank)
 			return crtc->funcs->enable_vblank(crtc);
 	}
-#ifdef CONFIG_DRM_LEGACY
-	else if (dev->driver->enable_vblank) {
-		return dev->driver->enable_vblank(dev, pipe);
-	}
-#endif
 
 	return -EINVAL;
 }
@@ -1574,88 +1559,6 @@ void drm_crtc_vblank_restore(struct drm_crtc *crtc)
 }
 EXPORT_SYMBOL(drm_crtc_vblank_restore);
 
-static void drm_legacy_vblank_pre_modeset(struct drm_device *dev,
-					  unsigned int pipe)
-{
-	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
-
-	/* vblank is not initialized (IRQ not installed ?), or has been freed */
-	if (!drm_dev_has_vblank(dev))
-		return;
-
-	if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
-		return;
-
-	/*
-	 * To avoid all the problems that might happen if interrupts
-	 * were enabled/disabled around or between these calls, we just
-	 * have the kernel take a reference on the CRTC (just once though
-	 * to avoid corrupting the count if multiple, mismatch calls occur),
-	 * so that interrupts remain enabled in the interim.
-	 */
-	if (!vblank->inmodeset) {
-		vblank->inmodeset = 0x1;
-		if (drm_vblank_get(dev, pipe) == 0)
-			vblank->inmodeset |= 0x2;
-	}
-}
-
-static void drm_legacy_vblank_post_modeset(struct drm_device *dev,
-					   unsigned int pipe)
-{
-	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
-
-	/* vblank is not initialized (IRQ not installed ?), or has been freed */
-	if (!drm_dev_has_vblank(dev))
-		return;
-
-	if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
-		return;
-
-	if (vblank->inmodeset) {
-		spin_lock_irq(&dev->vbl_lock);
-		drm_reset_vblank_timestamp(dev, pipe);
-		spin_unlock_irq(&dev->vbl_lock);
-
-		if (vblank->inmodeset & 0x2)
-			drm_vblank_put(dev, pipe);
-
-		vblank->inmodeset = 0;
-	}
-}
-
-int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data,
-				 struct drm_file *file_priv)
-{
-	struct drm_modeset_ctl *modeset = data;
-	unsigned int pipe;
-
-	/* If drm_vblank_init() hasn't been called yet, just no-op */
-	if (!drm_dev_has_vblank(dev))
-		return 0;
-
-	/* KMS drivers handle this internally */
-	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
-		return 0;
-
-	pipe = modeset->crtc;
-	if (pipe >= dev->num_crtcs)
-		return -EINVAL;
-
-	switch (modeset->cmd) {
-	case _DRM_PRE_MODESET:
-		drm_legacy_vblank_pre_modeset(dev, pipe);
-		break;
-	case _DRM_POST_MODESET:
-		drm_legacy_vblank_post_modeset(dev, pipe);
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
 static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
 				  u64 req_seq,
 				  union drm_wait_vblank *vblwait,
@@ -1780,10 +1683,6 @@ static void drm_wait_vblank_reply(struct drm_device *dev, unsigned int pipe,
 
 static bool drm_wait_vblank_supported(struct drm_device *dev)
 {
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-	if (unlikely(drm_core_check_feature(dev, DRIVER_LEGACY)))
-		return dev->irq_enabled;
-#endif
 	return drm_dev_has_vblank(dev);
 }
 
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
deleted file mode 100644
index 87c9fe55dec76adf77ff01c8d96e22fe47250e27..0000000000000000000000000000000000000000
--- a/drivers/gpu/drm/drm_vm.c
+++ /dev/null
@@ -1,665 +0,0 @@
-/*
- * \file drm_vm.c
- * Memory mapping for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/pci.h>
-#include <linux/seq_file.h>
-#include <linux/vmalloc.h>
-#include <linux/pgtable.h>
-
-#if defined(__ia64__)
-#include <linux/efi.h>
-#include <linux/slab.h>
-#endif
-#include <linux/mem_encrypt.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_framebuffer.h>
-#include <drm/drm_print.h>
-
-#include "drm_internal.h"
-#include "drm_legacy.h"
-
-struct drm_vma_entry {
-	struct list_head head;
-	struct vm_area_struct *vma;
-	pid_t pid;
-};
-
-static void drm_vm_open(struct vm_area_struct *vma);
-static void drm_vm_close(struct vm_area_struct *vma);
-
-static pgprot_t drm_io_prot(struct drm_local_map *map,
-			    struct vm_area_struct *vma)
-{
-	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
-
-#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
-    defined(__mips__) || defined(__loongarch__)
-	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
-		tmp = pgprot_noncached(tmp);
-	else
-		tmp = pgprot_writecombine(tmp);
-#elif defined(__ia64__)
-	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
-				    vma->vm_start))
-		tmp = pgprot_writecombine(tmp);
-	else
-		tmp = pgprot_noncached(tmp);
-#elif defined(__sparc__) || defined(__arm__)
-	tmp = pgprot_noncached(tmp);
-#endif
-	return tmp;
-}
-
-static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
-{
-	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
-
-#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
-	tmp = pgprot_noncached_wc(tmp);
-#endif
-	return tmp;
-}
-
-/*
- * \c fault method for AGP virtual memory.
- *
- * \param vma virtual memory area.
- * \param address access address.
- * \return pointer to the page structure.
- *
- * Find the right map and if it's AGP memory find the real physical page to
- * map, get the page, increment the use count and return it.
- */
-#if IS_ENABLED(CONFIG_AGP)
-static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
-{
-	struct vm_area_struct *vma = vmf->vma;
-	struct drm_file *priv = vma->vm_file->private_data;
-	struct drm_device *dev = priv->minor->dev;
-	struct drm_local_map *map = NULL;
-	struct drm_map_list *r_list;
-	struct drm_hash_item *hash;
-
-	/*
-	 * Find the right map
-	 */
-	if (!dev->agp)
-		goto vm_fault_error;
-
-	if (!dev->agp || !dev->agp->cant_use_aperture)
-		goto vm_fault_error;
-
-	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
-		goto vm_fault_error;
-
-	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
-	map = r_list->map;
-
-	if (map && map->type == _DRM_AGP) {
-		/*
-		 * Using vm_pgoff as a selector forces us to use this unusual
-		 * addressing scheme.
-		 */
-		resource_size_t offset = vmf->address - vma->vm_start;
-		resource_size_t baddr = map->offset + offset;
-		struct drm_agp_mem *agpmem;
-		struct page *page;
-
-#ifdef __alpha__
-		/*
-		 * Adjust to a bus-relative address
-		 */
-		baddr -= dev->hose->mem_space->start;
-#endif
-
-		/*
-		 * It's AGP memory - find the real physical page to map
-		 */
-		list_for_each_entry(agpmem, &dev->agp->memory, head) {
-			if (agpmem->bound <= baddr &&
-			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
-				break;
-		}
-
-		if (&agpmem->head == &dev->agp->memory)
-			goto vm_fault_error;
-
-		/*
-		 * Get the page, inc the use count, and return it
-		 */
-		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
-		page = agpmem->memory->pages[offset];
-		get_page(page);
-		vmf->page = page;
-
-		DRM_DEBUG
-		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
-		     (unsigned long long)baddr,
-		     agpmem->memory->pages[offset],
-		     (unsigned long long)offset,
-		     page_count(page));
-		return 0;
-	}
-vm_fault_error:
-	return VM_FAULT_SIGBUS;	/* Disallow mremap */
-}
-#else
-static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
-{
-	return VM_FAULT_SIGBUS;
-}
-#endif
-
-/*
- * \c nopage method for shared virtual memory.
- *
- * \param vma virtual memory area.
- * \param address access address.
- * \return pointer to the page structure.
- *
- * Get the mapping, find the real physical page to map, get the page, and
- * return it.
- */
-static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
-{
-	struct vm_area_struct *vma = vmf->vma;
-	struct drm_local_map *map = vma->vm_private_data;
-	unsigned long offset;
-	unsigned long i;
-	struct page *page;
-
-	if (!map)
-		return VM_FAULT_SIGBUS;	/* Nothing allocated */
-
-	offset = vmf->address - vma->vm_start;
-	i = (unsigned long)map->handle + offset;
-	page = vmalloc_to_page((void *)i);
-	if (!page)
-		return VM_FAULT_SIGBUS;
-	get_page(page);
-	vmf->page = page;
-
-	DRM_DEBUG("shm_fault 0x%lx\n", offset);
-	return 0;
-}
-
-/*
- * \c close method for shared virtual memory.
- *
- * \param vma virtual memory area.
- *
- * Deletes map information if we are the last
- * person to close a mapping and it's not in the global maplist.
- */
-static void drm_vm_shm_close(struct vm_area_struct *vma)
-{
-	struct drm_file *priv = vma->vm_file->private_data;
-	struct drm_device *dev = priv->minor->dev;
-	struct drm_vma_entry *pt, *temp;
-	struct drm_local_map *map;
-	struct drm_map_list *r_list;
-	int found_maps = 0;
-
-	DRM_DEBUG("0x%08lx,0x%08lx\n",
-		  vma->vm_start, vma->vm_end - vma->vm_start);
-
-	map = vma->vm_private_data;
-
-	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
-		if (pt->vma->vm_private_data == map)
-			found_maps++;
-		if (pt->vma == vma) {
-			list_del(&pt->head);
-			kfree(pt);
-		}
-	}
-
-	/* We were the only map that was found */
-	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
-		/* Check to see if we are in the maplist, if we are not, then
-		 * we delete this mappings information.
-		 */
-		found_maps = 0;
-		list_for_each_entry(r_list, &dev->maplist, head) {
-			if (r_list->map == map)
-				found_maps++;
-		}
-
-		if (!found_maps) {
-			switch (map->type) {
-			case _DRM_REGISTERS:
-			case _DRM_FRAME_BUFFER:
-				arch_phys_wc_del(map->mtrr);
-				iounmap(map->handle);
-				break;
-			case _DRM_SHM:
-				vfree(map->handle);
-				break;
-			case _DRM_AGP:
-			case _DRM_SCATTER_GATHER:
-				break;
-			case _DRM_CONSISTENT:
-				dma_free_coherent(dev->dev,
-						  map->size,
-						  map->handle,
-						  map->offset);
-				break;
-			}
-			kfree(map);
-		}
-	}
-	mutex_unlock(&dev->struct_mutex);
-}
-
-/*
- * \c fault method for DMA virtual memory.
- *
- * \param address access address.
- * \return pointer to the page structure.
- *
- * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
- */
-static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
-{
-	struct vm_area_struct *vma = vmf->vma;
-	struct drm_file *priv = vma->vm_file->private_data;
-	struct drm_device *dev = priv->minor->dev;
-	struct drm_device_dma *dma = dev->dma;
-	unsigned long offset;
-	unsigned long page_nr;
-	struct page *page;
-
-	if (!dma)
-		return VM_FAULT_SIGBUS;	/* Error */
-	if (!dma->pagelist)
-		return VM_FAULT_SIGBUS;	/* Nothing allocated */
-
-	offset = vmf->address - vma->vm_start;
-					/* vm_[pg]off[set] should be 0 */
-	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
-	page = virt_to_page((void *)dma->pagelist[page_nr]);
-
-	get_page(page);
-	vmf->page = page;
-
-	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
-	return 0;
-}
-
-/*
- * \c fault method for scatter-gather virtual memory.
- *
- * \param address access address.
- * \return pointer to the page structure.
- *
- * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
- */
-static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
-{
-	struct vm_area_struct *vma = vmf->vma;
-	struct drm_local_map *map = vma->vm_private_data;
-	struct drm_file *priv = vma->vm_file->private_data;
-	struct drm_device *dev = priv->minor->dev;
-	struct drm_sg_mem *entry = dev->sg;
-	unsigned long offset;
-	unsigned long map_offset;
-	unsigned long page_offset;
-	struct page *page;
-
-	if (!entry)
-		return VM_FAULT_SIGBUS;	/* Error */
-	if (!entry->pagelist)
-		return VM_FAULT_SIGBUS;	/* Nothing allocated */
-
-	offset = vmf->address - vma->vm_start;
-	map_offset = map->offset - (unsigned long)dev->sg->virtual;
-	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
-	page = entry->pagelist[page_offset];
-	get_page(page);
-	vmf->page = page;
-
-	return 0;
-}
-
-/** AGP virtual memory operations */
-static const struct vm_operations_struct drm_vm_ops = {
-	.fault = drm_vm_fault,
-	.open = drm_vm_open,
-	.close = drm_vm_close,
-};
-
-/** Shared virtual memory operations */
-static const struct vm_operations_struct drm_vm_shm_ops = {
-	.fault = drm_vm_shm_fault,
-	.open = drm_vm_open,
-	.close = drm_vm_shm_close,
-};
-
-/** DMA virtual memory operations */
-static const struct vm_operations_struct drm_vm_dma_ops = {
-	.fault = drm_vm_dma_fault,
-	.open = drm_vm_open,
-	.close = drm_vm_close,
-};
-
-/** Scatter-gather virtual memory operations */
-static const struct vm_operations_struct drm_vm_sg_ops = {
-	.fault = drm_vm_sg_fault,
-	.open = drm_vm_open,
-	.close = drm_vm_close,
-};
-
-static void drm_vm_open_locked(struct drm_device *dev,
-			       struct vm_area_struct *vma)
-{
-	struct drm_vma_entry *vma_entry;
-
-	DRM_DEBUG("0x%08lx,0x%08lx\n",
-		  vma->vm_start, vma->vm_end - vma->vm_start);
-
-	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
-	if (vma_entry) {
-		vma_entry->vma = vma;
-		vma_entry->pid = current->pid;
-		list_add(&vma_entry->head, &dev->vmalist);
-	}
-}
-
-static void drm_vm_open(struct vm_area_struct *vma)
-{
-	struct drm_file *priv = vma->vm_file->private_data;
-	struct drm_device *dev = priv->minor->dev;
-
-	mutex_lock(&dev->struct_mutex);
-	drm_vm_open_locked(dev, vma);
-	mutex_unlock(&dev->struct_mutex);
-}
-
-static void drm_vm_close_locked(struct drm_device *dev,
-				struct vm_area_struct *vma)
-{
-	struct drm_vma_entry *pt, *temp;
-
-	DRM_DEBUG("0x%08lx,0x%08lx\n",
-		  vma->vm_start, vma->vm_end - vma->vm_start);
-
-	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
-		if (pt->vma == vma) {
-			list_del(&pt->head);
-			kfree(pt);
-			break;
-		}
-	}
-}
-
-/*
- * \c close method for all virtual memory types.
- *
- * \param vma virtual memory area.
- *
- * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
- * free it.
- */
-static void drm_vm_close(struct vm_area_struct *vma)
-{
-	struct drm_file *priv = vma->vm_file->private_data;
-	struct drm_device *dev = priv->minor->dev;
-
-	mutex_lock(&dev->struct_mutex);
-	drm_vm_close_locked(dev, vma);
-	mutex_unlock(&dev->struct_mutex);
-}
-
-/*
- * mmap DMA memory.
- *
- * \param file_priv DRM file private.
- * \param vma virtual memory area.
- * \return zero on success or a negative number on failure.
- *
- * Sets the virtual memory area operations structure to vm_dma_ops, the file
- * pointer, and calls vm_open().
- */
-static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
-{
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev;
-	struct drm_device_dma *dma;
-	unsigned long length = vma->vm_end - vma->vm_start;
-
-	dev = priv->minor->dev;
-	dma = dev->dma;
-	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
-		  vma->vm_start, vma->vm_end, vma->vm_pgoff);
-
-	/* Length must match exact page count */
-	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
-		return -EINVAL;
-	}
-
-	if (!capable(CAP_SYS_ADMIN) &&
-	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
-		vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
-#if defined(__i386__) || defined(__x86_64__)
-		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
-#else
-		/* Ye gads this is ugly.  With more thought
-		   we could move this up higher and use
-		   `protection_map' instead.  */
-		vma->vm_page_prot =
-		    __pgprot(pte_val
-			     (pte_wrprotect
-			      (__pte(pgprot_val(vma->vm_page_prot)))));
-#endif
-	}
-
-	vma->vm_ops = &drm_vm_dma_ops;
-
-	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
-
-	drm_vm_open_locked(dev, vma);
-	return 0;
-}
-
-static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
-{
-#ifdef __alpha__
-	return dev->hose->dense_mem_base;
-#else
-	return 0;
-#endif
-}
-
-/*
- * mmap DMA memory.
- *
- * \param file_priv DRM file private.
- * \param vma virtual memory area.
- * \return zero on success or a negative number on failure.
- *
- * If the virtual memory area has no offset associated with it then it's a DMA
- * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
- * checks that the restricted flag is not set, sets the virtual memory operations
- * according to the mapping type and remaps the pages. Finally sets the file
- * pointer and calls vm_open().
- */
-static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
-{
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->minor->dev;
-	struct drm_local_map *map = NULL;
-	resource_size_t offset = 0;
-	struct drm_hash_item *hash;
-
-	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
-		  vma->vm_start, vma->vm_end, vma->vm_pgoff);
-
-	if (!priv->authenticated)
-		return -EACCES;
-
-	/* We check for "dma". On Apple's UniNorth, it's valid to have
-	 * the AGP mapped at physical address 0
-	 * --BenH.
-	 */
-	if (!vma->vm_pgoff
-#if IS_ENABLED(CONFIG_AGP)
-	    && (!dev->agp
-		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
-#endif
-	    )
-		return drm_mmap_dma(filp, vma);
-
-	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
-		DRM_ERROR("Could not find map\n");
-		return -EINVAL;
-	}
-
-	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
-	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
-		return -EPERM;
-
-	/* Check for valid size. */
-	if (map->size < vma->vm_end - vma->vm_start)
-		return -EINVAL;
-
-	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
-		vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
-#if defined(__i386__) || defined(__x86_64__)
-		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
-#else
-		/* Ye gads this is ugly.  With more thought
-		   we could move this up higher and use
-		   `protection_map' instead.  */
-		vma->vm_page_prot =
-		    __pgprot(pte_val
-			     (pte_wrprotect
-			      (__pte(pgprot_val(vma->vm_page_prot)))));
-#endif
-	}
-
-	switch (map->type) {
-#if !defined(__arm__)
-	case _DRM_AGP:
-		if (dev->agp && dev->agp->cant_use_aperture) {
-			/*
-			 * On some platforms we can't talk to bus dma address from the CPU, so for
-			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
-			 * pages and mappings in fault()
-			 */
-#if defined(__powerpc__)
-			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#endif
-			vma->vm_ops = &drm_vm_ops;
-			break;
-		}
-		fallthrough;	/* to _DRM_FRAME_BUFFER... */
-#endif
-	case _DRM_FRAME_BUFFER:
-	case _DRM_REGISTERS:
-		offset = drm_core_get_reg_ofs(dev);
-		vma->vm_page_prot = drm_io_prot(map, vma);
-		if (io_remap_pfn_range(vma, vma->vm_start,
-				       (map->offset + offset) >> PAGE_SHIFT,
-				       vma->vm_end - vma->vm_start,
-				       vma->vm_page_prot))
-			return -EAGAIN;
-		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
-			  " offset = 0x%llx\n",
-			  map->type,
-			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
-
-		vma->vm_ops = &drm_vm_ops;
-		break;
-	case _DRM_CONSISTENT:
-		/* Consistent memory is really like shared memory. But
-		 * it's allocated in a different way, so avoid fault */
-		if (remap_pfn_range(vma, vma->vm_start,
-		    page_to_pfn(virt_to_page(map->handle)),
-		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
-			return -EAGAIN;
-		vma->vm_page_prot = drm_dma_prot(map->type, vma);
-		fallthrough;	/* to _DRM_SHM */
-	case _DRM_SHM:
-		vma->vm_ops = &drm_vm_shm_ops;
-		vma->vm_private_data = (void *)map;
-		break;
-	case _DRM_SCATTER_GATHER:
-		vma->vm_ops = &drm_vm_sg_ops;
-		vma->vm_private_data = (void *)map;
-		vma->vm_page_prot = drm_dma_prot(map->type, vma);
-		break;
-	default:
-		return -EINVAL;	/* This should never happen. */
-	}
-	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
-
-	drm_vm_open_locked(dev, vma);
-	return 0;
-}
-
-int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
-{
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->minor->dev;
-	int ret;
-
-	if (drm_dev_is_unplugged(dev))
-		return -ENODEV;
-
-	mutex_lock(&dev->struct_mutex);
-	ret = drm_mmap_locked(filp, vma);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-EXPORT_SYMBOL(drm_legacy_mmap);
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-void drm_legacy_vma_flush(struct drm_device *dev)
-{
-	struct drm_vma_entry *vma, *vma_temp;
-
-	/* Clear vma list (only needed for legacy drivers) */
-	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
-		list_del(&vma->head);
-		kfree(vma);
-	}
-}
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 34cdabc30b4f5ee62fd788923cbf335bb4af9409..35771fb4e85d06402069a0e68d2fdf0a07606045 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -11,9 +11,10 @@
 #include <linux/component.h>
 #include <linux/kernel.h>
 #include <linux/mfd/syscon.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/property.h>
 #include <linux/regmap.h>
 
 #include <drm/drm_fourcc.h>
@@ -103,7 +104,7 @@ struct gsc_context {
 	unsigned int			num_formats;
 
 	void __iomem	*regs;
-	const char	**clk_names;
+	const char	*const *clk_names;
 	struct clk	*clocks[GSC_MAX_CLOCKS];
 	int		num_clocks;
 	struct gsc_scaler	sc;
@@ -1217,7 +1218,7 @@ static const unsigned int gsc_tiled_formats[] = {
 static int gsc_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct gsc_driverdata *driver_data;
+	const struct gsc_driverdata *driver_data;
 	struct exynos_drm_ipp_formats *formats;
 	struct gsc_context *ctx;
 	int num_formats, ret, i, j;
@@ -1226,7 +1227,7 @@ static int gsc_probe(struct platform_device *pdev)
 	if (!ctx)
 		return -ENOMEM;
 
-	driver_data = (struct gsc_driverdata *)of_device_get_match_data(dev);
+	driver_data = device_get_match_data(dev);
 	ctx->dev = dev;
 	ctx->num_clocks = driver_data->num_clocks;
 	ctx->clk_names = driver_data->clk_names;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 683fd8d3151c365a6d868a8bdae6b125186f5882..ccc077b74d2de109fbc56db1580abf672d3ada65 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -9,6 +9,7 @@
 #include <linux/sync_file.h>
 #include <linux/uaccess.h>
 
+#include <drm/drm_auth.h>
 #include <drm/drm_syncobj.h>
 
 #include "display/intel_frontbuffer.h"
diff --git a/drivers/gpu/drm/imagination/pvr_cccb.h b/drivers/gpu/drm/imagination/pvr_cccb.h
index f35b3d4c9575bc462345e06dbe8542b347e4d80e..943fe8f2c963be6c7e63d68442ff0f53ac0a9098 100644
--- a/drivers/gpu/drm/imagination/pvr_cccb.h
+++ b/drivers/gpu/drm/imagination/pvr_cccb.h
@@ -86,6 +86,7 @@ pvr_cccb_get_size_of_cmd_with_hdr(u32 cmd_size)
 
 /**
  * pvr_cccb_cmdseq_can_fit() - Check if a command sequence can fit in the CCCB.
+ * @pvr_cccb: Target Client CCB.
  * @size: Command sequence size.
  *
  * Returns:
diff --git a/drivers/gpu/drm/imagination/pvr_device.c b/drivers/gpu/drm/imagination/pvr_device.c
index 8499becf4fbba8815e63a922d388b51461c53fb8..1704c0268589bdeb65fa6535f9ec63182b0a3e94 100644
--- a/drivers/gpu/drm/imagination/pvr_device.c
+++ b/drivers/gpu/drm/imagination/pvr_device.c
@@ -105,12 +105,12 @@ static int pvr_device_clk_init(struct pvr_device *pvr_dev)
 
 	sys_clk = devm_clk_get_optional(drm_dev->dev, "sys");
 	if (IS_ERR(sys_clk))
-		return dev_err_probe(drm_dev->dev, PTR_ERR(core_clk),
+		return dev_err_probe(drm_dev->dev, PTR_ERR(sys_clk),
 				     "failed to get sys clock\n");
 
 	mem_clk = devm_clk_get_optional(drm_dev->dev, "mem");
 	if (IS_ERR(mem_clk))
-		return dev_err_probe(drm_dev->dev, PTR_ERR(core_clk),
+		return dev_err_probe(drm_dev->dev, PTR_ERR(mem_clk),
 				     "failed to get mem clock\n");
 
 	pvr_dev->core_clk = core_clk;
@@ -127,7 +127,7 @@ static int pvr_device_clk_init(struct pvr_device *pvr_dev)
  * This is called any time we receive a FW event. It iterates over all
  * active queues and calls pvr_queue_process() on them.
  */
-void pvr_device_process_active_queues(struct pvr_device *pvr_dev)
+static void pvr_device_process_active_queues(struct pvr_device *pvr_dev)
 {
 	struct pvr_queue *queue, *tmp_queue;
 	LIST_HEAD(active_queues);
@@ -286,8 +286,8 @@ pvr_request_firmware(struct pvr_device *pvr_dev)
 
 	filename = pvr_build_firmware_filename(pvr_dev, "powervr/rogue",
 					       PVR_FW_VERSION_MAJOR);
-	if (IS_ERR(filename))
-		return PTR_ERR(filename);
+	if (!filename)
+		return -ENOMEM;
 
 	/*
 	 * This function takes a copy of &filename, meaning we can free our
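
The pvr_request_firmware() hunk above fixes a mismatched error-handling
convention; as a general, hypothetical illustration (the helpers shown are
standard kernel APIs, but this function is not from the patch), NULL-returning
allocators and ERR_PTR-returning getters need different checks:

```c
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Hypothetical sketch of the two kernel error-return conventions:
 * kasprintf() returns NULL on failure, devm_clk_get_optional() returns
 * an ERR_PTR(), and the caller's test has to match the convention.
 */
static int example_error_conventions(struct device *dev)
{
	char *name;
	struct clk *clk;

	name = kasprintf(GFP_KERNEL, "powervr/rogue_%u.fw", 36);
	if (!name)		/* NULL on failure, never an ERR_PTR */
		return -ENOMEM;
	kfree(name);

	clk = devm_clk_get_optional(dev, "sys");
	if (IS_ERR(clk))	/* ERR_PTR on failure, never NULL on error */
		return PTR_ERR(clk);

	return 0;
}
```

The clk hunks above are the complementary case: the convention was right, but
PTR_ERR() was applied to the wrong variable.
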
diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h
index e07655fc65e8c001d1d420be3e7bf1b062bdbe9e..2ca7e535799fefcd29261f6f20e4aff8a86fc10f 100644
--- a/drivers/gpu/drm/imagination/pvr_device.h
+++ b/drivers/gpu/drm/imagination/pvr_device.h
@@ -203,17 +203,29 @@ struct pvr_device {
 		struct mutex lock;
 	} queues;
 
+	/**
+	 * @watchdog: Watchdog for communications with firmware.
+	 */
 	struct {
 		/** @work: Work item for watchdog callback. */
 		struct delayed_work work;
 
-		/** @old_kccb_cmds_executed: KCCB command execution count at last watchdog poll. */
+		/**
+		 * @old_kccb_cmds_executed: KCCB command execution count at last
+		 * watchdog poll.
+		 */
 		u32 old_kccb_cmds_executed;
 
-		/** @kccb_stall_count: Number of watchdog polls KCCB has been stalled for. */
+		/**
+		 * @kccb_stall_count: Number of watchdog polls KCCB has been
+		 * stalled for.
+		 */
 		u32 kccb_stall_count;
 	} watchdog;
 
+	/**
+	 * @kccb: Circular buffer for communications with firmware.
+	 */
 	struct {
 		/** @ccb: Kernel CCB. */
 		struct pvr_ccb ccb;
@@ -225,8 +237,8 @@ struct pvr_device {
 		struct pvr_fw_object *rtn_obj;
 
 		/**
-		 * @rtn: Pointer to CPU mapping of KCCB return slots. Must be accessed by
-		 *       READ_ONCE()/WRITE_ONCE().
+		 * @rtn: Pointer to CPU mapping of KCCB return slots. Must be
+		 * accessed by READ_ONCE()/WRITE_ONCE().
 		 */
 		u32 *rtn;
 
@@ -293,14 +305,13 @@ struct pvr_file {
 
 	/**
 	 * @pvr_dev: A reference to the powervr-specific wrapper for the
-	 *           associated device. Saves on repeated calls to
-	 *           to_pvr_device().
+	 * associated device. Saves on repeated calls to to_pvr_device().
 	 */
 	struct pvr_device *pvr_dev;
 
 	/**
 	 * @ctx_handles: Array of contexts belonging to this file. Array members
-	 *               are of type "struct pvr_context *".
+	 * are of type "struct pvr_context *".
 	 *
 	 * This array is used to allocate handles returned to userspace.
 	 */
diff --git a/drivers/gpu/drm/imagination/pvr_device_info.c b/drivers/gpu/drm/imagination/pvr_device_info.c
index 11e6bef52ecd01dc5dcf05e8fa1774ad0d0b897c..d3301cde7d119e7dc0f04f5a069d49e187ef7925 100644
--- a/drivers/gpu/drm/imagination/pvr_device_info.c
+++ b/drivers/gpu/drm/imagination/pvr_device_info.c
@@ -227,7 +227,8 @@ int pvr_device_info_set_features(struct pvr_device *pvr_dev, const u64 *features
 	/* Verify no unsupported values in the bitmask. */
 	if (features_size > mapping_max_size) {
 		drm_warn(from_pvr_device(pvr_dev), "Unsupported features in firmware image");
-	} else if (features_size == mapping_max_size && (mapping_max & 63)) {
+	} else if (features_size == mapping_max_size &&
+		   ((mapping_max & 63) != 0)) {
 		u64 invalid_mask = ~0ull << (mapping_max & 63);
 
 		if (features[features_size - 1] & invalid_mask)
diff --git a/drivers/gpu/drm/imagination/pvr_free_list.c b/drivers/gpu/drm/imagination/pvr_free_list.c
index c61fd417edcb7ad0140258d6859b58bee6fea418..5e51bc980751c9e84f5365b633a22540426631ee 100644
--- a/drivers/gpu/drm/imagination/pvr_free_list.c
+++ b/drivers/gpu/drm/imagination/pvr_free_list.c
@@ -255,7 +255,7 @@ pvr_free_list_insert_pages_locked(struct pvr_free_list *free_list,
 
 		if (!num_pages)
 			break;
-	};
+	}
 	/* clang-format on */
 
 	/* Make sure our free_list update is flushed. */
diff --git a/drivers/gpu/drm/imagination/pvr_fw.h b/drivers/gpu/drm/imagination/pvr_fw.h
index 5cd3ef08d82b43682c00035c06b0016d38f90a47..b7966bd574a924862b7877c175fa2b5d757d89db 100644
--- a/drivers/gpu/drm/imagination/pvr_fw.h
+++ b/drivers/gpu/drm/imagination/pvr_fw.h
@@ -481,7 +481,8 @@ pvr_fw_object_unmap_and_destroy(struct pvr_fw_object *fw_obj)
 }
 
 /**
- * pvr_fw_get_dma_addr() - Get DMA address for given offset in firmware object
+ * pvr_fw_object_get_dma_addr() - Get DMA address for given offset in firmware
+ * object.
  * @fw_obj: Pointer to object to lookup address in.
  * @offset: Offset within object to lookup address at.
  * @dma_addr_out: Pointer to location to store DMA address.
diff --git a/drivers/gpu/drm/imagination/pvr_fw_info.h b/drivers/gpu/drm/imagination/pvr_fw_info.h
index ad5d44a3067ac2ec81a90e1f0c87ac3585a0af00..c3639440610eb16ce7118d32d0f90410db76d1be 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_info.h
+++ b/drivers/gpu/drm/imagination/pvr_fw_info.h
@@ -122,13 +122,13 @@ struct pvr_fw_layout_entry {
  * struct pvr_fw_device_info_header - Device information header.
  */
 struct pvr_fw_device_info_header {
-	/* BRN Mask size (in u64s). */
+	/** @brn_mask_size: BRN mask size (in u64s). */
 	u64 brn_mask_size;
-	/* ERN Mask size (in u64s). */
+	/** @ern_mask_size: ERN mask size (in u64s). */
 	u64 ern_mask_size;
-	/* Feature Mask size (in u64s). */
+	/** @feature_mask_size: Feature mask size (in u64s). */
 	u64 feature_mask_size;
-	/* Feature Parameter size (in u64s). */
+	/** @feature_param_size: Feature parameter size (in u64s). */
 	u64 feature_param_size;
 };
 
diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.c b/drivers/gpu/drm/imagination/pvr_fw_meta.c
index 119934c36184a367a8649ab9efee705b73095b58..c39beb70c3173ebdab13b4e810ce5d9a3419f0ba 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_meta.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c
@@ -4,6 +4,7 @@
 #include "pvr_device.h"
 #include "pvr_fw.h"
 #include "pvr_fw_info.h"
+#include "pvr_fw_meta.h"
 #include "pvr_gem.h"
 #include "pvr_rogue_cr_defs.h"
 #include "pvr_rogue_meta.h"
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c
index 87a42fb6ace687fd49369bc692e1b7d87a4a43e3..7159fc479001c49306dc668cfb9c69456d585b01 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_trace.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c
@@ -121,6 +121,8 @@ void pvr_fw_trace_fini(struct pvr_device *pvr_dev)
 	pvr_fw_object_unmap_and_destroy(fw_trace->tracebuf_ctrl_obj);
 }
 
+#if defined(CONFIG_DEBUG_FS)
+
 /**
  * update_logtype() - Send KCCB command to trigger FW to update logtype
  * @pvr_dev: Target PowerVR device
@@ -165,52 +167,6 @@ update_logtype(struct pvr_device *pvr_dev, u32 group_mask)
 	return err;
 }
 
-#if defined(CONFIG_DEBUG_FS)
-
-static int fw_trace_group_mask_show(struct seq_file *m, void *data)
-{
-	struct pvr_device *pvr_dev = m->private;
-
-	seq_printf(m, "%08x\n", pvr_dev->fw_dev.fw_trace.group_mask);
-
-	return 0;
-}
-
-static int fw_trace_group_mask_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, fw_trace_group_mask_show, inode->i_private);
-}
-
-static ssize_t fw_trace_group_mask_write(struct file *file, const char __user *ubuf, size_t len,
-					 loff_t *offp)
-{
-	struct seq_file *m = file->private_data;
-	struct pvr_device *pvr_dev = m->private;
-	u32 new_group_mask;
-	int err;
-
-	err = kstrtouint_from_user(ubuf, len, 0, &new_group_mask);
-	if (err)
-		return err;
-
-	err = update_logtype(pvr_dev, new_group_mask);
-	if (err)
-		return err;
-
-	pvr_dev->fw_dev.fw_trace.group_mask = new_group_mask;
-
-	return (ssize_t)len;
-}
-
-static const struct file_operations pvr_fw_trace_group_mask_fops = {
-	.owner = THIS_MODULE,
-	.open = fw_trace_group_mask_open,
-	.read = seq_read,
-	.write = fw_trace_group_mask_write,
-	.llseek = default_llseek,
-	.release = single_release,
-};
-
 struct pvr_fw_trace_seq_data {
 	/** @buffer: Pointer to copy of trace data. */
 	u32 *buffer;
diff --git a/drivers/gpu/drm/imagination/pvr_hwrt.h b/drivers/gpu/drm/imagination/pvr_hwrt.h
index 76992948d0472ed5422d7cca67cdbd98052b96d3..676070b20c3ba552319057cfcabe69c890dbc832 100644
--- a/drivers/gpu/drm/imagination/pvr_hwrt.h
+++ b/drivers/gpu/drm/imagination/pvr_hwrt.h
@@ -64,6 +64,7 @@ struct pvr_hwrt_dataset {
 	/** @common_fw_obj: FW object representing common FW-side structure. */
 	struct pvr_fw_object *common_fw_obj;
 
+	/** @common: Common HWRT data. */
 	struct rogue_fwif_hwrtdata_common common;
 
 	/** @data: HWRT data structures belonging to this set. */
diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c
index 9d0812710295cfbc5ab578f159ed6a1aa400499e..04139da6c04dcd546930f8e29b1b8612941fef59 100644
--- a/drivers/gpu/drm/imagination/pvr_job.c
+++ b/drivers/gpu/drm/imagination/pvr_job.c
@@ -378,7 +378,7 @@ prepare_job_syncs(struct pvr_file *pvr_file,
 
 /**
  * prepare_job_syncs_for_each() - Prepare all sync objects for an array of jobs.
- * @file: PowerVR file.
+ * @pvr_file: PowerVR file.
  * @job_data: Array of precreated jobs and their sync_ops.
  * @job_count: Number of jobs.
  * @signal_array: xarray to receive signal sync objects.
@@ -696,8 +696,6 @@ pvr_jobs_link_geom_frag(struct pvr_job_data *job_data, u32 *job_count)
  * @pvr_dev: Target PowerVR device.
  * @pvr_file: Pointer to PowerVR file structure.
  * @args: Ioctl args.
- * @job_count: Number of jobs in @jobs_args. On error this will be updated
- * with the index into @jobs_args where the error occurred.
  *
  * This initial implementation is entirely synchronous; on return the GPU will
  * be idle. This will not be the case for future implementations.
diff --git a/drivers/gpu/drm/imagination/pvr_mmu.c b/drivers/gpu/drm/imagination/pvr_mmu.c
index b71d30e5f380521dfff6c95d5e971f3b597cbc94..c8562bfc0dcda35875c0c39ebb43816f60f01af0 100644
--- a/drivers/gpu/drm/imagination/pvr_mmu.c
+++ b/drivers/gpu/drm/imagination/pvr_mmu.c
@@ -335,8 +335,9 @@ pvr_mmu_backing_page_fini(struct pvr_mmu_backing_page *page)
 
 /**
  * pvr_mmu_backing_page_sync() - Flush a MMU backing page from the CPU to the
- *                              device.
+ * device.
  * @page: Target backing page.
+ * @flags: MMU flush flags. Must be one of %PVR_MMU_SYNC_LEVEL_*_FLAGS.
  *
  * .. caution::
  *
diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
index d65c3fbedf5ac4efbf70aabf38972dcc9c310efd..5ed9c98fb599c8a66e959272db89d2b17ab314b4 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.c
+++ b/drivers/gpu/drm/imagination/pvr_queue.c
@@ -1292,7 +1292,7 @@ struct pvr_queue *pvr_queue_create(struct pvr_context *ctx,
 		goto err_release_ufo;
 
 	err = drm_sched_entity_init(&queue->entity,
-				    DRM_SCHED_PRIORITY_MIN,
+				    DRM_SCHED_PRIORITY_KERNEL,
 				    &sched, 1, &ctx->faulty);
 	if (err)
 		goto err_sched_fini;
diff --git a/drivers/gpu/drm/imagination/pvr_queue.h b/drivers/gpu/drm/imagination/pvr_queue.h
index b5ce2c742150b2bd3d068a0d63f045fae60c86cf..e06ced69302fca47fc26451dfb09ebbb24b57f52 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.h
+++ b/drivers/gpu/drm/imagination/pvr_queue.h
@@ -50,7 +50,7 @@ struct pvr_queue_cccb_fence_ctx {
 	 */
 	struct pvr_job *job;
 
-	/** @lock: Lock protecting access to the job object. */
+	/** @job_lock: Lock protecting access to the job object. */
 	struct mutex job_lock;
 };
 
@@ -114,7 +114,7 @@ struct pvr_queue {
 	} timeline_ufo;
 
 	/**
-	 * last_queued_job_scheduled_fence: The scheduled fence of the last
+	 * @last_queued_job_scheduled_fence: The scheduled fence of the last
 	 * job queued to this queue.
 	 *
 	 * We use it to insert frag -> geom dependencies when issuing combined
diff --git a/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h b/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h
index 571954182f33d819f2699c11372dc83c5f0dbef2..56e11009e1230ad552e8bad9a1b12e1cf22b6aca 100644
--- a/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h
+++ b/drivers/gpu/drm/imagination/pvr_rogue_fwif_sf.h
@@ -497,7 +497,7 @@ static const struct rogue_km_stid_fmt stid_fmts[] = {
 	{ ROGUE_FW_LOG_CREATESFID(213, ROGUE_FW_GROUP_MAIN, 1),
 	  "Safety Watchdog threshold period set to 0x%x clock cycles" },
 	{ ROGUE_FW_LOG_CREATESFID(214, ROGUE_FW_GROUP_MAIN, 0),
-	  "MTS Safety Event trigged by the safety watchdog." },
+	  "MTS Safety Event triggered by the safety watchdog." },
 	{ ROGUE_FW_LOG_CREATESFID(215, ROGUE_FW_GROUP_MAIN, 3),
 	  "DM%d USC tasks range limit 0 - %d, stride %d" },
 	{ ROGUE_FW_LOG_CREATESFID(216, ROGUE_FW_GROUP_MAIN, 1),
@@ -1114,7 +1114,7 @@ static const struct rogue_km_stid_fmt stid_fmts[] = {
 	{ ROGUE_FW_LOG_CREATESFID(39, ROGUE_FW_GROUP_SPM, 2),
 	  "3DMemFree matches freelist 0x%08x (FL type = %u)" },
 	{ ROGUE_FW_LOG_CREATESFID(40, ROGUE_FW_GROUP_SPM, 0),
-	  "Raise the 3DMemFreeDedected flag" },
+	  "Raise the 3DMemFreeDetected flag" },
 	{ ROGUE_FW_LOG_CREATESFID(41, ROGUE_FW_GROUP_SPM, 1),
 	  "Wait for pending grow on Freelist 0x%08x" },
 	{ ROGUE_FW_LOG_CREATESFID(42, ROGUE_FW_GROUP_SPM, 1),
diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
index 3ad1366294b930d52fbc22c9fb9b04ab38950c93..f42345fbe4bf9183dd4b143b465514d554f072e8 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.c
+++ b/drivers/gpu/drm/imagination/pvr_vm.c
@@ -42,7 +42,7 @@ struct pvr_vm_context {
 	/** @mmu_ctx: The context for binding to physical memory. */
 	struct pvr_mmu_context *mmu_ctx;
 
-	/** @gpuva_mgr: GPUVA manager object associated with this context. */
+	/** @gpuvm_mgr: GPUVM object associated with this context. */
 	struct drm_gpuvm gpuvm_mgr;
 
 	/** @lock: Global lock on this VM. */
@@ -64,6 +64,12 @@ struct pvr_vm_context {
 	struct drm_gem_object dummy_gem;
 };
 
+static inline
+struct pvr_vm_context *to_pvr_vm_context(struct drm_gpuvm *gpuvm)
+{
+	return container_of(gpuvm, struct pvr_vm_context, gpuvm_mgr);
+}
+
 struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx)
 {
 	if (vm_ctx)
@@ -108,12 +114,6 @@ struct pvr_vm_gpuva {
 	struct drm_gpuva base;
 };
 
-static __always_inline
-struct pvr_vm_gpuva *to_pvr_vm_gpuva(struct drm_gpuva *gpuva)
-{
-	return container_of(gpuva, struct pvr_vm_gpuva, base);
-}
-
 enum pvr_vm_bind_type {
 	PVR_VM_BIND_TYPE_MAP,
 	PVR_VM_BIND_TYPE_UNMAP,
@@ -224,6 +224,7 @@ pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op,
 			struct pvr_gem_object *pvr_obj, u64 offset,
 			u64 device_addr, u64 size)
 {
+	struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
 	const bool is_user = vm_ctx == vm_ctx->pvr_dev->kernel_vm_ctx;
 	const u64 pvr_obj_size = pvr_gem_object_size(pvr_obj);
 	struct sg_table *sgt;
@@ -238,17 +239,18 @@ pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op,
 		return -EINVAL;
 	}
 
-	if (!pvr_device_addr_and_size_are_valid(device_addr, size) ||
+	if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size) ||
 	    offset & ~PAGE_MASK || size & ~PAGE_MASK ||
 	    offset >= pvr_obj_size || offset_plus_size > pvr_obj_size)
 		return -EINVAL;
 
 	bind_op->type = PVR_VM_BIND_TYPE_MAP;
 
-	bind_op->gpuvm_bo = drm_gpuvm_bo_create(&vm_ctx->gpuvm_mgr,
-						gem_from_pvr_gem(pvr_obj));
-	if (!bind_op->gpuvm_bo)
-		return -ENOMEM;
+	dma_resv_lock(obj->resv, NULL);
+	bind_op->gpuvm_bo = drm_gpuvm_bo_obtain(&vm_ctx->gpuvm_mgr, obj);
+	dma_resv_unlock(obj->resv);
+	if (IS_ERR(bind_op->gpuvm_bo))
+		return PTR_ERR(bind_op->gpuvm_bo);
 
 	bind_op->new_va = kzalloc(sizeof(*bind_op->new_va), GFP_KERNEL);
 	bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
@@ -293,7 +295,7 @@ pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
 {
 	int err;
 
-	if (!pvr_device_addr_and_size_are_valid(device_addr, size))
+	if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size))
 		return -EINVAL;
 
 	bind_op->type = PVR_VM_BIND_TYPE_UNMAP;
@@ -325,48 +327,6 @@ pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
 	return err;
 }
 
-static int
-pvr_vm_bind_op_lock_resvs(struct drm_exec *exec, struct pvr_vm_bind_op *bind_op)
-{
-	drm_exec_until_all_locked(exec) {
-		struct drm_gem_object *r_obj = &bind_op->vm_ctx->dummy_gem;
-		struct drm_gpuvm *gpuvm = &bind_op->vm_ctx->gpuvm_mgr;
-		struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;
-		struct drm_gpuvm_bo *gpuvm_bo;
-
-		/* Acquire lock on the vm_context's reserve object. */
-		int err = drm_exec_lock_obj(exec, r_obj);
-
-		drm_exec_retry_on_contention(exec);
-		if (err)
-			return err;
-
-		/* Acquire lock on all BOs in the context. */
-		list_for_each_entry(gpuvm_bo, &gpuvm->extobj.list,
-				    list.entry.extobj) {
-			err = drm_exec_lock_obj(exec, gpuvm_bo->obj);
-
-			drm_exec_retry_on_contention(exec);
-			if (err)
-				return err;
-		}
-
-		/* Unmap operations don't have an object to lock. */
-		if (!pvr_obj)
-			break;
-
-		/* Acquire lock on the GEM being mapped. */
-		err = drm_exec_lock_obj(exec,
-					gem_from_pvr_gem(bind_op->pvr_obj));
-
-		drm_exec_retry_on_contention(exec);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
 /**
  * pvr_vm_gpuva_map() - Insert a mapping into a memory context.
  * @op: gpuva op containing the remap details.
@@ -503,6 +463,7 @@ pvr_device_addr_is_valid(u64 device_addr)
 /**
  * pvr_device_addr_and_size_are_valid() - Tests whether a device-virtual
  * address and associated size are both valid.
+ * @vm_ctx: Target VM context.
  * @device_addr: Virtual device address to test.
  * @size: Size of the range based at @device_addr to test.
  *
@@ -521,16 +482,18 @@ pvr_device_addr_is_valid(u64 device_addr)
  *  * %false otherwise.
  */
 bool
-pvr_device_addr_and_size_are_valid(u64 device_addr, u64 size)
+pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx,
+				   u64 device_addr, u64 size)
 {
 	return pvr_device_addr_is_valid(device_addr) &&
+	       drm_gpuvm_range_valid(&vm_ctx->gpuvm_mgr, device_addr, size) &&
 	       size != 0 && (size & ~PVR_DEVICE_PAGE_MASK) == 0 &&
 	       (device_addr + size <= PVR_PAGE_TABLE_ADDR_SPACE_SIZE);
 }
 
-void pvr_gpuvm_free(struct drm_gpuvm *gpuvm)
+static void pvr_gpuvm_free(struct drm_gpuvm *gpuvm)
 {
-
+	kfree(to_pvr_vm_context(gpuvm));
 }
 
 static const struct drm_gpuvm_ops pvr_vm_gpuva_ops = {
@@ -650,12 +613,11 @@ pvr_vm_context_release(struct kref *ref_count)
 	WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
 			     vm_ctx->gpuvm_mgr.mm_range));
 
-	drm_gpuvm_put(&vm_ctx->gpuvm_mgr);
 	pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
 	drm_gem_private_object_fini(&vm_ctx->dummy_gem);
 	mutex_destroy(&vm_ctx->lock);
 
-	kfree(vm_ctx);
+	drm_gpuvm_put(&vm_ctx->gpuvm_mgr);
 }
 
 /**
@@ -721,6 +683,20 @@ void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file)
 	}
 }
 
+static int
+pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec)
+{
+	struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv;
+	struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;
+
+	/* Unmap operations don't have an object to lock. */
+	if (!pvr_obj)
+		return 0;
+
+	/* Acquire lock on the GEM being mapped. */
+	return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj));
+}
+
 /**
  * pvr_vm_map() - Map a section of physical memory into a section of
  * device-virtual memory.
@@ -748,7 +724,15 @@ pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
 	   u64 pvr_obj_offset, u64 device_addr, u64 size)
 {
 	struct pvr_vm_bind_op bind_op = {0};
-	struct drm_exec exec;
+	struct drm_gpuvm_exec vm_exec = {
+		.vm = &vm_ctx->gpuvm_mgr,
+		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
+			 DRM_EXEC_IGNORE_DUPLICATES,
+		.extra = {
+			.fn = pvr_vm_lock_extra,
+			.priv = &bind_op,
+		},
+	};
 
 	int err = pvr_vm_bind_op_map_init(&bind_op, vm_ctx, pvr_obj,
 					  pvr_obj_offset, device_addr,
@@ -757,18 +741,15 @@ pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
 	if (err)
 		return err;
 
-	drm_exec_init(&exec,
-		      DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES);
-
 	pvr_gem_object_get(pvr_obj);
 
-	err = pvr_vm_bind_op_lock_resvs(&exec, &bind_op);
+	err = drm_gpuvm_exec_lock(&vm_exec);
 	if (err)
 		goto err_cleanup;
 
 	err = pvr_vm_bind_op_exec(&bind_op);
 
-	drm_exec_fini(&exec);
+	drm_gpuvm_exec_unlock(&vm_exec);
 
 err_cleanup:
 	pvr_vm_bind_op_fini(&bind_op);
@@ -794,24 +775,28 @@ int
 pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
 {
 	struct pvr_vm_bind_op bind_op = {0};
-	struct drm_exec exec;
+	struct drm_gpuvm_exec vm_exec = {
+		.vm = &vm_ctx->gpuvm_mgr,
+		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
+			 DRM_EXEC_IGNORE_DUPLICATES,
+		.extra = {
+			.fn = pvr_vm_lock_extra,
+			.priv = &bind_op,
+		},
+	};
 
 	int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr,
 					    size);
-
 	if (err)
 		return err;
 
-	drm_exec_init(&exec,
-		      DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES);
-
-	err = pvr_vm_bind_op_lock_resvs(&exec, &bind_op);
+	err = drm_gpuvm_exec_lock(&vm_exec);
 	if (err)
 		goto err_cleanup;
 
 	err = pvr_vm_bind_op_exec(&bind_op);
 
-	drm_exec_fini(&exec);
+	drm_gpuvm_exec_unlock(&vm_exec);
 
 err_cleanup:
 	pvr_vm_bind_op_fini(&bind_op);
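
The two call sites above now share one shape: a struct drm_gpuvm_exec with an extra.fn hook replaces the removed pvr_vm_bind_op_lock_resvs(), which walked the extobj list by hand. A minimal sketch of the resulting pattern; my_bind_op, my_lock_extra() and my_do_bind() are illustrative stand-ins, while the drm_gpuvm_exec fields and helpers are the ones used in this hunk:

#include <drm/drm_gpuvm.h>

/* Illustrative stand-in for the driver's bind-op bookkeeping. */
struct my_bind_op {
	struct drm_gem_object *obj;	/* NULL for unmap operations */
};

static int my_do_bind(struct my_bind_op *op)
{
	return 0;	/* illustrative: the actual page-table work */
}

static int my_lock_extra(struct drm_gpuvm_exec *vm_exec)
{
	struct my_bind_op *op = vm_exec->extra.priv;

	/* Unmap operations have no extra object to lock. */
	if (!op->obj)
		return 0;

	return drm_exec_lock_obj(&vm_exec->exec, op->obj);
}

static int my_bind(struct drm_gpuvm *gpuvm, struct my_bind_op *op)
{
	struct drm_gpuvm_exec vm_exec = {
		.vm = gpuvm,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
			 DRM_EXEC_IGNORE_DUPLICATES,
		.extra = {
			.fn = my_lock_extra,
			.priv = op,
		},
	};
	int err;

	/* Locks the VM's reservation object and every external object
	 * tracked by the GPUVM, then calls my_lock_extra(), with
	 * drm_exec contention handling applied throughout.
	 */
	err = drm_gpuvm_exec_lock(&vm_exec);
	if (err)
		return err;

	err = my_do_bind(op);

	drm_gpuvm_exec_unlock(&vm_exec);
	return err;
}
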
diff --git a/drivers/gpu/drm/imagination/pvr_vm.h b/drivers/gpu/drm/imagination/pvr_vm.h
index cf8b97553dc85587f2a001b92491e6dac9d68e24..f2a6463f2b059ed70f88fe27a4db9b9be8292748 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.h
+++ b/drivers/gpu/drm/imagination/pvr_vm.h
@@ -29,7 +29,8 @@ struct drm_exec;
 /* Functions defined in pvr_vm.c */
 
 bool pvr_device_addr_is_valid(u64 device_addr);
-bool pvr_device_addr_and_size_are_valid(u64 device_addr, u64 size);
+bool pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx,
+					u64 device_addr, u64 size);
 
 struct pvr_vm_context *pvr_vm_create_context(struct pvr_device *pvr_dev,
 					     bool is_userspace_context);
diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.c b/drivers/gpu/drm/imagination/pvr_vm_mips.c
index 7268cf6e630be58a0f7f1d2799011cbd2f49e786..2bc7181a4c3ebe6f7825aa29f90c8da9e2cee397 100644
--- a/drivers/gpu/drm/imagination/pvr_vm_mips.c
+++ b/drivers/gpu/drm/imagination/pvr_vm_mips.c
@@ -57,6 +57,7 @@ pvr_vm_mips_init(struct pvr_device *pvr_dev)
 							       PAGE_SIZE, DMA_TO_DEVICE);
 		if (dma_mapping_error(dev, mips_data->pt_dma_addr[page_nr])) {
 			err = -ENOMEM;
+			__free_page(mips_data->pt_pages[page_nr]);
 			goto err_free_pages;
 		}
 	}
@@ -79,13 +80,11 @@ pvr_vm_mips_init(struct pvr_device *pvr_dev)
 	return 0;
 
 err_free_pages:
-	for (; page_nr >= 0; page_nr--) {
-		if (mips_data->pt_dma_addr[page_nr])
-			dma_unmap_page(from_pvr_device(pvr_dev)->dev,
-				       mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE);
+	while (--page_nr >= 0) {
+		dma_unmap_page(from_pvr_device(pvr_dev)->dev,
+			       mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE);
 
-		if (mips_data->pt_pages[page_nr])
-			__free_page(mips_data->pt_pages[page_nr]);
+		__free_page(mips_data->pt_pages[page_nr]);
 	}
 
 	return err;
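
The rewritten error path depends on the invariant that index page_nr is reached only when every earlier entry is fully set up, so the while (--page_nr >= 0) walk needs no per-entry NULL checks; the entry that failed halfway now frees its own page before jumping. A generic sketch of the idiom, mirroring the structure of the fix above (names are illustrative):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Sketch: two-step setup per entry; the unwind releases the failed
 * entry's first step inline, then walks back over complete entries.
 */
static int setup_all(struct page **pages, dma_addr_t *addrs,
		     struct device *dev, int count)
{
	int i, err;

	for (i = 0; i < count; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i]) {
			err = -ENOMEM;
			goto err_unwind;
		}

		addrs[i] = dma_map_page(dev, pages[i], 0, PAGE_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dev, addrs[i])) {
			err = -ENOMEM;
			/* Undo the half-finished entry here ... */
			__free_page(pages[i]);
			goto err_unwind;
		}
	}

	return 0;

err_unwind:
	/* ... so every entry below i is known to be fully mapped. */
	while (--i >= 0) {
		dma_unmap_page(dev, addrs[i], PAGE_SIZE, DMA_TO_DEVICE);
		__free_page(pages[i]);
	}

	return err;
}
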
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
index 989eca32d3252a15a8a790cc425c493a6d48a69b..53840ab054c725bb7648419a8391e70992062dd8 100644
--- a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
@@ -12,8 +12,10 @@
 #include <linux/mfd/syscon.h>
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/regmap.h>
 #include <linux/videodev2.h>
 
@@ -617,7 +619,6 @@ static int imx_ldb_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
-	const struct of_device_id *of_id = of_match_device(imx_ldb_dt_ids, dev);
 	struct device_node *child;
 	struct imx_ldb *imx_ldb;
 	int dual;
@@ -638,9 +639,7 @@ static int imx_ldb_probe(struct platform_device *pdev)
 	regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0);
 
 	imx_ldb->dev = dev;
-
-	if (of_id)
-		imx_ldb->lvds_mux = of_id->data;
+	imx_ldb->lvds_mux = device_get_match_data(dev);
 
 	dual = of_property_read_bool(np, "fsl,dual-channel");
 	if (dual)
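
of_match_device() is replaced with device_get_match_data(), which returns the matched entry's .data pointer directly and also works with non-OF firmware nodes. A minimal sketch of the pattern, with an illustrative match table and per-SoC data type:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>

/* Illustrative per-SoC data looked up through the match table. */
struct my_soc_data {
	unsigned int num_channels;
};

static const struct my_soc_data imx6q_data = { .num_channels = 2 };

static const struct of_device_id my_dt_ids[] = {
	{ .compatible = "fsl,imx6q-ldb", .data = &imx6q_data },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, my_dt_ids);

static int my_probe(struct platform_device *pdev)
{
	/* Replaces of_match_device(my_dt_ids, dev)->data and its
	 * NULL-check boilerplate with a single call.
	 */
	const struct my_soc_data *data = device_get_match_data(&pdev->dev);

	if (!data)
		return -ENODEV;

	return 0;
}
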
diff --git a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
index 0902983374d0bcbc25c32925425f988a965d8211..43ddf3a9810b6d198aa37650129aa6d95cd21e00 100644
--- a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
+++ b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
@@ -342,21 +342,12 @@ static const struct drm_mode_config_helper_funcs imx_lcdc_mode_config_helpers =
 	.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
 };
 
-static void imx_lcdc_release(struct drm_device *drm)
-{
-	struct imx_lcdc *lcdc = imx_lcdc_from_drmdev(drm);
-
-	drm_kms_helper_poll_fini(drm);
-	kfree(lcdc);
-}
-
 DEFINE_DRM_GEM_DMA_FOPS(imx_lcdc_drm_fops);
 
 static struct drm_driver imx_lcdc_drm_driver = {
 	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 	.fops = &imx_lcdc_drm_fops,
 	DRM_GEM_DMA_DRIVER_OPS_VMAP,
-	.release = imx_lcdc_release,
 	.name = "imx-lcdc",
 	.desc = "i.MX LCDC driver",
 	.date = "20200716",
diff --git a/drivers/gpu/drm/loongson/Kconfig b/drivers/gpu/drm/loongson/Kconfig
index df6946d505facf45f1a877c4696a577f27b0f2f7..8e59753e532de29dc49aeb79ef667054c96fbc7f 100644
--- a/drivers/gpu/drm/loongson/Kconfig
+++ b/drivers/gpu/drm/loongson/Kconfig
@@ -3,6 +3,7 @@
 config DRM_LOONGSON
 	tristate "DRM support for Loongson Graphics"
 	depends on DRM && PCI && MMU
+	depends on LOONGARCH || MIPS || COMPILE_TEST
 	select DRM_KMS_HELPER
 	select DRM_TTM
 	select I2C
diff --git a/drivers/gpu/drm/loongson/lsdc_plane.c b/drivers/gpu/drm/loongson/lsdc_plane.c
index 0d509463322293fd34692dbdf263bb0a2e43dca2..d227a2c1dcf16a3d5190de3893a55228ec70b254 100644
--- a/drivers/gpu/drm/loongson/lsdc_plane.c
+++ b/drivers/gpu/drm/loongson/lsdc_plane.c
@@ -9,7 +9,6 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_framebuffer.h>
 #include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
 
 #include "lsdc_drv.h"
 #include "lsdc_regs.h"
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 4252e3839fbc833d68a6012d6b0f86e924be88da..2bfcb222e3533816cbe21b62dae62f8a09bfe934 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -347,7 +347,7 @@ struct msm_gpu_perfcntr {
  * DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some
  * cases, so we don't use it (no need for kernel generated jobs).
  */
-#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)
+#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_LOW - DRM_SCHED_PRIORITY_HIGH)
 
 /**
  * struct msm_file_private - per-drm_file context
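
The NR_SCHED_PRIORITIES arithmetic flips because the run-queue priority enumeration was reversed: KERNEL is now the lowest numeric value (highest priority) and LOW the highest. A sketch of the assumed numbering (the authoritative values live in drm/gpu_scheduler.h):

/* Assumed reversed enumeration: a lower value means a higher priority. */
enum sketch_priority {
	SKETCH_PRIORITY_KERNEL,	/* 0, highest, kernel-internal */
	SKETCH_PRIORITY_HIGH,	/* 1 */
	SKETCH_PRIORITY_NORMAL,	/* 2 */
	SKETCH_PRIORITY_LOW,	/* 3, lowest */
};

/* msm exposes HIGH..LOW to userspace, skipping KERNEL:
 * 1 + SKETCH_PRIORITY_LOW - SKETCH_PRIORITY_HIGH == 1 + 3 - 1 == 3.
 */
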
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 625c1bfc41733b23e05de4556e3c9841094f7922..b483ef48216aa12b80394c696ff2ff65eb85c531 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -11,9 +11,10 @@
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
 #include <linux/io.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/pm_runtime.h>
 
 #include <drm/drm_atomic_helper.h>
@@ -346,18 +347,13 @@ MODULE_DEVICE_TABLE(of, mxsfb_dt_ids);
 static int mxsfb_probe(struct platform_device *pdev)
 {
 	struct drm_device *drm;
-	const struct of_device_id *of_id =
-			of_match_device(mxsfb_dt_ids, &pdev->dev);
 	int ret;
 
-	if (!pdev->dev.of_node)
-		return -ENODEV;
-
 	drm = drm_dev_alloc(&mxsfb_driver, &pdev->dev);
 	if (IS_ERR(drm))
 		return PTR_ERR(drm);
 
-	ret = mxsfb_load(drm, of_id->data);
+	ret = mxsfb_load(drm, device_get_match_data(&pdev->dev));
 	if (ret)
 		goto err_free;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 2edd7bb13faea5ebb15985867cab304d12d2da96..a04156ca8390ba6fea6a21e07e9eb5bba3ec7605 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -127,21 +127,14 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
 {
 	struct nouveau_abi16_ntfy *ntfy, *temp;
 
-	/* When a client exits without waiting for it's queued up jobs to
-	 * finish it might happen that we fault the channel. This is due to
-	 * drm_file_free() calling drm_gem_release() before the postclose()
-	 * callback. Hence, we can't tear down this scheduler entity before
-	 * uvmm mappings are unmapped. Currently, we can't detect this case.
-	 *
-	 * However, this should be rare and harmless, since the channel isn't
-	 * needed anymore.
-	 */
-	nouveau_sched_entity_fini(&chan->sched_entity);
+	/* Cancel all jobs from the entity's queue. */
+	drm_sched_entity_fini(&chan->sched.entity);
 
-	/* wait for all activity to stop before cleaning up */
 	if (chan->chan)
 		nouveau_channel_idle(chan->chan);
 
+	nouveau_sched_fini(&chan->sched);
+
 	/* cleanup notifier state */
 	list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
 		nouveau_abi16_ntfy_fini(chan, ntfy);
@@ -344,8 +337,8 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 	if (ret)
 		goto done;
 
-	ret = nouveau_sched_entity_init(&chan->sched_entity, &drm->sched,
-					drm->sched_wq);
+	ret = nouveau_sched_init(&chan->sched, drm, drm->sched_wq,
+				 chan->chan->dma.ib_max);
 	if (ret)
 		goto done;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 9f538486c10e37108ef0b4aaa41c1ad2542baaef..1f5e243c0c759ef759dbba7d4f89279c90bce5d4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -26,7 +26,7 @@ struct nouveau_abi16_chan {
 	struct nouveau_bo *ntfy;
 	struct nouveau_vma *ntfy_vma;
 	struct nvkm_mm  heap;
-	struct nouveau_sched_entity sched_entity;
+	struct nouveau_sched sched;
 };
 
 struct nouveau_abi16 {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7afad86da64b7c98db2d3b33d2fcca0b39621a2b..b7dda486a7eac76ad4ef28adc10701d20381161b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1061,17 +1061,18 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct drm_gem_object *obj = &bo->base;
 	struct ttm_resource *old_reg = bo->resource;
 	struct nouveau_drm_tile *new_tile = NULL;
 	int ret = 0;
 
-
 	if (new_reg->mem_type == TTM_PL_TT) {
 		ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg);
 		if (ret)
 			return ret;
 	}
 
+	drm_gpuvm_bo_gem_evict(obj, evict);
 	nouveau_bo_move_ntfy(bo, new_reg);
 	ret = ttm_bo_wait_ctx(bo, ctx);
 	if (ret)
@@ -1136,6 +1137,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
 out_ntfy:
 	if (ret) {
 		nouveau_bo_move_ntfy(bo, bo->resource);
+		drm_gpuvm_bo_gem_evict(obj, !evict);
 	}
 	return ret;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index f603eaef1560867082b77df8c0b4453f23a11978..6f6c31a9937b2fe751c6cffe429cc21a6b47a385 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -201,9 +201,9 @@ nouveau_cli_fini(struct nouveau_cli *cli)
 	WARN_ON(!list_empty(&cli->worker));
 
 	usif_client_fini(cli);
+	nouveau_sched_fini(&cli->sched);
 	if (uvmm)
 		nouveau_uvmm_fini(uvmm);
-	nouveau_sched_entity_fini(&cli->sched_entity);
 	nouveau_vmm_fini(&cli->svm);
 	nouveau_vmm_fini(&cli->vmm);
 	nvif_mmu_dtor(&cli->mmu);
@@ -310,8 +310,17 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
 
 	cli->mem = &mems[ret];
 
-	ret = nouveau_sched_entity_init(&cli->sched_entity, &drm->sched,
-					drm->sched_wq);
+	/* Don't pass in the (shared) sched_wq in order to let
+	 * nouveau_sched_init() create a dedicated one for VM_BIND jobs.
+	 *
+	 * This is required to ensure that for VM_BIND jobs free_job() work and
+	 * run_job() work can always run concurrently and hence, free_job() work
+	 * can never stall run_job() work. For EXEC jobs we don't have this
+	 * requirement, since an EXEC job's free_job() does not need to take any
+	 * locks that are directly or indirectly held for allocations
+	 * elsewhere.
+	 */
+	ret = nouveau_sched_init(&cli->sched, drm, NULL, 1);
 	if (ret)
 		goto done;
 
@@ -582,13 +591,16 @@ nouveau_drm_device_init(struct drm_device *dev)
 	nvif_parent_ctor(&nouveau_parent, &drm->parent);
 	drm->master.base.object.parent = &drm->parent;
 
-	ret = nouveau_sched_init(drm);
-	if (ret)
+	drm->sched_wq = alloc_workqueue("nouveau_sched_wq_shared", 0,
+					WQ_MAX_ACTIVE);
+	if (!drm->sched_wq) {
+		ret = -ENOMEM;
 		goto fail_alloc;
+	}
 
 	ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
 	if (ret)
-		goto fail_sched;
+		goto fail_wq;
 
 	ret = nouveau_cli_init(drm, "DRM", &drm->client);
 	if (ret)
@@ -658,8 +670,8 @@ nouveau_drm_device_init(struct drm_device *dev)
 	nouveau_cli_fini(&drm->client);
 fail_master:
 	nouveau_cli_fini(&drm->master);
-fail_sched:
-	nouveau_sched_fini(drm);
+fail_wq:
+	destroy_workqueue(drm->sched_wq);
 fail_alloc:
 	nvif_parent_dtor(&drm->parent);
 	kfree(drm);
@@ -711,10 +723,9 @@ nouveau_drm_device_fini(struct drm_device *dev)
 	}
 	mutex_unlock(&drm->clients_lock);
 
-	nouveau_sched_fini(drm);
-
 	nouveau_cli_fini(&drm->client);
 	nouveau_cli_fini(&drm->master);
+	destroy_workqueue(drm->sched_wq);
 	nvif_parent_dtor(&drm->parent);
 	mutex_destroy(&drm->clients_lock);
 	kfree(drm);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 7f7051df84a6d808ce005a11626ae1c3d2ed32fb..8a6d94c8b1631fd7ab8bbc193f35b064057a0185 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -98,7 +98,7 @@ struct nouveau_cli {
 		bool disabled;
 	} uvmm;
 
-	struct nouveau_sched_entity sched_entity;
+	struct nouveau_sched sched;
 
 	const struct nvif_mclass *mem;
 
@@ -258,6 +258,9 @@ struct nouveau_drm {
 		u64 context_base;
 	} *runl;
 
+	/* Workqueue used for channel schedulers. */
+	struct workqueue_struct *sched_wq;
+
 	/* context for accelerated drm-internal operations */
 	struct nouveau_channel *cechan;
 	struct nouveau_channel *channel;
@@ -298,10 +301,6 @@ struct nouveau_drm {
 		struct mutex lock;
 		bool component_registered;
 	} audio;
-
-	struct drm_gpu_scheduler sched;
-	struct workqueue_struct *sched_wq;
-
 };
 
 static inline struct nouveau_drm *
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index 9a5ef574744b34e32b49c1bf9d1270f14e702d0a..bc5d71b79ab203ff7e874c612f3ea1e7c36323de 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -1,7 +1,5 @@
 // SPDX-License-Identifier: MIT
 
-#include <drm/drm_exec.h>
-
 #include "nouveau_drv.h"
 #include "nouveau_gem.h"
 #include "nouveau_mem.h"
@@ -86,14 +84,12 @@
  */
 
 static int
-nouveau_exec_job_submit(struct nouveau_job *job)
+nouveau_exec_job_submit(struct nouveau_job *job,
+			struct drm_gpuvm_exec *vme)
 {
 	struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
 	struct nouveau_cli *cli = job->cli;
 	struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
-	struct drm_exec *exec = &job->exec;
-	struct drm_gem_object *obj;
-	unsigned long index;
 	int ret;
 
 	/* Create a new fence, but do not emit yet. */
@@ -102,52 +98,29 @@ nouveau_exec_job_submit(struct nouveau_job *job)
 		return ret;
 
 	nouveau_uvmm_lock(uvmm);
-	drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
-			    DRM_EXEC_IGNORE_DUPLICATES);
-	drm_exec_until_all_locked(exec) {
-		struct drm_gpuva *va;
-
-		drm_gpuvm_for_each_va(va, &uvmm->base) {
-			if (unlikely(va == &uvmm->base.kernel_alloc_node))
-				continue;
-
-			ret = drm_exec_prepare_obj(exec, va->gem.obj, 1);
-			drm_exec_retry_on_contention(exec);
-			if (ret)
-				goto err_uvmm_unlock;
-		}
+	ret = drm_gpuvm_exec_lock(vme);
+	if (ret) {
+		nouveau_uvmm_unlock(uvmm);
+		return ret;
 	}
 	nouveau_uvmm_unlock(uvmm);
 
-	drm_exec_for_each_locked_object(exec, index, obj) {
-		struct nouveau_bo *nvbo = nouveau_gem_object(obj);
-
-		ret = nouveau_bo_validate(nvbo, true, false);
-		if (ret)
-			goto err_exec_fini;
+	ret = drm_gpuvm_exec_validate(vme);
+	if (ret) {
+		drm_gpuvm_exec_unlock(vme);
+		return ret;
 	}
 
 	return 0;
-
-err_uvmm_unlock:
-	nouveau_uvmm_unlock(uvmm);
-err_exec_fini:
-	drm_exec_fini(exec);
-	return ret;
-
 }
 
 static void
-nouveau_exec_job_armed_submit(struct nouveau_job *job)
+nouveau_exec_job_armed_submit(struct nouveau_job *job,
+			      struct drm_gpuvm_exec *vme)
 {
-	struct drm_exec *exec = &job->exec;
-	struct drm_gem_object *obj;
-	unsigned long index;
-
-	drm_exec_for_each_locked_object(exec, index, obj)
-		dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage);
-
-	drm_exec_fini(exec);
+	drm_gpuvm_exec_resv_add_fence(vme, job->done_fence,
+				      job->resv_usage, job->resv_usage);
+	drm_gpuvm_exec_unlock(vme);
 }
 
 static struct dma_fence *
@@ -192,6 +165,7 @@ nouveau_exec_job_free(struct nouveau_job *job)
 {
 	struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
 
+	nouveau_job_done(job);
 	nouveau_job_free(job);
 
 	kfree(exec_job->fence);
@@ -211,8 +185,6 @@ nouveau_exec_job_timeout(struct nouveau_job *job)
 	NV_PRINTK(warn, job->cli, "job timeout, channel %d killed!\n",
 		  chan->chid);
 
-	nouveau_sched_entity_fini(job->entity);
-
 	return DRM_GPU_SCHED_STAT_NOMINAL;
 }
 
@@ -259,10 +231,12 @@ nouveau_exec_job_init(struct nouveau_exec_job **pjob,
 		}
 	}
 
+	args.file_priv = __args->file_priv;
 	job->chan = __args->chan;
 
-	args.sched_entity = __args->sched_entity;
-	args.file_priv = __args->file_priv;
+	args.sched = __args->sched;
+	/* Plus one to account for the HW fence. */
+	args.credits = job->push.count + 1;
 
 	args.in_sync.count = __args->in_sync.count;
 	args.in_sync.s = __args->in_sync.s;
@@ -415,7 +389,7 @@ nouveau_exec_ioctl_exec(struct drm_device *dev,
 	if (ret)
 		goto out;
 
-	args.sched_entity = &chan16->sched_entity;
+	args.sched = &chan16->sched;
 	args.file_priv = file_priv;
 	args.chan = chan;
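
The exec path now funnels through four drm_gpuvm_exec helpers: lock every VM object, validate evicted BOs through the vm_bo_validate callback, attach the job's done-fence to each reservation object, and unlock. A condensed sketch of that lifecycle with error handling trimmed; note the real code passes job->resv_usage for both usage arguments, the WRITE usages here are illustrative:

#include <drm/drm_gpuvm.h>
#include <linux/dma-resv.h>

/* Sketch of the submit/armed_submit split used above. */
static int sketch_submit(struct drm_gpuvm_exec *vme)
{
	int ret;

	ret = drm_gpuvm_exec_lock(vme);		/* VM resv + all extobjs */
	if (ret)
		return ret;

	ret = drm_gpuvm_exec_validate(vme);	/* revalidate evicted BOs */
	if (ret)
		drm_gpuvm_exec_unlock(vme);

	return ret;	/* on success, stay locked until armed_submit */
}

static void sketch_armed_submit(struct drm_gpuvm_exec *vme,
				struct dma_fence *done_fence)
{
	/* Add the fence to every locked object's reservation (private
	 * and external objects may use different usages), then unlock.
	 */
	drm_gpuvm_exec_resv_add_fence(vme, done_fence,
				      DMA_RESV_USAGE_WRITE,
				      DMA_RESV_USAGE_WRITE);
	drm_gpuvm_exec_unlock(vme);
}
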
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.h b/drivers/gpu/drm/nouveau/nouveau_exec.h
index 5488d337bcc0e29852a20e2e2c6af986cc60f9fe..9b3b151facfd8ac6f2dc8ba3b7dfd788c3a84587 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.h
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.h
@@ -3,16 +3,12 @@
 #ifndef __NOUVEAU_EXEC_H__
 #define __NOUVEAU_EXEC_H__
 
-#include <drm/drm_exec.h>
-
 #include "nouveau_drv.h"
 #include "nouveau_sched.h"
 
 struct nouveau_exec_job_args {
 	struct drm_file *file_priv;
-	struct nouveau_sched_entity *sched_entity;
-
-	struct drm_exec exec;
+	struct nouveau_sched *sched;
 	struct nouveau_channel *chan;
 
 	struct {
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index e03fd2bc8a11bdfe50707be14cc64dc7f6587629..dd98f6910f9cab7b19117186339a138277e77b78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -12,30 +12,28 @@
 #include "nouveau_abi16.h"
 #include "nouveau_sched.h"
 
-/* FIXME
- *
- * We want to make sure that jobs currently executing can't be deferred by
- * other jobs competing for the hardware. Otherwise we might end up with job
- * timeouts just because of too many clients submitting too many jobs. We don't
- * want jobs to time out because of system load, but because of the job being
- * too bulky.
- *
- * For now allow for up to 16 concurrent jobs in flight until we know how many
- * rings the hardware can process in parallel.
- */
-#define NOUVEAU_SCHED_HW_SUBMISSIONS		16
 #define NOUVEAU_SCHED_JOB_TIMEOUT_MS		10000
 
+/* Starts at 0, since the DRM scheduler interprets those parameters as (initial)
+ * indices into the run-queue array.
+ */
+enum nouveau_sched_priority {
+	NOUVEAU_SCHED_PRIORITY_SINGLE = DRM_SCHED_PRIORITY_KERNEL,
+	NOUVEAU_SCHED_PRIORITY_COUNT,
+};
+
 int
 nouveau_job_init(struct nouveau_job *job,
 		 struct nouveau_job_args *args)
 {
-	struct nouveau_sched_entity *entity = args->sched_entity;
+	struct nouveau_sched *sched = args->sched;
 	int ret;
 
+	INIT_LIST_HEAD(&job->entry);
+
 	job->file_priv = args->file_priv;
 	job->cli = nouveau_cli(args->file_priv);
-	job->entity = entity;
+	job->sched = sched;
 
 	job->sync = args->sync;
 	job->resv_usage = args->resv_usage;
@@ -86,10 +84,10 @@ nouveau_job_init(struct nouveau_job *job,
 			ret = -ENOMEM;
 			goto err_free_objs;
 		}
-
 	}
 
-	ret = drm_sched_job_init(&job->base, &entity->base, 1, NULL);
+	ret = drm_sched_job_init(&job->base, &sched->entity,
+				 args->credits, NULL);
 	if (ret)
 		goto err_free_chains;
 
@@ -108,6 +106,27 @@ nouveau_job_init(struct nouveau_job *job,
 	return ret;
 }
 
+void
+nouveau_job_fini(struct nouveau_job *job)
+{
+	dma_fence_put(job->done_fence);
+	drm_sched_job_cleanup(&job->base);
+
+	job->ops->free(job);
+}
+
+void
+nouveau_job_done(struct nouveau_job *job)
+{
+	struct nouveau_sched *sched = job->sched;
+
+	spin_lock(&sched->job.list.lock);
+	list_del(&job->entry);
+	spin_unlock(&sched->job.list.lock);
+
+	wake_up(&sched->job.wq);
+}
+
 void
 nouveau_job_free(struct nouveau_job *job)
 {
@@ -117,13 +136,6 @@ nouveau_job_free(struct nouveau_job *job)
 	kfree(job->out_sync.chains);
 }
 
-void nouveau_job_fini(struct nouveau_job *job)
-{
-	dma_fence_put(job->done_fence);
-	drm_sched_job_cleanup(&job->base);
-	job->ops->free(job);
-}
-
 static int
 sync_find_fence(struct nouveau_job *job,
 		struct drm_nouveau_sync *sync,
@@ -261,8 +273,13 @@ nouveau_job_fence_attach(struct nouveau_job *job)
 int
 nouveau_job_submit(struct nouveau_job *job)
 {
-	struct nouveau_sched_entity *entity = to_nouveau_sched_entity(job->base.entity);
+	struct nouveau_sched *sched = job->sched;
 	struct dma_fence *done_fence = NULL;
+	struct drm_gpuvm_exec vm_exec = {
+		.vm = &nouveau_cli_uvmm(job->cli)->base,
+		.flags = DRM_EXEC_IGNORE_DUPLICATES,
+		.num_fences = 1,
+	};
 	int ret;
 
 	ret = nouveau_job_add_deps(job);
@@ -276,46 +293,29 @@ nouveau_job_submit(struct nouveau_job *job)
 	/* Make sure the job appears on the sched_entity's queue in the same
 	 * order as it was submitted.
 	 */
-	mutex_lock(&entity->mutex);
+	mutex_lock(&sched->mutex);
 
 	/* Guarantee we won't fail after the submit() callback returned
 	 * successfully.
 	 */
 	if (job->ops->submit) {
-		ret = job->ops->submit(job);
+		ret = job->ops->submit(job, &vm_exec);
 		if (ret)
 			goto err_cleanup;
 	}
 
+	/* Submit was successful; add the job to the scheduler's job list. */
+	spin_lock(&sched->job.list.lock);
+	list_add(&job->entry, &sched->job.list.head);
+	spin_unlock(&sched->job.list.lock);
+
 	drm_sched_job_arm(&job->base);
 	job->done_fence = dma_fence_get(&job->base.s_fence->finished);
 	if (job->sync)
 		done_fence = dma_fence_get(job->done_fence);
 
-	/* If a sched job depends on a dma-fence from a job from the same GPU
-	 * scheduler instance, but a different scheduler entity, the GPU
-	 * scheduler does only wait for the particular job to be scheduled,
-	 * rather than for the job to fully complete. This is due to the GPU
-	 * scheduler assuming that there is a scheduler instance per ring.
-	 * However, the current implementation, in order to avoid arbitrary
-	 * amounts of kthreads, has a single scheduler instance while scheduler
-	 * entities represent rings.
-	 *
-	 * As a workaround, set the DRM_SCHED_FENCE_DONT_PIPELINE for all
-	 * out-fences in order to force the scheduler to wait for full job
-	 * completion for dependent jobs from different entities and same
-	 * scheduler instance.
-	 *
-	 * There is some work in progress [1] to address the issues of firmware
-	 * schedulers; once it is in-tree the scheduler topology in Nouveau
-	 * should be re-worked accordingly.
-	 *
-	 * [1] https://lore.kernel.org/dri-devel/20230801205103.627779-1-matthew.brost@intel.com/
-	 */
-	set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &job->done_fence->flags);
-
 	if (job->ops->armed_submit)
-		job->ops->armed_submit(job);
+		job->ops->armed_submit(job, &vm_exec);
 
 	nouveau_job_fence_attach(job);
 
@@ -326,7 +326,7 @@ nouveau_job_submit(struct nouveau_job *job)
 
 	drm_sched_entity_push_job(&job->base);
 
-	mutex_unlock(&entity->mutex);
+	mutex_unlock(&sched->mutex);
 
 	if (done_fence) {
 		dma_fence_wait(done_fence, true);
@@ -336,20 +336,13 @@ nouveau_job_submit(struct nouveau_job *job)
 	return 0;
 
 err_cleanup:
-	mutex_unlock(&entity->mutex);
+	mutex_unlock(&sched->mutex);
 	nouveau_job_fence_attach_cleanup(job);
 err:
 	job->state = NOUVEAU_JOB_SUBMIT_FAILED;
 	return ret;
 }
 
-bool
-nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity,
-			   struct work_struct *work)
-{
-	return queue_work(entity->sched_wq, work);
-}
-
 static struct dma_fence *
 nouveau_job_run(struct nouveau_job *job)
 {
@@ -399,50 +392,82 @@ nouveau_sched_free_job(struct drm_sched_job *sched_job)
 	nouveau_job_fini(job);
 }
 
-int nouveau_sched_entity_init(struct nouveau_sched_entity *entity,
-			      struct drm_gpu_scheduler *sched,
-			      struct workqueue_struct *sched_wq)
-{
-	mutex_init(&entity->mutex);
-	spin_lock_init(&entity->job.list.lock);
-	INIT_LIST_HEAD(&entity->job.list.head);
-	init_waitqueue_head(&entity->job.wq);
-
-	entity->sched_wq = sched_wq;
-	return drm_sched_entity_init(&entity->base,
-				     DRM_SCHED_PRIORITY_NORMAL,
-				     &sched, 1, NULL);
-}
-
-void
-nouveau_sched_entity_fini(struct nouveau_sched_entity *entity)
-{
-	drm_sched_entity_destroy(&entity->base);
-}
-
 static const struct drm_sched_backend_ops nouveau_sched_ops = {
 	.run_job = nouveau_sched_run_job,
 	.timedout_job = nouveau_sched_timedout_job,
 	.free_job = nouveau_sched_free_job,
 };
 
-int nouveau_sched_init(struct nouveau_drm *drm)
+int
+nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
+		   struct workqueue_struct *wq, u32 credit_limit)
 {
-	struct drm_gpu_scheduler *sched = &drm->sched;
+	struct drm_gpu_scheduler *drm_sched = &sched->base;
+	struct drm_sched_entity *entity = &sched->entity;
 	long job_hang_limit = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS);
+	int ret;
 
-	drm->sched_wq = create_singlethread_workqueue("nouveau_sched_wq");
-	if (!drm->sched_wq)
-		return -ENOMEM;
+	if (!wq) {
+		wq = alloc_workqueue("nouveau_sched_wq_%d", 0, WQ_MAX_ACTIVE,
+				     current->pid);
+		if (!wq)
+			return -ENOMEM;
+
+		sched->wq = wq;
+	}
 
-	return drm_sched_init(sched, &nouveau_sched_ops, NULL,
-			      DRM_SCHED_PRIORITY_COUNT,
-			      NOUVEAU_SCHED_HW_SUBMISSIONS, 0, job_hang_limit,
-			      NULL, NULL, "nouveau_sched", drm->dev->dev);
+	ret = drm_sched_init(drm_sched, &nouveau_sched_ops, wq,
+			     NOUVEAU_SCHED_PRIORITY_COUNT,
+			     credit_limit, 0, job_hang_limit,
+			     NULL, NULL, "nouveau_sched", drm->dev->dev);
+	if (ret)
+		goto fail_wq;
+
+	/* Use DRM_SCHED_PRIORITY_KERNEL, since that's what we're required to
+	 * use when we want to have a single run-queue only.
+	 *
+	 * It's not documented, but using any other priority will run into
+	 * faults, because the scheduler uses the priority as an array
+	 * index.
+	 *
+	 * We can't use NOUVEAU_SCHED_PRIORITY_SINGLE either, because it
+	 * doesn't match the enum type used in drm_sched_entity_init().
+	 */
+	ret = drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_KERNEL,
+				    &drm_sched, 1, NULL);
+	if (ret)
+		goto fail_sched;
+
+	mutex_init(&sched->mutex);
+	spin_lock_init(&sched->job.list.lock);
+	INIT_LIST_HEAD(&sched->job.list.head);
+	init_waitqueue_head(&sched->job.wq);
+
+	return 0;
+
+fail_sched:
+	drm_sched_fini(drm_sched);
+fail_wq:
+	if (sched->wq)
+		destroy_workqueue(sched->wq);
+	return ret;
 }
 
-void nouveau_sched_fini(struct nouveau_drm *drm)
+void
+nouveau_sched_fini(struct nouveau_sched *sched)
 {
-	destroy_workqueue(drm->sched_wq);
-	drm_sched_fini(&drm->sched);
+	struct drm_gpu_scheduler *drm_sched = &sched->base;
+	struct drm_sched_entity *entity = &sched->entity;
+
+	rmb(); /* for list_empty to work without lock */
+	wait_event(sched->job.wq, list_empty(&sched->job.list.head));
+
+	drm_sched_entity_fini(entity);
+	drm_sched_fini(drm_sched);
+
+	/* Destroy the workqueue after scheduler teardown, otherwise it might still
+	 * be in use.
+	 */
+	if (sched->wq)
+		destroy_workqueue(sched->wq);
 }
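
nouveau_sched_fini() must not tear the scheduler down while jobs are still tracked, so it blocks until the job list drains; nouveau_job_done() is the producer side that removes an entry and wakes the waiter. A reduced sketch of the handshake, where the types are illustrative reductions of nouveau_sched and nouveau_job:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct sketch_sched {			/* fields initialized at init time */
	struct list_head job_list;
	spinlock_t job_list_lock;
	wait_queue_head_t job_wq;
};

struct sketch_job {
	struct list_head entry;
};

/* Completion side: drop the job from the list, then wake the waiter. */
static void sketch_job_done(struct sketch_sched *sched, struct sketch_job *job)
{
	spin_lock(&sched->job_list_lock);
	list_del(&job->entry);
	spin_unlock(&sched->job_list_lock);

	wake_up(&sched->job_wq);
}

/* Teardown side: wait_event() re-checks the (lockless) list_empty()
 * condition after every wakeup, so a racing sketch_job_done() cannot
 * be missed.
 */
static void sketch_sched_drain(struct sketch_sched *sched)
{
	wait_event(sched->job_wq, list_empty(&sched->job_list));
}
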
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h
index 27ac19792597bd6be859e89b6c2ba8fda51fd954..a6528f5981e6a6e8182a44e0ec3c0336302e6154 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.h
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.h
@@ -5,7 +5,7 @@
 
 #include <linux/types.h>
 
-#include <drm/drm_exec.h>
+#include <drm/drm_gpuvm.h>
 #include <drm/gpu_scheduler.h>
 
 #include "nouveau_drv.h"
@@ -26,7 +26,8 @@ enum nouveau_job_state {
 
 struct nouveau_job_args {
 	struct drm_file *file_priv;
-	struct nouveau_sched_entity *sched_entity;
+	struct nouveau_sched *sched;
+	u32 credits;
 
 	enum dma_resv_usage resv_usage;
 	bool sync;
@@ -49,12 +50,12 @@ struct nouveau_job {
 
 	enum nouveau_job_state state;
 
-	struct nouveau_sched_entity *entity;
+	struct nouveau_sched *sched;
+	struct list_head entry;
 
 	struct drm_file *file_priv;
 	struct nouveau_cli *cli;
 
-	struct drm_exec exec;
 	enum dma_resv_usage resv_usage;
 	struct dma_fence *done_fence;
 
@@ -76,8 +77,8 @@ struct nouveau_job {
 		/* If .submit() returns without any error, it is guaranteed that
 		 * armed_submit() is called.
 		 */
-		int (*submit)(struct nouveau_job *);
-		void (*armed_submit)(struct nouveau_job *);
+		int (*submit)(struct nouveau_job *, struct drm_gpuvm_exec *);
+		void (*armed_submit)(struct nouveau_job *, struct drm_gpuvm_exec *);
 		struct dma_fence *(*run)(struct nouveau_job *);
 		void (*free)(struct nouveau_job *);
 		enum drm_gpu_sched_stat (*timeout)(struct nouveau_job *);
@@ -90,20 +91,17 @@ int nouveau_job_ucopy_syncs(struct nouveau_job_args *args,
 
 int nouveau_job_init(struct nouveau_job *job,
 		     struct nouveau_job_args *args);
-void nouveau_job_free(struct nouveau_job *job);
-
-int nouveau_job_submit(struct nouveau_job *job);
 void nouveau_job_fini(struct nouveau_job *job);
+int nouveau_job_submit(struct nouveau_job *job);
+void nouveau_job_done(struct nouveau_job *job);
+void nouveau_job_free(struct nouveau_job *job);
 
-#define to_nouveau_sched_entity(entity)		\
-		container_of((entity), struct nouveau_sched_entity, base)
-
-struct nouveau_sched_entity {
-	struct drm_sched_entity base;
+struct nouveau_sched {
+	struct drm_gpu_scheduler base;
+	struct drm_sched_entity entity;
+	struct workqueue_struct *wq;
 	struct mutex mutex;
 
-	struct workqueue_struct *sched_wq;
-
 	struct {
 		struct {
 			struct list_head head;
@@ -113,15 +111,8 @@ struct nouveau_sched_entity {
 	} job;
 };
 
-int nouveau_sched_entity_init(struct nouveau_sched_entity *entity,
-			      struct drm_gpu_scheduler *sched,
-			      struct workqueue_struct *sched_wq);
-void nouveau_sched_entity_fini(struct nouveau_sched_entity *entity);
-
-bool nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity,
-				struct work_struct *work);
-
-int nouveau_sched_init(struct nouveau_drm *drm);
-void nouveau_sched_fini(struct nouveau_drm *drm);
+int nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
+		       struct workqueue_struct *wq, u32 credit_limit);
+void nouveau_sched_fini(struct nouveau_sched *sched);
 
 #endif
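
A hedged usage sketch of the per-client API declared above: passing a NULL workqueue makes nouveau_sched_init() allocate a dedicated one (as the VM_BIND path does), while channels pass the shared drm->sched_wq. The surrounding client type is illustrative:

#include "nouveau_drv.h"
#include "nouveau_sched.h"

struct my_client {			/* illustrative container */
	struct nouveau_sched sched;
};

static int my_client_init(struct my_client *cli, struct nouveau_drm *drm)
{
	/* NULL wq means a dedicated workqueue; credit_limit of 1 since
	 * VM_BIND jobs carry a single credit each (see args.credits above).
	 */
	return nouveau_sched_init(&cli->sched, drm, NULL, 1);
}

static void my_client_fini(struct my_client *cli)
{
	/* Waits for the tracked job list to drain, then tears down the
	 * entity, the scheduler, and any dedicated workqueue.
	 */
	nouveau_sched_fini(&cli->sched);
}
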
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index eda7bb8624f112d8046f24515d99726ef17f88fe..dae3baf707a0b34168378f35a1705de1047e5056 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -438,8 +438,9 @@ nouveau_uvma_region_complete(struct nouveau_uvma_region *reg)
 static void
 op_map_prepare_unwind(struct nouveau_uvma *uvma)
 {
+	struct drm_gpuva *va = &uvma->va;
 	nouveau_uvma_gem_put(uvma);
-	drm_gpuva_remove(&uvma->va);
+	drm_gpuva_remove(va);
 	nouveau_uvma_free(uvma);
 }
 
@@ -468,6 +469,7 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
 			break;
 		case DRM_GPUVA_OP_REMAP: {
 			struct drm_gpuva_op_remap *r = &op->remap;
+			struct drm_gpuva *va = r->unmap->va;
 
 			if (r->next)
 				op_map_prepare_unwind(new->next);
@@ -475,7 +477,7 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
 			if (r->prev)
 				op_map_prepare_unwind(new->prev);
 
-			op_unmap_prepare_unwind(r->unmap->va);
+			op_unmap_prepare_unwind(va);
 			break;
 		}
 		case DRM_GPUVA_OP_UNMAP:
@@ -606,6 +608,9 @@ op_unmap_prepare(struct drm_gpuva_op_unmap *u)
 	drm_gpuva_unmap(u);
 }
 
+/*
+ * Note: @args should not be NULL when calling for a map operation.
+ */
 static int
 nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
 			struct nouveau_uvma_prealloc *new,
@@ -626,7 +631,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
 			if (ret)
 				goto unwind;
 
-			if (args && vmm_get_range) {
+			if (vmm_get_range) {
 				ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
 							   vmm_get_range);
 				if (ret) {
@@ -634,6 +639,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
 					goto unwind;
 				}
 			}
+
 			break;
 		}
 		case DRM_GPUVA_OP_REMAP: {
@@ -960,6 +966,12 @@ nouveau_uvmm_bind_job_free(struct kref *kref)
 {
 	struct nouveau_uvmm_bind_job *job =
 		container_of(kref, struct nouveau_uvmm_bind_job, kref);
+	struct bind_job_op *op, *next;
+
+	list_for_each_op_safe(op, next, &job->ops) {
+		list_del(&op->entry);
+		kfree(op);
+	}
 
 	nouveau_job_free(&job->base);
 	kfree(job);
@@ -1001,14 +1013,16 @@ bind_validate_op(struct nouveau_job *job,
 static void
 bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range)
 {
-	struct nouveau_uvmm_bind_job *bind_job;
-	struct nouveau_sched_entity *entity = job->entity;
+	struct nouveau_sched *sched = job->sched;
+	struct nouveau_job *__job;
 	struct bind_job_op *op;
 	u64 end = addr + range;
 
 again:
-	spin_lock(&entity->job.list.lock);
-	list_for_each_entry(bind_job, &entity->job.list.head, entry) {
+	spin_lock(&sched->job.list.lock);
+	list_for_each_entry(__job, &sched->job.list.head, entry) {
+		struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(__job);
+
 		list_for_each_op(op, &bind_job->ops) {
 			if (op->op == OP_UNMAP) {
 				u64 op_addr = op->va.addr;
@@ -1016,7 +1030,7 @@ bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range)
 
 				if (!(end <= op_addr || addr >= op_end)) {
 					nouveau_uvmm_bind_job_get(bind_job);
-					spin_unlock(&entity->job.list.lock);
+					spin_unlock(&sched->job.list.lock);
 					wait_for_completion(&bind_job->complete);
 					nouveau_uvmm_bind_job_put(bind_job);
 					goto again;
@@ -1024,7 +1038,7 @@ bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range)
 			}
 		}
 	}
-	spin_unlock(&entity->job.list.lock);
+	spin_unlock(&sched->job.list.lock);
 }
 
 static int
@@ -1135,12 +1149,52 @@ bind_link_gpuvas(struct bind_job_op *bop)
 }
 
 static int
-nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
+bind_lock_validate(struct nouveau_job *job, struct drm_exec *exec,
+		   unsigned int num_fences)
+{
+	struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
+	struct bind_job_op *op;
+	int ret;
+
+	list_for_each_op(op, &bind_job->ops) {
+		struct drm_gpuva_op *va_op;
+
+		if (!op->ops)
+			continue;
+
+		drm_gpuva_for_each_op(va_op, op->ops) {
+			struct drm_gem_object *obj = op_gem_obj(va_op);
+
+			if (unlikely(!obj))
+				continue;
+
+			ret = drm_exec_prepare_obj(exec, obj, num_fences);
+			if (ret)
+				return ret;
+
+			/* Don't validate GEMs backing mappings we're about to
+			 * unmap; it's not worth the effort.
+			 */
+			if (va_op->op == DRM_GPUVA_OP_UNMAP)
+				continue;
+
+			ret = nouveau_bo_validate(nouveau_gem_object(obj),
+						  true, false);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
+			     struct drm_gpuvm_exec *vme)
 {
 	struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
 	struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
-	struct nouveau_sched_entity *entity = job->entity;
-	struct drm_exec *exec = &job->exec;
+	struct drm_exec *exec = &vme->exec;
 	struct bind_job_op *op;
 	int ret;
 
@@ -1157,6 +1211,8 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
 			dma_resv_unlock(obj->resv);
 			if (IS_ERR(op->vm_bo))
 				return PTR_ERR(op->vm_bo);
+
+			drm_gpuvm_bo_extobj_add(op->vm_bo);
 		}
 
 		ret = bind_validate_op(job, op);
@@ -1179,6 +1235,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
 	 * unwind all GPU VA space changes on failure.
 	 */
 	nouveau_uvmm_lock(uvmm);
+
 	list_for_each_op(op, &bind_job->ops) {
 		switch (op->op) {
 		case OP_MAP_SPARSE:
@@ -1290,55 +1347,13 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
 		}
 	}
 
-	drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
-			    DRM_EXEC_IGNORE_DUPLICATES);
+	drm_exec_init(exec, vme->flags);
 	drm_exec_until_all_locked(exec) {
-		list_for_each_op(op, &bind_job->ops) {
-			struct drm_gpuva_op *va_op;
-
-			if (IS_ERR_OR_NULL(op->ops))
-				continue;
-
-			drm_gpuva_for_each_op(va_op, op->ops) {
-				struct drm_gem_object *obj = op_gem_obj(va_op);
-
-				if (unlikely(!obj))
-					continue;
-
-				ret = drm_exec_prepare_obj(exec, obj, 1);
-				drm_exec_retry_on_contention(exec);
-				if (ret) {
-					op = list_last_op(&bind_job->ops);
-					goto unwind;
-				}
-			}
-		}
-	}
-
-	list_for_each_op(op, &bind_job->ops) {
-		struct drm_gpuva_op *va_op;
-
-		if (IS_ERR_OR_NULL(op->ops))
-			continue;
-
-		drm_gpuva_for_each_op(va_op, op->ops) {
-			struct drm_gem_object *obj = op_gem_obj(va_op);
-
-			if (unlikely(!obj))
-				continue;
-
-			/* Don't validate GEMs backing mappings we're about to
-			 * unmap, it's not worth the effort.
-			 */
-			if (unlikely(va_op->op == DRM_GPUVA_OP_UNMAP))
-				continue;
-
-			ret = nouveau_bo_validate(nouveau_gem_object(obj),
-						  true, false);
-			if (ret) {
-				op = list_last_op(&bind_job->ops);
-				goto unwind;
-			}
+		ret = bind_lock_validate(job, exec, vme->num_fences);
+		drm_exec_retry_on_contention(exec);
+		if (ret) {
+			op = list_last_op(&bind_job->ops);
+			goto unwind;
 		}
 	}
 
@@ -1375,10 +1390,6 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
 	}
 	nouveau_uvmm_unlock(uvmm);
 
-	spin_lock(&entity->job.list.lock);
-	list_add(&bind_job->entry, &entity->job.list.head);
-	spin_unlock(&entity->job.list.lock);
-
 	return 0;
 
 unwind_continue:
@@ -1413,21 +1424,17 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
 	}
 
 	nouveau_uvmm_unlock(uvmm);
-	drm_exec_fini(exec);
+	drm_gpuvm_exec_unlock(vme);
 	return ret;
 }
 
 static void
-nouveau_uvmm_bind_job_armed_submit(struct nouveau_job *job)
+nouveau_uvmm_bind_job_armed_submit(struct nouveau_job *job,
+				   struct drm_gpuvm_exec *vme)
 {
-	struct drm_exec *exec = &job->exec;
-	struct drm_gem_object *obj;
-	unsigned long index;
-
-	drm_exec_for_each_locked_object(exec, index, obj)
-		dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage);
-
-	drm_exec_fini(exec);
+	drm_gpuvm_exec_resv_add_fence(vme, job->done_fence,
+				      job->resv_usage, job->resv_usage);
+	drm_gpuvm_exec_unlock(vme);
 }
 
 static struct dma_fence *
@@ -1465,14 +1472,11 @@ nouveau_uvmm_bind_job_run(struct nouveau_job *job)
 }
 
 static void
-nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work)
+nouveau_uvmm_bind_job_cleanup(struct nouveau_job *job)
 {
-	struct nouveau_uvmm_bind_job *bind_job =
-		container_of(work, struct nouveau_uvmm_bind_job, work);
-	struct nouveau_job *job = &bind_job->base;
+	struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
 	struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
-	struct nouveau_sched_entity *entity = job->entity;
-	struct bind_job_op *op, *next;
+	struct bind_job_op *op;
 
 	list_for_each_op(op, &bind_job->ops) {
 		struct drm_gem_object *obj = op->gem.obj;
@@ -1524,38 +1528,17 @@ nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work)
 			drm_gem_object_put(obj);
 	}
 
-	spin_lock(&entity->job.list.lock);
-	list_del(&bind_job->entry);
-	spin_unlock(&entity->job.list.lock);
-
+	nouveau_job_done(job);
 	complete_all(&bind_job->complete);
-	wake_up(&entity->job.wq);
-
-	/* Remove and free ops after removing the bind job from the job list to
-	 * avoid races against bind_validate_map_sparse().
-	 */
-	list_for_each_op_safe(op, next, &bind_job->ops) {
-		list_del(&op->entry);
-		kfree(op);
-	}
 
 	nouveau_uvmm_bind_job_put(bind_job);
 }
 
-static void
-nouveau_uvmm_bind_job_free_qwork(struct nouveau_job *job)
-{
-	struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job);
-	struct nouveau_sched_entity *entity = job->entity;
-
-	nouveau_sched_entity_qwork(entity, &bind_job->work);
-}
-
 static struct nouveau_job_ops nouveau_bind_job_ops = {
 	.submit = nouveau_uvmm_bind_job_submit,
 	.armed_submit = nouveau_uvmm_bind_job_armed_submit,
 	.run = nouveau_uvmm_bind_job_run,
-	.free = nouveau_uvmm_bind_job_free_qwork,
+	.free = nouveau_uvmm_bind_job_cleanup,
 };
 
 static int
@@ -1616,7 +1599,6 @@ nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob,
 		return ret;
 
 	INIT_LIST_HEAD(&job->ops);
-	INIT_LIST_HEAD(&job->entry);
 
 	for (i = 0; i < __args->op.count; i++) {
 		ret = bind_job_op_from_uop(&op, &__args->op.s[i]);
@@ -1627,11 +1609,12 @@ nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob,
 	}
 
 	init_completion(&job->complete);
-	INIT_WORK(&job->work, nouveau_uvmm_bind_job_free_work_fn);
 
-	args.sched_entity = __args->sched_entity;
 	args.file_priv = __args->file_priv;
 
+	args.sched = __args->sched;
+	args.credits = 1;
+
 	args.in_sync.count = __args->in_sync.count;
 	args.in_sync.s = __args->in_sync.s;
 
@@ -1757,7 +1740,7 @@ nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev,
 	if (ret)
 		return ret;
 
-	args.sched_entity = &cli->sched_entity;
+	args.sched = &cli->sched;
 	args.file_priv = file_priv;
 
 	ret = nouveau_uvmm_vm_bind(&args);
@@ -1815,8 +1798,17 @@ nouveau_uvmm_free(struct drm_gpuvm *gpuvm)
 	kfree(uvmm);
 }
 
+static int
+nouveau_uvmm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
+{
+	struct nouveau_bo *nvbo = nouveau_gem_object(vm_bo->obj);
+
+	return nouveau_bo_validate(nvbo, true, false);
+}
+
 static const struct drm_gpuvm_ops gpuvm_ops = {
 	.vm_free = nouveau_uvmm_free,
+	.vm_bo_validate = nouveau_uvmm_bo_validate,
 };
 
 int
@@ -1900,12 +1892,8 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
 	MA_STATE(mas, &uvmm->region_mt, 0, 0);
 	struct nouveau_uvma_region *reg;
 	struct nouveau_cli *cli = uvmm->vmm.cli;
-	struct nouveau_sched_entity *entity = &cli->sched_entity;
 	struct drm_gpuva *va, *next;
 
-	rmb(); /* for list_empty to work without lock */
-	wait_event(entity->job.wq, list_empty(&entity->job.list.head));
-
 	nouveau_uvmm_lock(uvmm);
 	drm_gpuvm_for_each_va_safe(va, next, &uvmm->base) {
 		struct nouveau_uvma *uvma = uvma_from_va(va);
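
With the lock-and-validate body factored into bind_lock_validate(), the drm_exec_until_all_locked() loop itself stays tiny: on ww-mutex contention the partial locking is rolled back and the loop body simply runs again. The general shape of the pattern, where the callback is an illustrative stand-in:

#include <drm/drm_exec.h>

/* Illustrative: prepare/validate every object the operation touches;
 * may return -EDEADLK on ww-mutex contention.
 */
static int lock_and_validate_everything(struct drm_exec *exec);

static int sketch_lock_all(struct drm_exec *exec)
{
	int ret;

	drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
			    DRM_EXEC_IGNORE_DUPLICATES);
	drm_exec_until_all_locked(exec) {
		ret = lock_and_validate_everything(exec);

		/* On contention this drops every lock taken so far and
		 * restarts the loop body from the top.
		 */
		drm_exec_retry_on_contention(exec);
		if (ret)
			return ret;	/* caller must still drm_exec_fini() */
	}

	return 0;	/* all locked; caller unlocks via drm_exec_fini() */
}
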
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.h b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
index f0a6d98ace4fd8a01afa3d911747b373a71cf784..9d3c348581eb34a2b70c6211ab5fbca0d4356571 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
@@ -44,8 +44,6 @@ struct nouveau_uvmm_bind_job {
 	struct nouveau_job base;
 
 	struct kref kref;
-	struct list_head entry;
-	struct work_struct work;
 	struct completion complete;
 
 	/* struct bind_job_op */
@@ -54,7 +52,7 @@ struct nouveau_uvmm_bind_job {
 
 struct nouveau_uvmm_bind_job_args {
 	struct drm_file *file_priv;
-	struct nouveau_sched_entity *sched_entity;
+	struct nouveau_sched *sched;
 
 	unsigned int flags;
 
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index c26aab4939fa00c6c1ffc81f06711904c8c141b8..993691b3cc7ead3088a1be762a2dda319b144244 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -22,11 +22,11 @@
 #include <linux/hardirq.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/property.h>
 #include <linux/sizes.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/component.h>
 #include <linux/sys_soc.h>
 #include <drm/drm_fourcc.h>
@@ -4765,7 +4765,7 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
 	if (soc)
 		dispc->feat = soc->data;
 	else
-		dispc->feat = of_match_device(dispc_of_match, &pdev->dev)->data;
+		dispc->feat = device_get_match_data(&pdev->dev);
 
 	r = dispc_errata_i734_wa_init(dispc);
 	if (r)
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 02955f976845967f5fb1aa9645ee2282b4516767..988888e164d7b3e9a87cf340cb12dfebe30d64e8 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -22,12 +22,13 @@
 #include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/property.h>
 #include <linux/gfp.h>
 #include <linux/sizes.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
 #include <linux/of_graph.h>
 #include <linux/regulator/consumer.h>
 #include <linux/suspend.h>
@@ -1445,7 +1446,7 @@ static int dss_probe(struct platform_device *pdev)
 	if (soc)
 		dss->feat = soc->data;
 	else
-		dss->feat = of_match_device(dss_of_match, &pdev->dev)->data;
+		dss->feat = device_get_match_data(&pdev->dev);
 
 	/* Map I/O registers, get and setup clocks. */
 	dss->base = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index c48fa531ca321c6071278f314bf140309e3b3e14..3421e8389222a4559264d405b9e124f8f26e4f64 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -48,7 +48,7 @@ struct omap_gem_object {
 	 *   OMAP_BO_MEM_DMA_API flag set)
 	 *
 	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
-	 *   if they are physically contiguous (when sgt->orig_nents == 1)
+	 *   if they are physically contiguous
 	 *
 	 * - buffers mapped through the TILER when pin_cnt is not zero, in which
 	 *   case the DMA address points to the TILER aperture
@@ -148,12 +148,18 @@ u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
 	return drm_vma_node_offset_addr(&obj->vma_node);
 }
 
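+/*
+ * An imported buffer is considered physically contiguous if the initial
+ * DMA-contiguous run of its scatterlist covers the whole buffer.
+ */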
+static bool omap_gem_sgt_is_contiguous(struct sg_table *sgt, size_t size)
+{
+	return drm_prime_get_contiguous_size(sgt) >= size;
+}
+
 static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
 {
 	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
 		return true;
 
-	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
+	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) &&
+	    omap_gem_sgt_is_contiguous(omap_obj->sgt, omap_obj->base.size))
 		return true;
 
 	return false;
@@ -1385,7 +1391,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
 	union omap_gem_size gsize;
 
 	/* Without a DMM only physically contiguous buffers can be supported. */
-	if (sgt->orig_nents != 1 && !priv->has_dmm)
+	if (!omap_gem_sgt_is_contiguous(sgt, size) && !priv->has_dmm)
 		return ERR_PTR(-EINVAL);
 
 	gsize.bytes = PAGE_ALIGN(size);
@@ -1399,7 +1405,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
 
 	omap_obj->sgt = sgt;
 
-	if (sgt->orig_nents == 1) {
+	if (omap_gem_sgt_is_contiguous(sgt, size)) {
 		omap_obj->dma_addr = sg_dma_address(sgt->sgl);
 	} else {
 		/* Create pages list from sgt */
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index f2267737317158f13c3f512010c1ca0b8a19d83d..c76f186c4baaa76c57260d316b7b25107be691f6 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -203,6 +203,9 @@ struct edp_panel_entry {
 
 	/** @name: Name of this panel (for printing to logs). */
 	const char *name;
+
+	/** @override_edid_mode: Override the mode obtained by edid. */
+	const struct drm_display_mode *override_edid_mode;
 };
 
 struct panel_edp {
@@ -301,6 +304,24 @@ static unsigned int panel_edp_get_display_modes(struct panel_edp *panel,
 	return num;
 }
 
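+/*
+ * Add the hard-coded override mode as the preferred mode, in place of
+ * the modes parsed from EDID. Returns the number of modes added: 1, or
+ * 0 if duplicating the mode failed.
+ */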
+static int panel_edp_override_edid_mode(struct panel_edp *panel,
+					struct drm_connector *connector,
+					const struct drm_display_mode *override_mode)
+{
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_duplicate(connector->dev, override_mode);
+	if (!mode) {
+		dev_err(panel->base.dev, "failed to add additional mode\n");
+		return 0;
+	}
+
+	mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+	drm_mode_set_name(mode);
+	drm_mode_probed_add(connector, mode);
+	return 1;
+}
+
 static int panel_edp_get_non_edid_modes(struct panel_edp *panel,
 					struct drm_connector *connector)
 {
@@ -568,6 +589,10 @@ static int panel_edp_get_modes(struct drm_panel *panel,
 {
 	struct panel_edp *p = to_panel_edp(panel);
 	int num = 0;
+	bool has_hard_coded_modes = p->desc->num_timings || p->desc->num_modes;
+	bool has_override_edid_mode = p->detected_panel &&
+				      p->detected_panel != ERR_PTR(-EINVAL) &&
+				      p->detected_panel->override_edid_mode;
 
 	/* probe EDID if a DDC bus is available */
 	if (p->ddc) {
@@ -575,20 +600,28 @@ static int panel_edp_get_modes(struct drm_panel *panel,
 
 		if (!p->edid)
 			p->edid = drm_get_edid(connector, p->ddc);
-
-		if (p->edid)
-			num += drm_add_edid_modes(connector, p->edid);
+		/*
+		 * If both EDID and hard-coded modes exist, skip the EDID modes
+		 * to avoid ending up with multiple preferred modes.
+		 */
+		if (p->edid && !has_hard_coded_modes) {
+			if (has_override_edid_mode) {
+				/*
+				 * An override mode is specified; use it
+				 * instead of the modes parsed from EDID.
+				 */
+				num += panel_edp_override_edid_mode(p, connector,
+						p->detected_panel->override_edid_mode);
+			} else {
+				num += drm_add_edid_modes(connector, p->edid);
+			}
+		}
 
 		pm_runtime_mark_last_busy(panel->dev);
 		pm_runtime_put_autosuspend(panel->dev);
 	}
 
-	/*
-	 * Add hard-coded panel modes. Don't call this if there are no timings
-	 * and no modes (the generic edp-panel case) because it will clobber
-	 * the display_info that was already set by drm_add_edid_modes().
-	 */
-	if (p->desc->num_timings || p->desc->num_modes)
+	if (has_hard_coded_modes)
 		num += panel_edp_get_non_edid_modes(p, connector);
 	else if (!num)
 		dev_warn(p->base.dev, "No display modes\n");
@@ -950,6 +983,19 @@ static const struct panel_desc auo_b101ean01 = {
 	},
 };
 
+static const struct drm_display_mode auo_b116xa3_mode = {
+	.clock = 70589,
+	.hdisplay = 1366,
+	.hsync_start = 1366 + 40,
+	.hsync_end = 1366 + 40 + 40,
+	.htotal = 1366 + 40 + 40 + 32,
+	.vdisplay = 768,
+	.vsync_start = 768 + 10,
+	.vsync_end = 768 + 10 + 12,
+	.vtotal = 768 + 10 + 12 + 6,
+	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
 static const struct drm_display_mode auo_b116xak01_mode = {
 	.clock = 69300,
 	.hdisplay = 1366,
@@ -1849,6 +1895,15 @@ static const struct panel_delay delay_200_150_e200 = {
 	.delay = _delay \
 }
 
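+/*
+ * Variant of EDP_PANEL_ENTRY for panels whose EDID advertises a broken
+ * mode, allowing a hard-coded mode to be used in its place.
+ */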
+#define EDP_PANEL_ENTRY2(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name, _mode) \
+{ \
+	.name = _name, \
+	.panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
+					     product_id), \
+	.delay = _delay, \
+	.override_edid_mode = _mode \
+}
+
 /*
  * This table is used to figure out power sequencing delays for panels that
  * are detected by EDID. Entries here may point to entries in the
@@ -1866,9 +1921,11 @@ static const struct edp_panel_entry edp_panels[] = {
 	EDP_PANEL_ENTRY('A', 'U', 'O', 0x239b, &delay_200_500_e50, "B116XAN06.1"),
 	EDP_PANEL_ENTRY('A', 'U', 'O', 0x255c, &delay_200_500_e50, "B116XTN02.5"),
 	EDP_PANEL_ENTRY('A', 'U', 'O', 0x403d, &delay_200_500_e50, "B140HAN04.0"),
-	EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0"),
+	EDP_PANEL_ENTRY2('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0",
+			 &auo_b116xa3_mode),
 	EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"),
-	EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
+	EDP_PANEL_ENTRY2('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1",
+			 &auo_b116xa3_mode),
 	EDP_PANEL_ENTRY('A', 'U', 'O', 0x635c, &delay_200_500_e50, "B116XAN06.3"),
 	EDP_PANEL_ENTRY('A', 'U', 'O', 0x639c, &delay_200_500_e50, "B140HAK02.7"),
 	EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
@@ -1910,9 +1967,9 @@ static const struct edp_panel_entry edp_panels[] = {
 	EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"),
 	EDP_PANEL_ENTRY('C', 'M', 'N', 0x144f, &delay_200_500_e80_d50, "N140HGA-EA1"),
 	EDP_PANEL_ENTRY('C', 'M', 'N', 0x1468, &delay_200_500_e80, "N140HGA-EA1"),
-	EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
 	EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"),
 	EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"),
+	EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
 
 	EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5c, &delay_200_500_e200, "MB116AN01-2"),
 
@@ -1926,6 +1983,8 @@ static const struct edp_panel_entry edp_panels[] = {
 	EDP_PANEL_ENTRY('K', 'D', 'C', 0x0809, &delay_200_500_e50, "KD116N2930A15"),
 	EDP_PANEL_ENTRY('K', 'D', 'B', 0x1120, &delay_200_500_e80_d50, "116N29-30NK-C007"),
 
+	EDP_PANEL_ENTRY('S', 'D', 'C', 0x416d, &delay_100_500_e200, "ATNA45AF01"),
+
 	EDP_PANEL_ENTRY('S', 'H', 'P', 0x1511, &delay_200_500_e50, "LQ140M1JW48"),
 	EDP_PANEL_ENTRY('S', 'H', 'P', 0x1523, &sharp_lq140m1jw46.delay, "LQ140M1JW46"),
 	EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"),
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c
index c73243d85de71875db8b489125efbf3856afdf5e..ff0dc08b98297a30739ace085e3d4617e6a0b3e7 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx8394.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c
@@ -38,6 +38,7 @@
 #define HX8394_CMD_SETMIPI	  0xba
 #define HX8394_CMD_SETOTP	  0xbb
 #define HX8394_CMD_SETREGBANK	  0xbd
+#define HX8394_CMD_UNKNOWN5	  0xbf
 #define HX8394_CMD_UNKNOWN1	  0xc0
 #define HX8394_CMD_SETDGCLUT	  0xc1
 #define HX8394_CMD_SETID	  0xc3
@@ -52,6 +53,7 @@
 #define HX8394_CMD_SETGIP1	  0xd5
 #define HX8394_CMD_SETGIP2	  0xd6
 #define HX8394_CMD_SETGPO	  0xd6
+#define HX8394_CMD_UNKNOWN4	  0xd8
 #define HX8394_CMD_SETSCALING	  0xdd
 #define HX8394_CMD_SETIDLE	  0xdf
 #define HX8394_CMD_SETGAMMA	  0xe0
@@ -68,7 +70,7 @@ struct hx8394 {
 	struct gpio_desc *reset_gpio;
 	struct regulator *vcc;
 	struct regulator *iovcc;
-	bool prepared;
+	enum drm_panel_orientation orientation;
 
 	const struct hx8394_panel_desc *desc;
 };
@@ -203,6 +205,140 @@ static const struct hx8394_panel_desc hsd060bhw4_desc = {
 	.init_sequence = hsd060bhw4_init_sequence,
 };
 
+static int powkiddy_x55_init_sequence(struct hx8394 *ctx)
+{
+	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+	/* 5.19.8 SETEXTC: Set extension command (B9h) */
+	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
+			       0xff, 0x83, 0x94);
+
+	/* 5.19.9 SETMIPI: Set MIPI control (BAh) */
+	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
+			       0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+
+	/* 5.19.2 SETPOWER: Set power (B1h) */
+	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
+			       0x48, 0x12, 0x72, 0x09, 0x32, 0x54, 0x71, 0x71, 0x57, 0x47);
+
+	/* 5.19.3 SETDISP: Set display related register (B2h) */
+	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
+			       0x00, 0x80, 0x64, 0x2c, 0x16, 0x2f);
+
+	/* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
+	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
+			       0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c, 0x86, 0x75,
+			       0x00, 0x3f, 0x73, 0x74, 0x73, 0x74, 0x73, 0x74, 0x01, 0x0c,
+			       0x86);
+
+	/* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
+	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
+			       0x6e, 0x6e);
+
+	/* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
+	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
+			       0x00, 0x00, 0x07, 0x07, 0x40, 0x07, 0x0c, 0x00, 0x08, 0x10,
+			       0x08, 0x00, 0x08, 0x54, 0x15, 0x0a, 0x05, 0x0a, 0x02, 0x15,
+			       0x06, 0x05, 0x06, 0x47, 0x44, 0x0a, 0x0a, 0x4b, 0x10, 0x07,
+			       0x07, 0x0c, 0x40);
+
+	/* 5.19.20 Set GIP Option1 (D5h) */
+	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
+			       0x1c, 0x1c, 0x1d, 0x1d, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+			       0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x24, 0x25, 0x18, 0x18,
+			       0x26, 0x27, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+			       0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x20, 0x21,
+			       0x18, 0x18, 0x18, 0x18);
+
+	/* 5.19.21 Set GIP Option2 (D6h) */
+	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
+			       0x1c, 0x1c, 0x1d, 0x1d, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02,
+			       0x01, 0x00, 0x0b, 0x0a, 0x09, 0x08, 0x21, 0x20, 0x18, 0x18,
+			       0x27, 0x26, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+			       0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x25, 0x24,
+			       0x18, 0x18, 0x18, 0x18);
+
+	/* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
+	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
+			       0x00, 0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56,
+			       0x65, 0x66, 0x6e, 0x82, 0x88, 0x8b, 0x9a, 0x9d, 0x98, 0xa8,
+			       0xb9, 0x5d, 0x5c, 0x61, 0x66, 0x6a, 0x6f, 0x7f, 0x7f, 0x00,
+			       0x0a, 0x15, 0x1b, 0x1e, 0x21, 0x24, 0x22, 0x47, 0x56, 0x65,
+			       0x65, 0x6e, 0x81, 0x87, 0x8b, 0x98, 0x9d, 0x99, 0xa8, 0xba,
+			       0x5d, 0x5d, 0x62, 0x67, 0x6b, 0x72, 0x7f, 0x7f);
+
+	/* Unknown command, not listed in the HX8394-F datasheet */
+	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
+			       0x1f, 0x31);
+
+	/* 5.19.17 SETPANEL (CCh) */
+	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
+			       0x0b);
+
+	/* Unknown command, not listed in the HX8394-F datasheet */
+	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
+			       0x02);
+
+	/* 5.19.11 Set register bank (BDh) */
+	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+			       0x02);
+
+	/* Unknown command, not listed in the HX8394-F datasheet */
+	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN4,
+			       0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+			       0xff, 0xff);
+
+	/* 5.19.11 Set register bank (BDh) */
+	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+			       0x00);
+
+	/* 5.19.11 Set register bank (BDh) */
+	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+			       0x01);
+
+	/* 5.19.2 SETPOWER: Set power (B1h) */
+	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
+			       0x00);
+
+	/* 5.19.11 Set register bank (BDh) */
+	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+			       0x00);
+
+	/* Unknown command, not listed in the HX8394-F datasheet */
+	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN5,
+			       0x40, 0x81, 0x50, 0x00, 0x1a, 0xfc, 0x01);
+
+	/* Unknown command, not listed in the HX8394-F datasheet */
+	mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN2,
+			       0xed);
+
+	return 0;
+}
+
+static const struct drm_display_mode powkiddy_x55_mode = {
+	.hdisplay	= 720,
+	.hsync_start	= 720 + 44,
+	.hsync_end	= 720 + 44 + 20,
+	.htotal		= 720 + 44 + 20 + 20,
+	.vdisplay	= 1280,
+	.vsync_start	= 1280 + 12,
+	.vsync_end	= 1280 + 12 + 10,
+	.vtotal		= 1280 + 12 + 10 + 10,
+	.clock		= 63290,
+	.flags		= DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+	.width_mm	= 67,
+	.height_mm	= 121,
+};
+
+static const struct hx8394_panel_desc powkiddy_x55_desc = {
+	.mode = &powkiddy_x55_mode,
+	.lanes = 4,
+	.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+		      MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET,
+	.format = MIPI_DSI_FMT_RGB888,
+	.init_sequence = powkiddy_x55_init_sequence,
+};
+
 static int hx8394_enable(struct drm_panel *panel)
 {
 	struct hx8394 *ctx = panel_to_hx8394(panel);
@@ -262,16 +398,11 @@ static int hx8394_unprepare(struct drm_panel *panel)
 {
 	struct hx8394 *ctx = panel_to_hx8394(panel);
 
-	if (!ctx->prepared)
-		return 0;
-
 	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
 
 	regulator_disable(ctx->iovcc);
 	regulator_disable(ctx->vcc);
 
-	ctx->prepared = false;
-
 	return 0;
 }
 
@@ -280,9 +411,6 @@ static int hx8394_prepare(struct drm_panel *panel)
 	struct hx8394 *ctx = panel_to_hx8394(panel);
 	int ret;
 
-	if (ctx->prepared)
-		return 0;
-
 	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
 
 	ret = regulator_enable(ctx->vcc);
@@ -301,8 +429,6 @@ static int hx8394_prepare(struct drm_panel *panel)
 
 	msleep(180);
 
-	ctx->prepared = true;
-
 	return 0;
 
 disable_vcc:
@@ -335,12 +461,20 @@ static int hx8394_get_modes(struct drm_panel *panel,
 	return 1;
 }
 
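+/* Report the orientation parsed from the devicetree to the DRM core */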
+static enum drm_panel_orientation hx8394_get_orientation(struct drm_panel *panel)
+{
+	struct hx8394 *ctx = panel_to_hx8394(panel);
+
+	return ctx->orientation;
+}
+
 static const struct drm_panel_funcs hx8394_drm_funcs = {
 	.disable   = hx8394_disable,
 	.unprepare = hx8394_unprepare,
 	.prepare   = hx8394_prepare,
 	.enable	   = hx8394_enable,
 	.get_modes = hx8394_get_modes,
+	.get_orientation = hx8394_get_orientation,
 };
 
 static int hx8394_probe(struct mipi_dsi_device *dsi)
@@ -358,6 +492,12 @@ static int hx8394_probe(struct mipi_dsi_device *dsi)
 		return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
 				     "Failed to get reset gpio\n");
 
+	ret = of_drm_get_panel_orientation(dev->of_node, &ctx->orientation);
+	if (ret < 0) {
+		dev_err(dev, "%pOF: failed to get orientation: %d\n", dev->of_node, ret);
+		return ret;
+	}
+
 	mipi_dsi_set_drvdata(dsi, ctx);
 
 	ctx->dev = dev;
@@ -401,27 +541,11 @@ static int hx8394_probe(struct mipi_dsi_device *dsi)
 	return 0;
 }
 
-static void hx8394_shutdown(struct mipi_dsi_device *dsi)
-{
-	struct hx8394 *ctx = mipi_dsi_get_drvdata(dsi);
-	int ret;
-
-	ret = drm_panel_disable(&ctx->panel);
-	if (ret < 0)
-		dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
-
-	ret = drm_panel_unprepare(&ctx->panel);
-	if (ret < 0)
-		dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret);
-}
-
 static void hx8394_remove(struct mipi_dsi_device *dsi)
 {
 	struct hx8394 *ctx = mipi_dsi_get_drvdata(dsi);
 	int ret;
 
-	hx8394_shutdown(dsi);
-
 	ret = mipi_dsi_detach(dsi);
 	if (ret < 0)
 		dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
@@ -431,6 +555,7 @@ static void hx8394_remove(struct mipi_dsi_device *dsi)
 
 static const struct of_device_id hx8394_of_match[] = {
 	{ .compatible = "hannstar,hsd060bhw4", .data = &hsd060bhw4_desc },
+	{ .compatible = "powkiddy,x55-panel", .data = &powkiddy_x55_desc },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, hx8394_of_match);
@@ -438,7 +563,6 @@ MODULE_DEVICE_TABLE(of, hx8394_of_match);
 static struct mipi_dsi_driver hx8394_driver = {
 	.probe	= hx8394_probe,
 	.remove = hx8394_remove,
-	.shutdown = hx8394_shutdown,
 	.driver = {
 		.name = DRV_NAME,
 		.of_match_table = hx8394_of_match,
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 7838947a1bf3c95e836559dd82b2854f9c1af78f..2ffe5f68a89034ea217cfcbe6a0a7f635c302d14 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -830,6 +830,203 @@ static const struct ili9881c_instr w552946ab_init[] = {
 	ILI9881C_SWITCH_PAGE_INSTR(0),
 };
 
+static const struct ili9881c_instr am8001280g_init[] = {
+	ILI9881C_SWITCH_PAGE_INSTR(3),
+	ILI9881C_COMMAND_INSTR(0x01, 0x00),
+	ILI9881C_COMMAND_INSTR(0x02, 0x00),
+	ILI9881C_COMMAND_INSTR(0x03, 0x73),
+	ILI9881C_COMMAND_INSTR(0x04, 0xD3),
+	ILI9881C_COMMAND_INSTR(0x05, 0x00),
+	ILI9881C_COMMAND_INSTR(0x06, 0x0A),
+	ILI9881C_COMMAND_INSTR(0x07, 0x0E),
+	ILI9881C_COMMAND_INSTR(0x08, 0x00),
+	ILI9881C_COMMAND_INSTR(0x09, 0x01),
+	ILI9881C_COMMAND_INSTR(0x0a, 0x01),
+	ILI9881C_COMMAND_INSTR(0x0b, 0x01),
+	ILI9881C_COMMAND_INSTR(0x0c, 0x01),
+	ILI9881C_COMMAND_INSTR(0x0d, 0x01),
+	ILI9881C_COMMAND_INSTR(0x0e, 0x01),
+	ILI9881C_COMMAND_INSTR(0x0f, 0x01),
+	ILI9881C_COMMAND_INSTR(0x10, 0x01),
+	ILI9881C_COMMAND_INSTR(0x11, 0x00),
+	ILI9881C_COMMAND_INSTR(0x12, 0x00),
+	ILI9881C_COMMAND_INSTR(0x13, 0x00),
+	ILI9881C_COMMAND_INSTR(0x14, 0x00),
+	ILI9881C_COMMAND_INSTR(0x15, 0x00),
+	ILI9881C_COMMAND_INSTR(0x16, 0x00),
+	ILI9881C_COMMAND_INSTR(0x17, 0x00),
+	ILI9881C_COMMAND_INSTR(0x18, 0x00),
+	ILI9881C_COMMAND_INSTR(0x19, 0x00),
+	ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+	ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+	ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+	ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+	ILI9881C_COMMAND_INSTR(0x1e, 0x40),
+	ILI9881C_COMMAND_INSTR(0x1f, 0x80),
+	ILI9881C_COMMAND_INSTR(0x20, 0x06),
+	ILI9881C_COMMAND_INSTR(0x21, 0x01),
+	ILI9881C_COMMAND_INSTR(0x22, 0x00),
+	ILI9881C_COMMAND_INSTR(0x23, 0x00),
+	ILI9881C_COMMAND_INSTR(0x24, 0x00),
+	ILI9881C_COMMAND_INSTR(0x25, 0x00),
+	ILI9881C_COMMAND_INSTR(0x26, 0x00),
+	ILI9881C_COMMAND_INSTR(0x27, 0x00),
+	ILI9881C_COMMAND_INSTR(0x28, 0x33),
+	ILI9881C_COMMAND_INSTR(0x29, 0x03),
+	ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+	ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+	ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+	ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+	ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+	ILI9881C_COMMAND_INSTR(0x2f, 0x00),
+	ILI9881C_COMMAND_INSTR(0x30, 0x00),
+	ILI9881C_COMMAND_INSTR(0x31, 0x00),
+	ILI9881C_COMMAND_INSTR(0x32, 0x00),
+	ILI9881C_COMMAND_INSTR(0x33, 0x00),
+	ILI9881C_COMMAND_INSTR(0x34, 0x03),
+	ILI9881C_COMMAND_INSTR(0x35, 0x00),
+	ILI9881C_COMMAND_INSTR(0x36, 0x03),
+	ILI9881C_COMMAND_INSTR(0x37, 0x00),
+	ILI9881C_COMMAND_INSTR(0x38, 0x00),
+	ILI9881C_COMMAND_INSTR(0x39, 0x00),
+	ILI9881C_COMMAND_INSTR(0x3a, 0x40),
+	ILI9881C_COMMAND_INSTR(0x3b, 0x40),
+	ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+	ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+	ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+	ILI9881C_COMMAND_INSTR(0x3f, 0x00),
+	ILI9881C_COMMAND_INSTR(0x40, 0x00),
+	ILI9881C_COMMAND_INSTR(0x41, 0x00),
+	ILI9881C_COMMAND_INSTR(0x42, 0x00),
+	ILI9881C_COMMAND_INSTR(0x43, 0x00),
+	ILI9881C_COMMAND_INSTR(0x44, 0x00),
+
+	ILI9881C_COMMAND_INSTR(0x50, 0x01),
+	ILI9881C_COMMAND_INSTR(0x51, 0x23),
+	ILI9881C_COMMAND_INSTR(0x52, 0x45),
+	ILI9881C_COMMAND_INSTR(0x53, 0x67),
+	ILI9881C_COMMAND_INSTR(0x54, 0x89),
+	ILI9881C_COMMAND_INSTR(0x55, 0xab),
+	ILI9881C_COMMAND_INSTR(0x56, 0x01),
+	ILI9881C_COMMAND_INSTR(0x57, 0x23),
+	ILI9881C_COMMAND_INSTR(0x58, 0x45),
+	ILI9881C_COMMAND_INSTR(0x59, 0x67),
+	ILI9881C_COMMAND_INSTR(0x5a, 0x89),
+	ILI9881C_COMMAND_INSTR(0x5b, 0xab),
+	ILI9881C_COMMAND_INSTR(0x5c, 0xcd),
+	ILI9881C_COMMAND_INSTR(0x5d, 0xef),
+
+	ILI9881C_COMMAND_INSTR(0x5e, 0x11),
+	ILI9881C_COMMAND_INSTR(0x5f, 0x02),
+	ILI9881C_COMMAND_INSTR(0x60, 0x00),
+	ILI9881C_COMMAND_INSTR(0x61, 0x01),
+	ILI9881C_COMMAND_INSTR(0x62, 0x0D),
+	ILI9881C_COMMAND_INSTR(0x63, 0x0C),
+	ILI9881C_COMMAND_INSTR(0x64, 0x0F),
+	ILI9881C_COMMAND_INSTR(0x65, 0x0E),
+	ILI9881C_COMMAND_INSTR(0x66, 0x06),
+	ILI9881C_COMMAND_INSTR(0x67, 0x07),
+	ILI9881C_COMMAND_INSTR(0x68, 0x02),
+	ILI9881C_COMMAND_INSTR(0x69, 0x02),
+	ILI9881C_COMMAND_INSTR(0x6a, 0x08),
+	ILI9881C_COMMAND_INSTR(0x6b, 0x02),
+	ILI9881C_COMMAND_INSTR(0x6c, 0x02),
+	ILI9881C_COMMAND_INSTR(0x6d, 0x02),
+	ILI9881C_COMMAND_INSTR(0x6e, 0x02),
+	ILI9881C_COMMAND_INSTR(0x6f, 0x02),
+	ILI9881C_COMMAND_INSTR(0x70, 0x02),
+	ILI9881C_COMMAND_INSTR(0x71, 0x02),
+	ILI9881C_COMMAND_INSTR(0x72, 0x02),
+	ILI9881C_COMMAND_INSTR(0x73, 0x02),
+	ILI9881C_COMMAND_INSTR(0x74, 0x02),
+	ILI9881C_COMMAND_INSTR(0x75, 0x02),
+	ILI9881C_COMMAND_INSTR(0x76, 0x00),
+	ILI9881C_COMMAND_INSTR(0x77, 0x01),
+	ILI9881C_COMMAND_INSTR(0x78, 0x0D),
+	ILI9881C_COMMAND_INSTR(0x79, 0x0C),
+	ILI9881C_COMMAND_INSTR(0x7a, 0x0F),
+	ILI9881C_COMMAND_INSTR(0x7b, 0x0E),
+	ILI9881C_COMMAND_INSTR(0x7c, 0x06),
+	ILI9881C_COMMAND_INSTR(0x7d, 0x07),
+	ILI9881C_COMMAND_INSTR(0x7e, 0x02),
+	ILI9881C_COMMAND_INSTR(0x7f, 0x02),
+	ILI9881C_COMMAND_INSTR(0x80, 0x08),
+	ILI9881C_COMMAND_INSTR(0x81, 0x02),
+	ILI9881C_COMMAND_INSTR(0x82, 0x02),
+	ILI9881C_COMMAND_INSTR(0x83, 0x02),
+	ILI9881C_COMMAND_INSTR(0x84, 0x02),
+	ILI9881C_COMMAND_INSTR(0x85, 0x02),
+	ILI9881C_COMMAND_INSTR(0x86, 0x02),
+	ILI9881C_COMMAND_INSTR(0x87, 0x02),
+	ILI9881C_COMMAND_INSTR(0x88, 0x02),
+	ILI9881C_COMMAND_INSTR(0x89, 0x02),
+	ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+
+	ILI9881C_SWITCH_PAGE_INSTR(4),
+	ILI9881C_COMMAND_INSTR(0x6c, 0x15),
+	ILI9881C_COMMAND_INSTR(0x6e, 0x30),
+	ILI9881C_COMMAND_INSTR(0x6f, 0x33),
+	ILI9881C_COMMAND_INSTR(0x8d, 0x15),
+	ILI9881C_COMMAND_INSTR(0x3a, 0xa4),
+	ILI9881C_COMMAND_INSTR(0x87, 0xba),
+	ILI9881C_COMMAND_INSTR(0x26, 0x76),
+	ILI9881C_COMMAND_INSTR(0xb2, 0xd1),
+
+	ILI9881C_SWITCH_PAGE_INSTR(1),
+	ILI9881C_COMMAND_INSTR(0x22, 0x0A),
+	ILI9881C_COMMAND_INSTR(0x31, 0x0B),
+	ILI9881C_COMMAND_INSTR(0x50, 0xa5),
+	ILI9881C_COMMAND_INSTR(0x51, 0xa0),
+	ILI9881C_COMMAND_INSTR(0x53, 0x70),
+	ILI9881C_COMMAND_INSTR(0x55, 0x7A),
+	ILI9881C_COMMAND_INSTR(0x60, 0x14),
+
+	ILI9881C_COMMAND_INSTR(0xA0, 0x00),
+	ILI9881C_COMMAND_INSTR(0xA1, 0x53),
+	ILI9881C_COMMAND_INSTR(0xA2, 0x50),
+	ILI9881C_COMMAND_INSTR(0xA3, 0x20),
+	ILI9881C_COMMAND_INSTR(0xA4, 0x27),
+	ILI9881C_COMMAND_INSTR(0xA5, 0x33),
+	ILI9881C_COMMAND_INSTR(0xA6, 0x25),
+	ILI9881C_COMMAND_INSTR(0xA7, 0x25),
+	ILI9881C_COMMAND_INSTR(0xA8, 0xD4),
+	ILI9881C_COMMAND_INSTR(0xA9, 0x1A),
+	ILI9881C_COMMAND_INSTR(0xAA, 0x2B),
+	ILI9881C_COMMAND_INSTR(0xAB, 0xB5),
+	ILI9881C_COMMAND_INSTR(0xAC, 0x19),
+	ILI9881C_COMMAND_INSTR(0xAD, 0x18),
+	ILI9881C_COMMAND_INSTR(0xAE, 0x53),
+	ILI9881C_COMMAND_INSTR(0xAF, 0x1A),
+	ILI9881C_COMMAND_INSTR(0xB0, 0x25),
+	ILI9881C_COMMAND_INSTR(0xB1, 0x62),
+	ILI9881C_COMMAND_INSTR(0xB2, 0x6A),
+	ILI9881C_COMMAND_INSTR(0xB3, 0x31),
+
+	ILI9881C_COMMAND_INSTR(0xC0, 0x00),
+	ILI9881C_COMMAND_INSTR(0xC1, 0x53),
+	ILI9881C_COMMAND_INSTR(0xC2, 0x50),
+	ILI9881C_COMMAND_INSTR(0xC3, 0x20),
+	ILI9881C_COMMAND_INSTR(0xC4, 0x27),
+	ILI9881C_COMMAND_INSTR(0xC5, 0x33),
+	ILI9881C_COMMAND_INSTR(0xC6, 0x25),
+	ILI9881C_COMMAND_INSTR(0xC7, 0x25),
+	ILI9881C_COMMAND_INSTR(0xC8, 0xD4),
+	ILI9881C_COMMAND_INSTR(0xC9, 0x1A),
+	ILI9881C_COMMAND_INSTR(0xCA, 0x2B),
+	ILI9881C_COMMAND_INSTR(0xCB, 0xB5),
+	ILI9881C_COMMAND_INSTR(0xCC, 0x19),
+	ILI9881C_COMMAND_INSTR(0xCD, 0x18),
+	ILI9881C_COMMAND_INSTR(0xCE, 0x53),
+	ILI9881C_COMMAND_INSTR(0xCF, 0x1A),
+	ILI9881C_COMMAND_INSTR(0xD0, 0x25),
+	ILI9881C_COMMAND_INSTR(0xD1, 0x62),
+	ILI9881C_COMMAND_INSTR(0xD2, 0x6A),
+	ILI9881C_COMMAND_INSTR(0xD3, 0x31),
+	ILI9881C_SWITCH_PAGE_INSTR(0),
+	ILI9881C_COMMAND_INSTR(MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c),
+	ILI9881C_COMMAND_INSTR(MIPI_DCS_WRITE_POWER_SAVE, 0x00),
+};
+
 static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel)
 {
 	return container_of(panel, struct ili9881c, panel);
@@ -1014,6 +1211,23 @@ static const struct drm_display_mode w552946aba_default_mode = {
 	.height_mm	= 121,
 };
 
+static const struct drm_display_mode am8001280g_default_mode = {
+	.clock		= 67911,
+
+	.hdisplay	= 800,
+	.hsync_start	= 800 + 20,
+	.hsync_end	= 800 + 20 + 32,
+	.htotal		= 800 + 20 + 32 + 20,
+
+	.vdisplay	= 1280,
+	.vsync_start	= 1280 + 6,
+	.vsync_end	= 1280 + 6 + 8,
+	.vtotal		= 1280 + 6 + 8 + 4,
+
+	.width_mm	= 94,
+	.height_mm	= 151,
+};
+
 static int ili9881c_get_modes(struct drm_panel *panel,
 			      struct drm_connector *connector)
 {
@@ -1094,6 +1308,8 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
 		return ret;
 	}
 
+	ctx->panel.prepare_prev_first = true;
+
 	ret = drm_panel_of_backlight(&ctx->panel);
 	if (ret)
 		return ret;
@@ -1145,11 +1361,20 @@ static const struct ili9881c_desc w552946aba_desc = {
 		      MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET,
 };
 
+static const struct ili9881c_desc am8001280g_desc = {
+	.init = am8001280g_init,
+	.init_length = ARRAY_SIZE(am8001280g_init),
+	.mode = &am8001280g_default_mode,
+	.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+		      MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM,
+};
+
 static const struct of_device_id ili9881c_of_match[] = {
 	{ .compatible = "bananapi,lhr050h41", .data = &lhr050h41_desc },
 	{ .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc },
 	{ .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc },
 	{ .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc },
+	{ .compatible = "ampire,am8001280g", .data = &am8001280g_desc },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, ili9881c_of_match);
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 9367a4572dcf645037803cb629ae0e1c5ff692ab..8017ad33cf18dfc991ac00a0e8d5e36ba928b510 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -1324,6 +1324,35 @@ static const struct panel_desc bananapi_s070wv20_ct16 = {
 	},
 };
 
+static const struct drm_display_mode boe_bp101wx1_100_mode = {
+	.clock = 78945,
+	.hdisplay = 1280,
+	.hsync_start = 1280 + 0,
+	.hsync_end = 1280 + 0 + 2,
+	.htotal = 1280 + 62 + 0 + 2,
+	.vdisplay = 800,
+	.vsync_start = 800 + 8,
+	.vsync_end = 800 + 8 + 2,
+	.vtotal = 800 + 6 + 8 + 2,
+};
+
+static const struct panel_desc boe_bp101wx1_100 = {
+	.modes = &boe_bp101wx1_100_mode,
+	.num_modes = 1,
+	.bpc = 8,
+	.size = {
+		.width = 217,
+		.height = 136,
+	},
+	.delay = {
+		.enable = 50,
+		.disable = 50,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
+	.connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
 static const struct display_timing boe_ev121wxm_n10_1850_timing = {
 	.pixelclock = { 69922000, 71000000, 72293000 },
 	.hactive = { 1280, 1280, 1280 },
@@ -1973,6 +2002,33 @@ static const struct panel_desc eink_vb3300_kca = {
 	.connector_type = DRM_MODE_CONNECTOR_DPI,
 };
 
+static const struct display_timing evervision_vgg644804_timing = {
+	.pixelclock = { 25175000, 25175000, 25175000 },
+	.hactive = { 640, 640, 640 },
+	.hfront_porch = { 16, 16, 16 },
+	.hback_porch = { 82, 114, 170 },
+	.hsync_len = { 5, 30, 30 },
+	.vactive = { 480, 480, 480 },
+	.vfront_porch = { 10, 10, 10 },
+	.vback_porch = { 30, 32, 34 },
+	.vsync_len = { 1, 3, 5 },
+	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+		 DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+		 DISPLAY_FLAGS_SYNC_POSEDGE,
+};
+
+static const struct panel_desc evervision_vgg644804 = {
+	.timings = &evervision_vgg644804_timing,
+	.num_timings = 1,
+	.bpc = 8,
+	.size = {
+		.width = 115,
+		.height = 86,
+	},
+	.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+	.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
+};
+
 static const struct display_timing evervision_vgg804821_timing = {
 	.pixelclock = { 27600000, 33300000, 50000000 },
 	.hactive = { 800, 800, 800 },
@@ -4253,6 +4309,9 @@ static const struct of_device_id platform_of_match[] = {
 	}, {
 		.compatible = "bananapi,s070wv20-ct16",
 		.data = &bananapi_s070wv20_ct16,
+	}, {
+		.compatible = "boe,bp101wx1-100",
+		.data = &boe_bp101wx1_100,
 	}, {
 		.compatible = "boe,ev121wxm-n10-1850",
 		.data = &boe_ev121wxm_n10_1850,
@@ -4334,6 +4393,9 @@ static const struct of_device_id platform_of_match[] = {
 	}, {
 		.compatible = "eink,vb3300-kca",
 		.data = &eink_vb3300_kca,
+	}, {
+		.compatible = "evervision,vgg644804",
+		.data = &evervision_vgg644804,
 	}, {
 		.compatible = "evervision,vgg804821",
 		.data = &evervision_vgg804821,
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index c90ad5ee34e7aebdbf7a112460f9238e35bd4dca..a45e4addcc19a391c74c6a0c084b2be7a0f61373 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -421,6 +421,9 @@ static int panfrost_device_runtime_suspend(struct device *dev)
 		return -EBUSY;
 
 	panfrost_devfreq_suspend(pfdev);
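+	/* Mask and drain all IRQs so no handler races with the power-off below */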
+	panfrost_job_suspend_irq(pfdev);
+	panfrost_mmu_suspend_irq(pfdev);
+	panfrost_gpu_suspend_irq(pfdev);
 	panfrost_gpu_power_off(pfdev);
 
 	return 0;
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 0fc558db6bfd5713d28e799d1d4e64ea96afbada..62f7e35273857be642fc1f21deb80b65619ba842 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -25,6 +25,13 @@ struct panfrost_perfcnt;
 #define NUM_JOB_SLOTS 3
 #define MAX_PM_DOMAINS 5
 
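+/* Hardware blocks whose IRQs are masked individually across suspend */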
+enum panfrost_drv_comp_bits {
+	PANFROST_COMP_BIT_GPU,
+	PANFROST_COMP_BIT_JOB,
+	PANFROST_COMP_BIT_MMU,
+	PANFROST_COMP_BIT_MAX
+};
+
 /**
  * enum panfrost_gpu_pm - Supported kernel power management features
  * @GPU_PM_CLK_DIS:  Allow disabling clocks during system suspend
@@ -94,6 +101,8 @@ struct panfrost_device {
 	struct device *dev;
 	struct drm_device *ddev;
 	struct platform_device *pdev;
+	int gpu_irq;
+	int mmu_irq;
 
 	void __iomem *iomem;
 	struct clk *clock;
@@ -107,6 +116,7 @@ struct panfrost_device {
 
 	struct panfrost_features features;
 	const struct panfrost_compatible *comp;
+	DECLARE_BITMAP(is_suspended, PANFROST_COMP_BIT_MAX);
 
 	spinlock_t as_lock;
 	unsigned long as_in_use_mask;
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index 09f5e1563ebd43b742dc65da04d867e66616b618..9063ce2546422fd93eb0c0b847cab68aac0ee753 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -22,9 +22,13 @@
 static irqreturn_t panfrost_gpu_irq_handler(int irq, void *data)
 {
 	struct panfrost_device *pfdev = data;
-	u32 state = gpu_read(pfdev, GPU_INT_STAT);
-	u32 fault_status = gpu_read(pfdev, GPU_FAULT_STATUS);
+	u32 fault_status, state;
 
+	if (test_bit(PANFROST_COMP_BIT_GPU, pfdev->is_suspended))
+		return IRQ_NONE;
+
+	fault_status = gpu_read(pfdev, GPU_FAULT_STATUS);
+	state = gpu_read(pfdev, GPU_INT_STAT);
 	if (!state)
 		return IRQ_NONE;
 
@@ -61,6 +65,8 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
 	gpu_write(pfdev, GPU_INT_MASK, 0);
 	gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);
 
+	clear_bit(PANFROST_COMP_BIT_GPU, pfdev->is_suspended);
+
 	gpu_write(pfdev, GPU_CMD, GPU_CMD_SOFT_RESET);
 	ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT,
 		val, val & GPU_IRQ_RESET_COMPLETED, 10, 10000);
@@ -78,7 +84,12 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
 	}
 
 	gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_MASK_ALL);
-	gpu_write(pfdev, GPU_INT_MASK, GPU_IRQ_MASK_ALL);
+
+	/* Only enable the interrupts we care about */
+	gpu_write(pfdev, GPU_INT_MASK,
+		  GPU_IRQ_MASK_ERROR |
+		  GPU_IRQ_PERFCNT_SAMPLE_COMPLETED |
+		  GPU_IRQ_CLEAN_CACHES_COMPLETED);
 
 	/*
 	 * All in-flight jobs should have released their cycle
@@ -425,11 +436,10 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
 
 void panfrost_gpu_power_off(struct panfrost_device *pfdev)
 {
-	u64 core_mask = panfrost_get_core_mask(pfdev);
 	int ret;
 	u32 val;
 
-	gpu_write(pfdev, SHADER_PWROFF_LO, pfdev->features.shader_present & core_mask);
+	gpu_write(pfdev, SHADER_PWROFF_LO, pfdev->features.shader_present);
 	ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_PWRTRANS_LO,
 					 val, !val, 1, 1000);
 	if (ret)
@@ -441,16 +451,24 @@ void panfrost_gpu_power_off(struct panfrost_device *pfdev)
 	if (ret)
 		dev_err(pfdev->dev, "tiler power transition timeout");
 
-	gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present & core_mask);
+	gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present);
 	ret = readl_poll_timeout(pfdev->iomem + L2_PWRTRANS_LO,
 				 val, !val, 0, 1000);
 	if (ret)
 		dev_err(pfdev->dev, "l2 power transition timeout");
 }
 
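+/*
+ * Mark the GPU block as suspended, mask its interrupts and wait for any
+ * handler still running to finish before the device is powered off.
+ */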
+void panfrost_gpu_suspend_irq(struct panfrost_device *pfdev)
+{
+	set_bit(PANFROST_COMP_BIT_GPU, pfdev->is_suspended);
+
+	gpu_write(pfdev, GPU_INT_MASK, 0);
+	synchronize_irq(pfdev->gpu_irq);
+}
+
 int panfrost_gpu_init(struct panfrost_device *pfdev)
 {
-	int err, irq;
+	int err;
 
 	err = panfrost_gpu_soft_reset(pfdev);
 	if (err)
@@ -465,11 +483,11 @@ int panfrost_gpu_init(struct panfrost_device *pfdev)
 
 	dma_set_max_seg_size(pfdev->dev, UINT_MAX);
 
-	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu");
-	if (irq < 0)
-		return irq;
+	pfdev->gpu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu");
+	if (pfdev->gpu_irq < 0)
+		return pfdev->gpu_irq;
 
-	err = devm_request_irq(pfdev->dev, irq, panfrost_gpu_irq_handler,
+	err = devm_request_irq(pfdev->dev, pfdev->gpu_irq, panfrost_gpu_irq_handler,
 			       IRQF_SHARED, KBUILD_MODNAME "-gpu", pfdev);
 	if (err) {
 		dev_err(pfdev->dev, "failed to request gpu irq");
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h b/drivers/gpu/drm/panfrost/panfrost_gpu.h
index 876fdad9f7214bbc72c7971c22c4699d7e1f0e1e..d841b86504ea893659a5645e423265e3032d1707 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h
@@ -15,6 +15,7 @@ u32 panfrost_gpu_get_latest_flush_id(struct panfrost_device *pfdev);
 int panfrost_gpu_soft_reset(struct panfrost_device *pfdev);
 void panfrost_gpu_power_on(struct panfrost_device *pfdev);
 void panfrost_gpu_power_off(struct panfrost_device *pfdev);
+void panfrost_gpu_suspend_irq(struct panfrost_device *pfdev);
 
 void panfrost_cycle_counter_get(struct panfrost_device *pfdev);
 void panfrost_cycle_counter_put(struct panfrost_device *pfdev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index f9446e197428d01307aef051116f135ba971fa6f..0c2dbf6ef2a554914b0306f4569477e99ae67705 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -405,6 +405,8 @@ void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
 	int j;
 	u32 irq_mask = 0;
 
+	clear_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended);
+
 	for (j = 0; j < NUM_JOB_SLOTS; j++) {
 		irq_mask |= MK_JS_MASK(j);
 	}
@@ -413,6 +415,14 @@ void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
 	job_write(pfdev, JOB_INT_MASK, irq_mask);
 }
 
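+/* As for the GPU block: mark suspended, mask the IRQs, drain the handler */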
+void panfrost_job_suspend_irq(struct panfrost_device *pfdev)
+{
+	set_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended);
+
+	job_write(pfdev, JOB_INT_MASK, 0);
+	synchronize_irq(pfdev->js->irq);
+}
+
 static void panfrost_job_handle_err(struct panfrost_device *pfdev,
 				    struct panfrost_job *job,
 				    unsigned int js)
@@ -792,17 +802,25 @@ static irqreturn_t panfrost_job_irq_handler_thread(int irq, void *data)
 	struct panfrost_device *pfdev = data;
 
 	panfrost_job_handle_irqs(pfdev);
-	job_write(pfdev, JOB_INT_MASK,
-		  GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
-		  GENMASK(NUM_JOB_SLOTS - 1, 0));
+
+	/* Enable interrupts only if we're not about to get suspended */
+	if (!test_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended))
+		job_write(pfdev, JOB_INT_MASK,
+			  GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
+			  GENMASK(NUM_JOB_SLOTS - 1, 0));
+
 	return IRQ_HANDLED;
 }
 
 static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
 {
 	struct panfrost_device *pfdev = data;
-	u32 status = job_read(pfdev, JOB_INT_STAT);
+	u32 status;
+
+	if (test_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended))
+		return IRQ_NONE;
 
+	status = job_read(pfdev, JOB_INT_STAT);
 	if (!status)
 		return IRQ_NONE;
 
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
index 17ff808dba078f4522c7dd03081c1bed8800aa28..ec581b97852b61847330c78b77b97b049d042b9f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.h
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -47,6 +47,7 @@ int panfrost_job_get_slot(struct panfrost_job *job);
 int panfrost_job_push(struct panfrost_job *job);
 void panfrost_job_put(struct panfrost_job *job);
 void panfrost_job_enable_interrupts(struct panfrost_device *pfdev);
+void panfrost_job_suspend_irq(struct panfrost_device *pfdev);
 int panfrost_job_is_idle(struct panfrost_device *pfdev);
 
 #endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 846dd697c4106b091439c245e09069b691b51705..f38385fe76bbb45d92bf75cf078faec1f8be52ff 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -231,6 +231,8 @@ void panfrost_mmu_reset(struct panfrost_device *pfdev)
 {
 	struct panfrost_mmu *mmu, *mmu_tmp;
 
+	clear_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended);
+
 	spin_lock(&pfdev->as_lock);
 
 	pfdev->as_alloc_mask = 0;
@@ -670,6 +672,9 @@ static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
 {
 	struct panfrost_device *pfdev = data;
 
+	if (test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended))
+		return IRQ_NONE;
+
 	if (!mmu_read(pfdev, MMU_INT_STAT))
 		return IRQ_NONE;
 
@@ -744,22 +749,25 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
 			status = mmu_read(pfdev, MMU_INT_RAWSTAT) & ~pfdev->as_faulty_mask;
 	}
 
-	spin_lock(&pfdev->as_lock);
-	mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
-	spin_unlock(&pfdev->as_lock);
+	/* Enable interrupts only if we're not about to get suspended */
+	if (!test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended)) {
+		spin_lock(&pfdev->as_lock);
+		mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
+		spin_unlock(&pfdev->as_lock);
+	}
 
 	return IRQ_HANDLED;
 };
 
 int panfrost_mmu_init(struct panfrost_device *pfdev)
 {
-	int err, irq;
+	int err;
 
-	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
-	if (irq < 0)
-		return irq;
+	pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
+	if (pfdev->mmu_irq < 0)
+		return pfdev->mmu_irq;
 
-	err = devm_request_threaded_irq(pfdev->dev, irq,
+	err = devm_request_threaded_irq(pfdev->dev, pfdev->mmu_irq,
 					panfrost_mmu_irq_handler,
 					panfrost_mmu_irq_handler_thread,
 					IRQF_SHARED, KBUILD_MODNAME "-mmu",
@@ -777,3 +785,11 @@ void panfrost_mmu_fini(struct panfrost_device *pfdev)
 {
 	mmu_write(pfdev, MMU_INT_MASK, 0);
 }
+
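+/* Mark the MMU as suspended, mask its IRQs and drain any running handler */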
+void panfrost_mmu_suspend_irq(struct panfrost_device *pfdev)
+{
+	set_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended);
+
+	mmu_write(pfdev, MMU_INT_MASK, 0);
+	synchronize_irq(pfdev->mmu_irq);
+}
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
index cc2a0d307febcd2971e8ed046500100b43f1c1d5..022a9a74a114195266498992be7a5168b97ffa9d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.h
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
@@ -14,6 +14,7 @@ void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping);
 int panfrost_mmu_init(struct panfrost_device *pfdev);
 void panfrost_mmu_fini(struct panfrost_device *pfdev);
 void panfrost_mmu_reset(struct panfrost_device *pfdev);
+void panfrost_mmu_suspend_irq(struct panfrost_device *pfdev);
 
 u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
 void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 404b0483bb7cb91d98703f1cf13a404cb642d1d6..c6d35c33d5d636dad6344a4a6831618d1cdb3dc3 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -485,7 +485,6 @@ static int qxl_primary_atomic_check(struct drm_plane *plane,
 static int qxl_primary_apply_cursor(struct qxl_device *qdev,
 				    struct drm_plane_state *plane_state)
 {
-	struct drm_framebuffer *fb = plane_state->fb;
 	struct qxl_crtc *qcrtc = to_qxl_crtc(plane_state->crtc);
 	struct qxl_cursor_cmd *cmd;
 	struct qxl_release *release;
@@ -510,8 +509,8 @@ static int qxl_primary_apply_cursor(struct qxl_device *qdev,
 
 	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
 	cmd->type = QXL_CURSOR_SET;
-	cmd->u.set.position.x = plane_state->crtc_x + fb->hot_x;
-	cmd->u.set.position.y = plane_state->crtc_y + fb->hot_y;
+	cmd->u.set.position.x = plane_state->crtc_x + plane_state->hotspot_x;
+	cmd->u.set.position.y = plane_state->crtc_y + plane_state->hotspot_y;
 
 	cmd->u.set.shape = qxl_bo_physical_address(qdev, qcrtc->cursor_bo, 0);
 
@@ -531,7 +530,6 @@ static int qxl_primary_apply_cursor(struct qxl_device *qdev,
 static int qxl_primary_move_cursor(struct qxl_device *qdev,
 				   struct drm_plane_state *plane_state)
 {
-	struct drm_framebuffer *fb = plane_state->fb;
 	struct qxl_crtc *qcrtc = to_qxl_crtc(plane_state->crtc);
 	struct qxl_cursor_cmd *cmd;
 	struct qxl_release *release;
@@ -554,8 +552,8 @@ static int qxl_primary_move_cursor(struct qxl_device *qdev,
 
 	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
 	cmd->type = QXL_CURSOR_MOVE;
-	cmd->u.position.x = plane_state->crtc_x + fb->hot_x;
-	cmd->u.position.y = plane_state->crtc_y + fb->hot_y;
+	cmd->u.position.x = plane_state->crtc_x + plane_state->hotspot_x;
+	cmd->u.position.y = plane_state->crtc_y + plane_state->hotspot_y;
 	qxl_release_unmap(qdev, release, &cmd->release_info);
 
 	qxl_release_fence_buffer_objects(release);
@@ -851,8 +849,8 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
 		struct qxl_bo *old_cursor_bo = qcrtc->cursor_bo;
 
 		qcrtc->cursor_bo = qxl_create_cursor(qdev, user_bo,
-						     new_state->fb->hot_x,
-						     new_state->fb->hot_y);
+						     new_state->hotspot_x,
+						     new_state->hotspot_y);
 		qxl_free_cursor(old_cursor_bo);
 	}
 
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 46de4f171970b61571ab56fb237826d33c5303c5..beee5563031aa8c01dbacd0ca459336c2b0eb063 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -285,7 +285,7 @@ static const struct drm_ioctl_desc qxl_ioctls[] = {
 };
 
 static struct drm_driver qxl_driver = {
-	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_CURSOR_HOTSPOT,
 
 	.dumb_create = qxl_mode_dumb_create,
 	.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 307a890fde133c3f2218639c6a385e72d3b49122..32069acd93f877bdbe8332362cbd2c0b0a5b0e5e 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -119,7 +119,6 @@ struct qxl_output {
 
 #define to_qxl_crtc(x) container_of(x, struct qxl_crtc, base)
 #define drm_connector_to_qxl_output(x) container_of(x, struct qxl_output, base)
-#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc)
 
 struct qxl_mman {
 	struct ttm_device		bdev;
@@ -256,8 +255,6 @@ struct qxl_device {
 
 #define to_qxl(dev) container_of(dev, struct qxl_device, ddev)
 
-int qxl_debugfs_fence_init(struct qxl_device *rdev);
-
 int qxl_device_init(struct qxl_device *qdev, struct pci_dev *pdev);
 void qxl_device_fini(struct qxl_device *qdev);
 
@@ -344,8 +341,6 @@ qxl_image_alloc_objects(struct qxl_device *qdev,
 			int height, int stride);
 void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage);
 
-void qxl_update_screen(struct qxl_device *qxl);
-
 /* qxl io operations (qxl_cmd.c) */
 
 void qxl_io_create_primary(struct qxl_device *qdev,
@@ -445,8 +440,6 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
 
 int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo);
 
-struct qxl_drv_surface *
-qxl_surface_lookup(struct drm_device *dev, int surface_id);
 void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing);
 
 /* qxl_ioctl.c */
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 34a1c73d3938f1b092d6afa09536a561b8604b70..02a65971d140d3044b3609938289b995b6f34ee5 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -33,7 +33,6 @@
 
 #include <linux/firmware.h>
 #include <linux/platform_device.h>
-#include <drm/drm_legacy.h>
 
 #include "radeon_family.h"
 
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c
index 8f9a728affde8375fb71633426072cb0f17c0d6c..07ad17d24294d5e6105a640b3e1ee2bb65889721 100644
--- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c
+++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c
@@ -14,7 +14,6 @@
 #include <drm/drm_fourcc.h>
 #include <drm/drm_framebuffer.h>
 #include <drm/drm_gem_dma_helper.h>
-#include <drm/drm_plane_helper.h>
 
 #include "shmob_drm_drv.h"
 #include "shmob_drm_kms.h"
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 4d42b1e4daa67fcacb382094a7c04af1dab69384..3c4f5a392b0646124a88b81715f4463d4db19ff1 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -81,12 +81,16 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 		 */
 		pr_warn("%s: called with uninitialized scheduler\n", __func__);
 	} else if (num_sched_list) {
-		/* The "priority" of an entity cannot exceed the number
-		 * of run-queues of a scheduler.
+		/* The "priority" of an entity cannot exceed the number of run-queues of a
+		 * scheduler. Protect against num_rqs being 0 by converting to signed, and
+		 * choose the lowest priority available.
 		 */
-		if (entity->priority >= sched_list[0]->num_rqs)
-			entity->priority = max_t(u32, sched_list[0]->num_rqs,
-						 DRM_SCHED_PRIORITY_MIN);
+		if (entity->priority >= sched_list[0]->num_rqs) {
+			drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
+				entity->priority, sched_list[0]->num_rqs);
+			entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
+						 (s32) DRM_SCHED_PRIORITY_KERNEL);
+		}
 		entity->rq = sched_list[0]->sched_rq[entity->priority];
 	}
 
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 044a8c4875ba647cedc9d2a20b3e0ad7d3512cef..550492a7a031d7827b2e167098c495908bee82aa 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -1029,9 +1029,8 @@ EXPORT_SYMBOL(drm_sched_job_cleanup);
 void drm_sched_wakeup(struct drm_gpu_scheduler *sched,
 		      struct drm_sched_entity *entity)
 {
-	if (drm_sched_entity_is_ready(entity))
-		if (drm_sched_can_queue(sched, entity))
-			drm_sched_run_job_queue(sched);
+	if (drm_sched_can_queue(sched, entity))
+		drm_sched_run_job_queue(sched);
 }
 
 /**
@@ -1051,8 +1050,9 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
 	struct drm_sched_entity *entity;
 	int i;
 
-	/* Kernel run queue has higher priority than normal run queue*/
-	for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+	/* Start with the highest priority. */
+	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
 		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
 			drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) :
 			drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]);
@@ -1291,7 +1291,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	if (!sched->sched_rq)
 		goto Out_free;
 	sched->num_rqs = num_rqs;
-	for (i = DRM_SCHED_PRIORITY_MIN; i < sched->num_rqs; i++) {
+	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
 		sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
 		if (!sched->sched_rq[i])
 			goto Out_unroll;
@@ -1312,7 +1312,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	sched->ready = true;
 	return 0;
 Out_unroll:
-	for (--i ; i >= DRM_SCHED_PRIORITY_MIN; i--)
+	for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--)
 		kfree(sched->sched_rq[i]);
 Out_free:
 	kfree(sched->sched_rq);
@@ -1338,7 +1338,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
 
 	drm_sched_wqueue_stop(sched);
 
-	for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
 		struct drm_sched_rq *rq = sched->sched_rq[i];
 
 		spin_lock(&rq->lock);
@@ -1390,9 +1390,7 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
 	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
 		atomic_inc(&bad->karma);
 
-		for (i = DRM_SCHED_PRIORITY_MIN;
-		     i < min_t(typeof(sched->num_rqs), sched->num_rqs, DRM_SCHED_PRIORITY_KERNEL);
-		     i++) {
+		for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) {
 			struct drm_sched_rq *rq = sched->sched_rq[i];
 
 			spin_lock(&rq->lock);
diff --git a/drivers/gpu/drm/solomon/ssd130x.h b/drivers/gpu/drm/solomon/ssd130x.h
index acf7cedf0c1ab31dd60a62b99610c0ae4cd46e71..075c5c3ee75ac195340cff5b465da8a18d446b69 100644
--- a/drivers/gpu/drm/solomon/ssd130x.h
+++ b/drivers/gpu/drm/solomon/ssd130x.h
@@ -17,7 +17,6 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_encoder.h>
-#include <drm/drm_plane_helper.h>
 
 #include <linux/regmap.h>
 
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
index 2645af241ff0b8af1f04e3ba67e2306ee994dc96..d6183b3d768800b1f8886db577626c8495d7dc01 100644
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \
 	drm_format_helper_test.o \
 	drm_format_test.o \
 	drm_framebuffer_test.o \
+	drm_gem_shmem_test.o \
 	drm_managed_test.o \
 	drm_mm_test.o \
 	drm_modes_test.o \
diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
new file mode 100644
index 0000000000000000000000000000000000000000..91202e40cde9463c658d8becb3d3574784b18e2d
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test suite for GEM objects backed by shmem buffers
+ *
+ * Copyright (C) 2023 Red Hat, Inc.
+ *
+ * Author: Marco Pagani <marpagan@redhat.com>
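+ *
+ * Run with: ./tools/testing/kunit/kunit.py run \
+ *           --kunitconfig=drivers/gpu/drm/tests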
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/iosys-map.h>
+#include <linux/sizes.h>
+
+#include <kunit/test.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_kunit_helpers.h>
+
+#define TEST_SIZE		SZ_1M
+#define TEST_BYTE		0xae
+
+/*
+ * Wrappers to avoid explicit type casts when passing action
+ * functions to kunit_add_action().
+ */
+static void kfree_wrapper(void *ptr)
+{
+	const void *obj = ptr;
+
+	kfree(obj);
+}
+
+static void sg_free_table_wrapper(void *ptr)
+{
+	struct sg_table *sgt = ptr;
+
+	sg_free_table(sgt);
+}
+
+static void drm_gem_shmem_free_wrapper(void *ptr)
+{
+	struct drm_gem_shmem_object *shmem = ptr;
+
+	drm_gem_shmem_free(shmem);
+}
+
+/*
+ * Test creating a shmem GEM object backed by a shmem buffer. The test
+ * case succeeds if the GEM object is successfully allocated with its
+ * shmem file node and object functions set, and its size attribute
+ * equal to the requested size.
+ */
+static void drm_gem_shmem_test_obj_create(struct kunit *test)
+{
+	struct drm_device *drm_dev = test->priv;
+	struct drm_gem_shmem_object *shmem;
+
+	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+	KUNIT_EXPECT_EQ(test, shmem->base.size, TEST_SIZE);
+	KUNIT_EXPECT_NOT_NULL(test, shmem->base.filp);
+	KUNIT_EXPECT_NOT_NULL(test, shmem->base.funcs);
+
+	drm_gem_shmem_free(shmem);
+}
+
+/*
+ * Test creating a shmem GEM object from a scatter/gather table exported
+ * via a DMA-BUF. The test case succeeds if the GEM object is successfully
+ * created with the shmem file node attribute equal to NULL and the sgt
+ * attribute pointing to the scatter/gather table that has been imported.
+ */
+static void drm_gem_shmem_test_obj_create_private(struct kunit *test)
+{
+	struct drm_device *drm_dev = test->priv;
+	struct drm_gem_shmem_object *shmem;
+	struct drm_gem_object *gem_obj;
+	struct dma_buf buf_mock;
+	struct dma_buf_attachment attach_mock;
+	struct sg_table *sgt;
+	char *buf;
+	int ret;
+
+	/* Create a mock scatter/gather table */
+	buf = kunit_kzalloc(test, TEST_SIZE, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, buf);
+
+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, sgt);
+
+	ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt);
+	KUNIT_ASSERT_EQ(test, ret, 0);
+
+	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+	KUNIT_ASSERT_EQ(test, ret, 0);
+
+	ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
+	KUNIT_ASSERT_EQ(test, ret, 0);
+
+	sg_init_one(sgt->sgl, buf, TEST_SIZE);
+
+	/* Init a mock DMA-BUF */
+	buf_mock.size = TEST_SIZE;
+	attach_mock.dmabuf = &buf_mock;
+
+	gem_obj = drm_gem_shmem_prime_import_sg_table(drm_dev, &attach_mock, sgt);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gem_obj);
+	KUNIT_EXPECT_EQ(test, gem_obj->size, TEST_SIZE);
+	KUNIT_EXPECT_NULL(test, gem_obj->filp);
+	KUNIT_EXPECT_NOT_NULL(test, gem_obj->funcs);
+
+	/* The scatter/gather table will be freed by drm_gem_shmem_free */
+	kunit_remove_action(test, sg_free_table_wrapper, sgt);
+	kunit_remove_action(test, kfree_wrapper, sgt);
+
+	shmem = to_drm_gem_shmem_obj(gem_obj);
+	KUNIT_EXPECT_PTR_EQ(test, shmem->sgt, sgt);
+
+	drm_gem_shmem_free(shmem);
+}
+
+/*
+ * Test pinning backing pages for a shmem GEM object. The test case
+ * succeeds if a suitable number of backing pages are allocated, and
+ * the pages table counter attribute is increased by one.
+ */
+static void drm_gem_shmem_test_pin_pages(struct kunit *test)
+{
+	struct drm_device *drm_dev = test->priv;
+	struct drm_gem_shmem_object *shmem;
+	int i, ret;
+
+	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+	KUNIT_EXPECT_NULL(test, shmem->pages);
+	KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);
+
+	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+	KUNIT_ASSERT_EQ(test, ret, 0);
+
+	ret = drm_gem_shmem_pin(shmem);
+	KUNIT_ASSERT_EQ(test, ret, 0);
+	KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
+	KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
+
+	for (i = 0; i < (shmem->base.size >> PAGE_SHIFT); i++)
+		KUNIT_ASSERT_NOT_NULL(test, shmem->pages[i]);
+
+	drm_gem_shmem_unpin(shmem);
+	KUNIT_EXPECT_NULL(test, shmem->pages);
+	KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);
+}
+
+/*
+ * Test creating a virtual mapping for a shmem GEM object. The test
+ * case succeeds if the backing memory is mapped and the reference
+ * counter for virtual mapping is increased by one. Moreover, the test
+ * case writes and then reads a test pattern over the mapped memory.
+ */
+static void drm_gem_shmem_test_vmap(struct kunit *test)
+{
+	struct drm_device *drm_dev = test->priv;
+	struct drm_gem_shmem_object *shmem;
+	struct iosys_map map;
+	int ret, i;
+
+	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+	KUNIT_EXPECT_NULL(test, shmem->vaddr);
+	KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+
+	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+	KUNIT_ASSERT_EQ(test, ret, 0);
+
+	ret = drm_gem_shmem_vmap(shmem, &map);
+	KUNIT_ASSERT_EQ(test, ret, 0);
+	KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr);
+	KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map));
+	KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 1);
+
+	iosys_map_memset(&map, 0, TEST_BYTE, TEST_SIZE);
+	for (i = 0; i < TEST_SIZE; i++)
+		KUNIT_EXPECT_EQ(test, iosys_map_rd(&map, i, u8), TEST_BYTE);
+
+	drm_gem_shmem_vunmap(shmem, &map);
+	KUNIT_EXPECT_NULL(test, shmem->vaddr);
+	KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
+}
+
+/*
+ * Test exporting a scatter/gather table of pinned pages suitable for
+ * PRIME usage from a shmem GEM object. The test case succeeds if a
+ * scatter/gather table large enough to accommodate the backing memory
+ * is successfully exported.
+ */
+static void drm_gem_shmem_test_get_pages_sgt(struct kunit *test)
+{
+	struct drm_device *drm_dev = test->priv;
+	struct drm_gem_shmem_object *shmem;
+	struct sg_table *sgt;
+	struct scatterlist *sg;
+	unsigned int si, len = 0;
+	int ret;
+
+	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+
+	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+	KUNIT_ASSERT_EQ(test, ret, 0);
+
+	ret = drm_gem_shmem_pin(shmem);
+	KUNIT_ASSERT_EQ(test, ret, 0);
+
+	sgt = drm_gem_shmem_get_sg_table(shmem);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
+	KUNIT_EXPECT_NULL(test, shmem->sgt);
+
+	ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
+	KUNIT_ASSERT_EQ(test, ret, 0);
+
+	for_each_sgtable_sg(sgt, sg, si) {
+		KUNIT_EXPECT_NOT_NULL(test, sg);
+		len += sg->length;
+	}
+
+	KUNIT_EXPECT_GE(test, len, TEST_SIZE);
+}
+
+/*
+ * Test pinning pages and exporting a scatter/gather table suitable for
+ * driver usage from a shmem GEM object. The test case succeeds if the
+ * backing pages are pinned and a scatter/gather table large enough to
+ * accommodate the backing memory is successfully exported.
+ */
+static void drm_gem_shmem_test_get_sg_table(struct kunit *test)
+{
+	struct drm_device *drm_dev = test->priv;
+	struct drm_gem_shmem_object *shmem;
+	struct sg_table *sgt;
+	struct scatterlist *sg;
+	unsigned int si, len = 0;
+	int ret;
+
+	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+
+	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+	KUNIT_ASSERT_EQ(test, ret, 0);
+
+	/* The scatter/gather table will be freed by drm_gem_shmem_free */
+	sgt = drm_gem_shmem_get_pages_sgt(shmem);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
+	KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
+	KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
+	KUNIT_EXPECT_PTR_EQ(test, sgt, shmem->sgt);
+
+	for_each_sgtable_sg(sgt, sg, si) {
+		KUNIT_EXPECT_NOT_NULL(test, sg);
+		len += sg->length;
+	}
+
+	KUNIT_EXPECT_GE(test, len, TEST_SIZE);
+}
+
+/*
+ * Test updating the madvise state of a shmem GEM object. The test
+ * case checks that the function for setting madv updates it only if
+ * its current value is greater than or equal to zero, and returns
+ * false if it has a negative value.
+ */
+static void drm_gem_shmem_test_madvise(struct kunit *test)
+{
+	struct drm_device *drm_dev = test->priv;
+	struct drm_gem_shmem_object *shmem;
+	int ret;
+
+	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+	KUNIT_ASSERT_EQ(test, shmem->madv, 0);
+
+	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+	KUNIT_ASSERT_EQ(test, ret, 0);
+
+	ret = drm_gem_shmem_madvise(shmem, 1);
+	KUNIT_EXPECT_TRUE(test, ret);
+	KUNIT_ASSERT_EQ(test, shmem->madv, 1);
+
+	/* Set madv to a negative value */
+	ret = drm_gem_shmem_madvise(shmem, -1);
+	KUNIT_EXPECT_FALSE(test, ret);
+	KUNIT_ASSERT_EQ(test, shmem->madv, -1);
+
+	/* Check that madv cannot be set back to a positive value */
+	ret = drm_gem_shmem_madvise(shmem, 0);
+	KUNIT_EXPECT_FALSE(test, ret);
+	KUNIT_ASSERT_EQ(test, shmem->madv, -1);
+}
+
+/*
+ * Test purging a shmem GEM object. First, assert that a newly created
+ * shmem GEM object is not purgeable. Then, set madvise to a positive
+ * value and call drm_gem_shmem_get_pages_sgt() to pin and dma-map the
+ * backing pages. Finally, assert that the shmem GEM object is now
+ * purgeable and purge it.
+ */
+static void drm_gem_shmem_test_purge(struct kunit *test)
+{
+	struct drm_device *drm_dev = test->priv;
+	struct drm_gem_shmem_object *shmem;
+	struct sg_table *sgt;
+	int ret;
+
+	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
+
+	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
+	KUNIT_ASSERT_EQ(test, ret, 0);
+
+	ret = drm_gem_shmem_is_purgeable(shmem);
+	KUNIT_EXPECT_FALSE(test, ret);
+
+	ret = drm_gem_shmem_madvise(shmem, 1);
+	KUNIT_EXPECT_TRUE(test, ret);
+
+	/* The scatter/gather table will be freed by drm_gem_shmem_free */
+	sgt = drm_gem_shmem_get_pages_sgt(shmem);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
+
+	ret = drm_gem_shmem_is_purgeable(shmem);
+	KUNIT_EXPECT_TRUE(test, ret);
+
+	drm_gem_shmem_purge(shmem);
+	KUNIT_EXPECT_NULL(test, shmem->pages);
+	KUNIT_EXPECT_NULL(test, shmem->sgt);
+	KUNIT_EXPECT_EQ(test, shmem->madv, -1);
+}
+
+static int drm_gem_shmem_test_init(struct kunit *test)
+{
+	struct device *dev;
+	struct drm_device *drm_dev;
+
+	/* Allocate a parent device */
+	dev = drm_kunit_helper_alloc_device(test);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+	/*
+	 * The DRM core will automatically initialize the GEM core and create
+	 * a DRM Memory Manager object which provides an address space pool
+	 * for GEM object allocation.
+	 */
+	drm_dev = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm_dev),
+						      0, DRIVER_GEM);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm_dev);
+
+	test->priv = drm_dev;
+
+	return 0;
+}
+
+static struct kunit_case drm_gem_shmem_test_cases[] = {
+	KUNIT_CASE(drm_gem_shmem_test_obj_create),
+	KUNIT_CASE(drm_gem_shmem_test_obj_create_private),
+	KUNIT_CASE(drm_gem_shmem_test_pin_pages),
+	KUNIT_CASE(drm_gem_shmem_test_vmap),
+	KUNIT_CASE(drm_gem_shmem_test_get_pages_sgt),
+	KUNIT_CASE(drm_gem_shmem_test_get_sg_table),
+	KUNIT_CASE(drm_gem_shmem_test_madvise),
+	KUNIT_CASE(drm_gem_shmem_test_purge),
+	{}
+};
+
+static struct kunit_suite drm_gem_shmem_suite = {
+	.name = "drm_gem_shmem",
+	.init = drm_gem_shmem_test_init,
+	.test_cases = drm_gem_shmem_test_cases
+};
+
+kunit_test_suite(drm_gem_shmem_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
index 5e5e466f35d10c50320c42666692a593fe5efd25..5f838980c7a1137577ffef44fef512e692923297 100644
--- a/drivers/gpu/drm/tidss/tidss_crtc.c
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -169,14 +169,10 @@ static void tidss_crtc_atomic_flush(struct drm_crtc *crtc,
 	struct tidss_device *tidss = to_tidss(ddev);
 	unsigned long flags;
 
-	dev_dbg(ddev->dev,
-		"%s: %s enabled %d, needs modeset %d, event %p\n", __func__,
-		crtc->name, drm_atomic_crtc_needs_modeset(crtc->state),
-		crtc->state->enable, crtc->state->event);
-
-	/* There is nothing to do if CRTC is not going to be enabled. */
-	if (!crtc->state->enable)
-		return;
+	dev_dbg(ddev->dev, "%s: %s is %sactive, %s modeset, event %p\n",
+		__func__, crtc->name, crtc->state->active ? "" : "not ",
+		drm_atomic_crtc_needs_modeset(crtc->state) ? "needs" : "doesn't need",
+		crtc->state->event);
 
 	/*
 	 * Flush CRTC changes with go bit only if new modeset is not
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index 9d9dee7abaefddaa7d45ebc001651758a20035d5..1ad711f8d2a8bfccc14bc4f6dfc06b4758c85b7e 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -322,6 +322,60 @@ const struct dispc_features dispc_am625_feats = {
 	.vid_order = { 1, 0 },
 };
 
+const struct dispc_features dispc_am62a7_feats = {
+	/*
+	 * If the code reaches dispc_mode_valid() with VP1,
+	 * it should return MODE_BAD.
+	 */
+	.max_pclk_khz = {
+		[DISPC_VP_TIED_OFF] = 0,
+		[DISPC_VP_DPI] = 165000,
+	},
+
+	.scaling = {
+		.in_width_max_5tap_rgb = 1280,
+		.in_width_max_3tap_rgb = 2560,
+		.in_width_max_5tap_yuv = 2560,
+		.in_width_max_3tap_yuv = 4096,
+		.upscale_limit = 16,
+		.downscale_limit_5tap = 4,
+		.downscale_limit_3tap = 2,
+		/*
+		 * The max supported pixel inc value is 255. The value
+		 * of pixel inc is calculated like this: 1+(xinc-1)*bpp.
+		 * The maximum bpp of all formats supported by the HW
+		 * is 8. So the maximum supported xinc value is 32,
+		 * because 1+(32-1)*8 = 249 <= 255 < 257 = 1+(33-1)*8.
+		 */
+		.xinc_max = 32,
+	},
+
+	.subrev = DISPC_AM62A7,
+
+	.common = "common",
+	.common_regs = tidss_am65x_common_regs,
+
+	.num_vps = 2,
+	.vp_name = { "vp1", "vp2" },
+	.ovr_name = { "ovr1", "ovr2" },
+	.vpclk_name =  { "vp1", "vp2" },
+	/* VP1 of the DSS in AM62A7 SoC is tied off internally */
+	.vp_bus_type = { DISPC_VP_TIED_OFF, DISPC_VP_DPI },
+
+	.vp_feat = { .color = {
+			.has_ctm = true,
+			.gamma_size = 256,
+			.gamma_type = TIDSS_GAMMA_8BIT,
+		},
+	},
+
+	.num_planes = 2,
+	/* note: vid is plane_id 0 and vidl1 is plane_id 1 */
+	.vid_name = { "vid", "vidl1" },
+	.vid_lite = { false, true, },
+	.vid_order = { 1, 0 },
+};
+
 static const u16 *dispc_common_regmap;
 
 struct dss_vp_data {
@@ -824,6 +878,7 @@ dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc)
 	case DISPC_K2G:
 		return dispc_k2g_read_and_clear_irqstatus(dispc);
 	case DISPC_AM625:
+	case DISPC_AM62A7:
 	case DISPC_AM65X:
 	case DISPC_J721E:
 		return dispc_k3_read_and_clear_irqstatus(dispc);
@@ -840,6 +895,7 @@ void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
 		dispc_k2g_set_irqenable(dispc, mask);
 		break;
 	case DISPC_AM625:
+	case DISPC_AM62A7:
 	case DISPC_AM65X:
 	case DISPC_J721E:
 		dispc_k3_set_irqenable(dispc, mask);
@@ -1331,6 +1387,7 @@ void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane,
 					x, y, layer);
 		break;
 	case DISPC_AM625:
+	case DISPC_AM62A7:
 	case DISPC_AM65X:
 		dispc_am65x_ovr_set_plane(dispc, hw_plane, hw_videoport,
 					  x, y, layer);
@@ -2250,6 +2307,7 @@ static void dispc_plane_init(struct dispc_device *dispc)
 		dispc_k2g_plane_init(dispc);
 		break;
 	case DISPC_AM625:
+	case DISPC_AM62A7:
 	case DISPC_AM65X:
 	case DISPC_J721E:
 		dispc_k3_plane_init(dispc);
@@ -2357,6 +2415,7 @@ static void dispc_vp_write_gamma_table(struct dispc_device *dispc,
 		dispc_k2g_vp_write_gamma_table(dispc, hw_videoport);
 		break;
 	case DISPC_AM625:
+	case DISPC_AM62A7:
 	case DISPC_AM65X:
 		dispc_am65x_vp_write_gamma_table(dispc, hw_videoport);
 		break;
@@ -2702,18 +2761,83 @@ static void dispc_init_errata(struct dispc_device *dispc)
 	}
 }
 
-static void dispc_softreset(struct dispc_device *dispc)
+/*
+ * K2G display controller does not support soft reset, so we do a basic manual
+ * reset here: make sure the IRQs are masked and VPs are disabled.
+ */
+static void dispc_softreset_k2g(struct dispc_device *dispc)
+{
+	dispc_set_irqenable(dispc, 0);
+	dispc_read_and_clear_irqstatus(dispc);
+
+	for (unsigned int vp_idx = 0; vp_idx < dispc->feat->num_vps; ++vp_idx)
+		VP_REG_FLD_MOD(dispc, vp_idx, DISPC_VP_CONTROL, 0, 0, 0);
+}
+
+static int dispc_softreset(struct dispc_device *dispc)
 {
 	u32 val;
-	int ret = 0;
+	int ret;
+
+	if (dispc->feat->subrev == DISPC_K2G) {
+		dispc_softreset_k2g(dispc);
+		return 0;
+	}
 
 	/* Soft reset */
 	REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, 1, 1);
 	/* Wait for reset to complete */
 	ret = readl_poll_timeout(dispc->base_common + DSS_SYSSTATUS,
 				 val, val & 1, 100, 5000);
+	if (ret) {
+		dev_err(dispc->dev, "failed to reset dispc\n");
+		return ret;
+	}
+
+	return 0;
+}
+
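+/*
+ * One-time hardware initialization: power up the DSS and enable its
+ * functional clock just long enough to soft reset the hardware, then
+ * return the device to the runtime-suspended state.
+ */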
+static int dispc_init_hw(struct dispc_device *dispc)
+{
+	struct device *dev = dispc->dev;
+	int ret;
+
+	ret = pm_runtime_set_active(dev);
+	if (ret) {
+		dev_err(dev, "Failed to set DSS PM to active\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(dispc->fclk);
+	if (ret) {
+		dev_err(dev, "Failed to enable DSS fclk\n");
+		goto err_runtime_suspend;
+	}
+
+	ret = dispc_softreset(dispc);
 	if (ret)
-		dev_warn(dispc->dev, "failed to reset dispc\n");
+		goto err_clk_disable;
+
+	clk_disable_unprepare(dispc->fclk);
+	ret = pm_runtime_set_suspended(dev);
+	if (ret) {
+		dev_err(dev, "Failed to set DSS PM to suspended\n");
+		return ret;
+	}
+
+	return 0;
+
+err_clk_disable:
+	clk_disable_unprepare(dispc->fclk);
+
+err_runtime_suspend:
+	ret = pm_runtime_set_suspended(dev);
+	if (ret) {
+		dev_err(dev, "Failed to set DSS PM to suspended\n");
+		return ret;
+	}
+
+	return ret;
 }
 
 int dispc_init(struct tidss_device *tidss)
@@ -2777,10 +2901,6 @@ int dispc_init(struct tidss_device *tidss)
 			return r;
 	}
 
-	/* K2G display controller does not support soft reset */
-	if (feat->subrev != DISPC_K2G)
-		dispc_softreset(dispc);
-
 	for (i = 0; i < dispc->feat->num_vps; i++) {
 		u32 gamma_size = dispc->feat->vp_feat.color.gamma_size;
 		u32 *gamma_table;
@@ -2829,6 +2949,10 @@ int dispc_init(struct tidss_device *tidss)
 	of_property_read_u32(dispc->dev->of_node, "max-memory-bandwidth",
 			     &dispc->memory_bandwidth_limit);
 
+	r = dispc_init_hw(dispc);
+	if (r)
+		return r;
+
 	tidss->dispc = dispc;
 
 	return 0;
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h
index 33ac5ad7a423d54a6bfc653747ab2738d6a3b429..086327d51a903f6af0b686695bb3611b249de2d8 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc.h
@@ -54,12 +54,14 @@ enum dispc_vp_bus_type {
 	DISPC_VP_DPI,		/* DPI output */
 	DISPC_VP_OLDI,		/* OLDI (LVDS) output */
 	DISPC_VP_INTERNAL,	/* SoC internal routing */
+	DISPC_VP_TIED_OFF,	/* Tied off / Unavailable */
 	DISPC_VP_MAX_BUS_TYPE,
 };
 
 enum dispc_dss_subrevision {
 	DISPC_K2G,
 	DISPC_AM625,
+	DISPC_AM62A7,
 	DISPC_AM65X,
 	DISPC_J721E,
 };
@@ -88,6 +90,7 @@ struct dispc_features {
 
 extern const struct dispc_features dispc_k2g_feats;
 extern const struct dispc_features dispc_am625_feats;
+extern const struct dispc_features dispc_am62a7_feats;
 extern const struct dispc_features dispc_am65x_feats;
 extern const struct dispc_features dispc_j721e_feats;
 
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index 4d063eb9cd0b747229e0797c037a586dc63aa5e0..d15f836dca95c4ed3b391f76aa4c2cdf177fdb5e 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -32,9 +32,9 @@ int tidss_runtime_get(struct tidss_device *tidss)
 
 	dev_dbg(tidss->dev, "%s\n", __func__);
 
-	r = pm_runtime_get_sync(tidss->dev);
+	r = pm_runtime_resume_and_get(tidss->dev);
 	WARN_ON(r < 0);
-	return r < 0 ? r : 0;
+	return r;
 }
 
 void tidss_runtime_put(struct tidss_device *tidss)
@@ -43,7 +43,9 @@ void tidss_runtime_put(struct tidss_device *tidss)
 
 	dev_dbg(tidss->dev, "%s\n", __func__);
 
-	r = pm_runtime_put_sync(tidss->dev);
+	pm_runtime_mark_last_busy(tidss->dev);
+
+	r = pm_runtime_put_autosuspend(tidss->dev);
 	WARN_ON(r < 0);
 }
 
@@ -136,6 +138,8 @@ static int tidss_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, tidss);
 
+	spin_lock_init(&tidss->wait_lock);
+
 	ret = dispc_init(tidss);
 	if (ret) {
 		dev_err(dev, "failed to initialize dispc: %d\n", ret);
@@ -144,6 +148,9 @@ static int tidss_probe(struct platform_device *pdev)
 
 	pm_runtime_enable(dev);
 
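+	/* Let the device autosuspend after 1 second of inactivity */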
+	pm_runtime_set_autosuspend_delay(dev, 1000);
+	pm_runtime_use_autosuspend(dev);
+
 #ifndef CONFIG_PM
 	/* If we don't have PM, we need to call resume manually */
 	dispc_runtime_resume(tidss->dispc);
@@ -192,6 +199,7 @@ static int tidss_probe(struct platform_device *pdev)
 #ifndef CONFIG_PM
 	dispc_runtime_suspend(tidss->dispc);
 #endif
+	pm_runtime_dont_use_autosuspend(dev);
 	pm_runtime_disable(dev);
 
 	return ret;
@@ -215,6 +223,7 @@ static void tidss_remove(struct platform_device *pdev)
 	/* If we don't have PM, we need to call suspend manually */
 	dispc_runtime_suspend(tidss->dispc);
 #endif
+	pm_runtime_dont_use_autosuspend(dev);
 	pm_runtime_disable(dev);
 
 	/* devm allocated dispc goes away with the dev so mark it NULL */
@@ -231,6 +240,7 @@ static void tidss_shutdown(struct platform_device *pdev)
 static const struct of_device_id tidss_of_table[] = {
 	{ .compatible = "ti,k2g-dss", .data = &dispc_k2g_feats, },
 	{ .compatible = "ti,am625-dss", .data = &dispc_am625_feats, },
+	{ .compatible = "ti,am62a7-dss", .data = &dispc_am62a7_feats, },
 	{ .compatible = "ti,am65x-dss", .data = &dispc_am65x_feats, },
 	{ .compatible = "ti,j721e-dss", .data = &dispc_j721e_feats, },
 	{ }
diff --git a/drivers/gpu/drm/tidss/tidss_irq.c b/drivers/gpu/drm/tidss/tidss_irq.c
index 0c681c7600bcb290afc0eae3c626ed9c943bc85a..604334ef526a04ad5d4d133261beaa31f33106e2 100644
--- a/drivers/gpu/drm/tidss/tidss_irq.c
+++ b/drivers/gpu/drm/tidss/tidss_irq.c
@@ -93,33 +93,21 @@ void tidss_irq_resume(struct tidss_device *tidss)
 	spin_unlock_irqrestore(&tidss->wait_lock, flags);
 }
 
-static void tidss_irq_preinstall(struct drm_device *ddev)
-{
-	struct tidss_device *tidss = to_tidss(ddev);
-
-	spin_lock_init(&tidss->wait_lock);
-
-	tidss_runtime_get(tidss);
-
-	dispc_set_irqenable(tidss->dispc, 0);
-	dispc_read_and_clear_irqstatus(tidss->dispc);
-
-	tidss_runtime_put(tidss);
-}
-
-static void tidss_irq_postinstall(struct drm_device *ddev)
+int tidss_irq_install(struct drm_device *ddev, unsigned int irq)
 {
 	struct tidss_device *tidss = to_tidss(ddev);
-	unsigned long flags;
-	unsigned int i;
+	int ret;
 
-	tidss_runtime_get(tidss);
+	if (irq == IRQ_NOTCONNECTED)
+		return -ENOTCONN;
 
-	spin_lock_irqsave(&tidss->wait_lock, flags);
+	ret = request_irq(irq, tidss_irq_handler, 0, ddev->driver->name, ddev);
+	if (ret)
+		return ret;
 
 	tidss->irq_mask = DSS_IRQ_DEVICE_OCP_ERR;
 
-	for (i = 0; i < tidss->num_crtcs; ++i) {
+	for (unsigned int i = 0; i < tidss->num_crtcs; ++i) {
 		struct tidss_crtc *tcrtc = to_tidss_crtc(tidss->crtcs[i]);
 
 		tidss->irq_mask |= DSS_IRQ_VP_SYNC_LOST(tcrtc->hw_videoport);
@@ -127,28 +115,6 @@ static void tidss_irq_postinstall(struct drm_device *ddev)
 		tidss->irq_mask |= DSS_IRQ_VP_FRAME_DONE(tcrtc->hw_videoport);
 	}
 
-	tidss_irq_update(tidss);
-
-	spin_unlock_irqrestore(&tidss->wait_lock, flags);
-
-	tidss_runtime_put(tidss);
-}
-
-int tidss_irq_install(struct drm_device *ddev, unsigned int irq)
-{
-	int ret;
-
-	if (irq == IRQ_NOTCONNECTED)
-		return -ENOTCONN;
-
-	tidss_irq_preinstall(ddev);
-
-	ret = request_irq(irq, tidss_irq_handler, 0, ddev->driver->name, ddev);
-	if (ret)
-		return ret;
-
-	tidss_irq_postinstall(ddev);
-
 	return 0;
 }
 
@@ -156,9 +122,5 @@ void tidss_irq_uninstall(struct drm_device *ddev)
 {
 	struct tidss_device *tidss = to_tidss(ddev);
 
-	tidss_runtime_get(tidss);
-	dispc_set_irqenable(tidss->dispc, 0);
-	tidss_runtime_put(tidss);
-
 	free_irq(tidss->irq, ddev);
 }
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index d096d8d2bc8f841b21c8dcce267bb616aa1bf7a3..a0e494c806a96299a6c235154f5d9e4ff5035671 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -29,7 +29,7 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
 	tidss_runtime_get(tidss);
 
 	drm_atomic_helper_commit_modeset_disables(ddev, old_state);
-	drm_atomic_helper_commit_planes(ddev, old_state, 0);
+	drm_atomic_helper_commit_planes(ddev, old_state, DRM_PLANE_COMMIT_ACTIVE_ONLY);
 	drm_atomic_helper_commit_modeset_enables(ddev, old_state);
 
 	drm_atomic_helper_commit_hw_done(old_state);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 2f6eaac7f659b4e4369e4e9d9dac5d7194b7a4f0..23bf16f596f617631985c5a039b5c203b67747ca 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -570,19 +570,18 @@ static int tilcdc_pdev_probe(struct platform_device *pdev)
 						       match);
 }
 
-static int tilcdc_pdev_remove(struct platform_device *pdev)
+static void tilcdc_pdev_remove(struct platform_device *pdev)
 {
 	int ret;
 
 	ret = tilcdc_get_external_components(&pdev->dev, NULL);
 	if (ret < 0)
-		return ret;
+		dev_err(&pdev->dev, "tilcdc_get_external_components() failed (%pe)\n",
+			ERR_PTR(ret));
 	else if (ret == 0)
 		tilcdc_fini(platform_get_drvdata(pdev));
 	else
 		component_master_del(&pdev->dev, &tilcdc_comp_ops);
-
-	return 0;
 }
 
 static void tilcdc_pdev_shutdown(struct platform_device *pdev)
@@ -599,7 +598,7 @@ MODULE_DEVICE_TABLE(of, tilcdc_of_match);
 
 static struct platform_driver tilcdc_platform_driver = {
 	.probe      = tilcdc_pdev_probe,
-	.remove     = tilcdc_pdev_remove,
+	.remove_new = tilcdc_pdev_remove,
 	.shutdown   = tilcdc_pdev_shutdown,
 	.driver     = {
 		.name   = "tilcdc",
diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/tiny/ofdrm.c
index 05a72473cfc65e2cfeb3a46c6985621d035be297..ab89b7fc7bf618bdbce7f888cbaf6982aa776159 100644
--- a/drivers/gpu/drm/tiny/ofdrm.c
+++ b/drivers/gpu/drm/tiny/ofdrm.c
@@ -19,7 +19,6 @@
 #include <drm/drm_gem_shmem_helper.h>
 #include <drm/drm_managed.h>
 #include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_simple_kms_helper.h>
 
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index 34bbbd7b53dd9214474fcc0487edbdd9c5bd28a1..7ce1c4617675060f75756014a38c37449f20912c 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -25,7 +25,6 @@
 #include <drm/drm_gem_shmem_helper.h>
 #include <drm/drm_managed.h>
 #include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
 
 #define DRIVER_NAME	"simpledrm"
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index e58b7e2498166af73a105f234ba4f9ec0fcc2195..edf10618fe2b22f70d88d18548bb0475643afee2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -370,7 +370,13 @@ static void ttm_bo_release(struct kref *kref)
 			spin_unlock(&bo->bdev->lru_lock);
 
 			INIT_WORK(&bo->delayed_delete, ttm_bo_delayed_delete);
-			queue_work(bdev->wq, &bo->delayed_delete);
+
+			/* Schedule the worker on the closest NUMA node. This
+			 * improves performance since system memory might be
+			 * cleared on free and that is best done on a CPU core
+			 * close to it.
+			 */
+			queue_work_node(bdev->pool.nid, bdev->wq, &bo->delayed_delete);
 			return;
 		}
 
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index d48b39132b32427e089c01c272d237ec90f3c40b..f5187b384ae9ac8eedede8e6a0d4d56eb8af1670 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -204,7 +204,8 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
 	if (ret)
 		return ret;
 
-	bdev->wq = alloc_workqueue("ttm", WQ_MEM_RECLAIM | WQ_HIGHPRI, 16);
+	bdev->wq = alloc_workqueue("ttm",
+				   WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 16);
 	if (!bdev->wq) {
 		ttm_global_release();
 		return -ENOMEM;
@@ -213,7 +214,8 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
 	bdev->funcs = funcs;
 
 	ttm_sys_man_init(bdev);
-	ttm_pool_init(&bdev->pool, dev, NUMA_NO_NODE, use_dma_alloc, use_dma32);
+
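+	/*
+	 * Initialize the pool on the device's NUMA node, so that the
+	 * delayed-delete worker queued via queue_work_node() on pool.nid
+	 * (see ttm_bo_release()) runs close to the memory it clears.
+	 */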
+	ttm_pool_init(&bdev->pool, dev, dev_to_node(dev), use_dma_alloc, use_dma32);
 
 	bdev->vma_manager = vma_manager;
 	spin_lock_init(&bdev->lru_lock);
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 40876bcdd79a47ac24898ccf08b38014b1e6cd14..7702359c90c22594666347b6cc9d271db01df4bd 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -21,7 +21,6 @@
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_gem_shmem_helper.h>
 #include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_vblank.h>
 
@@ -261,6 +260,22 @@ static const uint64_t udl_primary_plane_fmtmods[] = {
 	DRM_FORMAT_MOD_INVALID
 };
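+/*
+ * The hardware cannot scale or offset the plane, so check the new plane
+ * state against its CRTC state with scaling and positioning disallowed.
+ */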
 
+static int udl_primary_plane_helper_atomic_check(struct drm_plane *plane,
+						 struct drm_atomic_state *state)
+{
+	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+	struct drm_crtc *new_crtc = new_plane_state->crtc;
+	struct drm_crtc_state *new_crtc_state = NULL;
+
+	if (new_crtc)
+		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+
+	return drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+						   DRM_PLANE_NO_SCALING,
+						   DRM_PLANE_NO_SCALING,
+						   false, false);
+}
+
 static void udl_primary_plane_helper_atomic_update(struct drm_plane *plane,
 						   struct drm_atomic_state *state)
 {
@@ -296,7 +311,7 @@ static void udl_primary_plane_helper_atomic_update(struct drm_plane *plane,
 
 static const struct drm_plane_helper_funcs udl_primary_plane_helper_funcs = {
 	DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
-	.atomic_check = drm_plane_helper_atomic_check,
+	.atomic_check = udl_primary_plane_helper_atomic_check,
 	.atomic_update = udl_primary_plane_helper_atomic_update,
 };
 
diff --git a/drivers/gpu/drm/v3d/Makefile b/drivers/gpu/drm/v3d/Makefile
index 4b21b20e49981f0bd1279b81d56673b10dab8aeb..b7d673f1153bef16db3800e50b2bfaf36bf8871b 100644
--- a/drivers/gpu/drm/v3d/Makefile
+++ b/drivers/gpu/drm/v3d/Makefile
@@ -12,7 +12,8 @@ v3d-y := \
 	v3d_perfmon.o \
 	v3d_trace_points.o \
 	v3d_sched.o \
-	v3d_sysfs.o
+	v3d_sysfs.o \
+	v3d_submit.o
 
 v3d-$(CONFIG_DEBUG_FS) += v3d_debugfs.o
 
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index 8b3229a37c6df2c4adbf46eaacb133373568df6b..1bdfac8beafdf9149512e1d2f0f5ff09f2f8480f 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -33,6 +33,9 @@ void v3d_free_object(struct drm_gem_object *obj)
 	struct v3d_dev *v3d = to_v3d_dev(obj->dev);
 	struct v3d_bo *bo = to_v3d_bo(obj);
 
+	if (bo->vaddr)
+		v3d_put_bo_vaddr(bo);
+
 	v3d_mmu_remove_ptes(bo);
 
 	mutex_lock(&v3d->bo_lock);
@@ -134,6 +137,7 @@ struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
 	if (IS_ERR(shmem_obj))
 		return ERR_CAST(shmem_obj);
 	bo = to_v3d_bo(&shmem_obj->base);
+	bo->vaddr = NULL;
 
 	ret = v3d_bo_create_finish(&shmem_obj->base);
 	if (ret)
@@ -167,6 +171,20 @@ v3d_prime_import_sg_table(struct drm_device *dev,
 	return obj;
 }
 
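+/*
+ * Map the BO's backing pages into the kernel's address space with
+ * write-combine attributes, so the CPU job handlers can read and write
+ * the buffer contents. The mapping is dropped by v3d_put_bo_vaddr(), or
+ * in v3d_free_object() if it is still present when the BO is freed.
+ */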
+void v3d_get_bo_vaddr(struct v3d_bo *bo)
+{
+	struct drm_gem_shmem_object *obj = &bo->base;
+
+	bo->vaddr = vmap(obj->pages, obj->base.size >> PAGE_SHIFT, VM_MAP,
+			 pgprot_writecombine(PAGE_KERNEL));
+}
+
+void v3d_put_bo_vaddr(struct v3d_bo *bo)
+{
+	vunmap(bo->vaddr);
+	bo->vaddr = NULL;
+}
+
 int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv)
 {
@@ -233,3 +251,36 @@ int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
 	drm_gem_object_put(gem_obj);
 	return 0;
 }
+
+int
+v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	int ret;
+	struct drm_v3d_wait_bo *args = data;
+	ktime_t start = ktime_get();
+	u64 delta_ns;
+	unsigned long timeout_jiffies =
+		nsecs_to_jiffies_timeout(args->timeout_ns);
+
+	if (args->pad != 0)
+		return -EINVAL;
+
+	ret = drm_gem_dma_resv_wait(file_priv, args->handle,
+				    true, timeout_jiffies);
+
+	/* Decrement the user's timeout, in case we got interrupted
+	 * such that the ioctl will be restarted.
+	 */
+	delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
+	if (delta_ns < args->timeout_ns)
+		args->timeout_ns -= delta_ns;
+	else
+		args->timeout_ns = 0;
+
+	/* Asked to wait beyond the jiffy/scheduler precision? */
+	if (ret == -ETIME && args->timeout_ns)
+		ret = -EAGAIN;
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 44a1ca57d6a447282ce8d4455dacf8d584e99e03..3debf37e7d9ba48c4117ae0361acfe4e881a9c83 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -91,6 +91,9 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
 	case DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT:
 		args->value = 1;
 		return 0;
+	case DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE:
+		args->value = 1;
+		return 0;
 	default:
 		DRM_DEBUG("Unknown parameter %d\n", args->param);
 		return -EINVAL;
@@ -189,6 +192,7 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(V3D_PERFMON_CREATE, v3d_perfmon_create_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(V3D_PERFMON_DESTROY, v3d_perfmon_destroy_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(V3D_PERFMON_GET_VALUES, v3d_perfmon_get_values_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CPU, v3d_submit_cpu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
 };
 
 static const struct drm_driver v3d_drm_driver = {
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 4c59fefaa0b4b18e27886e5d3335452b6f5cae9f..3c7d588665704a0be2d18ac644f4a198b969873f 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -19,7 +19,7 @@ struct reset_control;
 
 #define GMP_GRANULARITY (128 * 1024)
 
-#define V3D_MAX_QUEUES (V3D_CACHE_CLEAN + 1)
+#define V3D_MAX_QUEUES (V3D_CPU + 1)
 
 static inline char *v3d_queue_to_string(enum v3d_queue queue)
 {
@@ -29,6 +29,7 @@ static inline char *v3d_queue_to_string(enum v3d_queue queue)
 	case V3D_TFU: return "tfu";
 	case V3D_CSD: return "csd";
 	case V3D_CACHE_CLEAN: return "cache_clean";
+	case V3D_CPU: return "cpu";
 	}
 	return "UNKNOWN";
 }
@@ -122,6 +123,7 @@ struct v3d_dev {
 	struct v3d_render_job *render_job;
 	struct v3d_tfu_job *tfu_job;
 	struct v3d_csd_job *csd_job;
+	struct v3d_cpu_job *cpu_job;
 
 	struct v3d_queue_state queue[V3D_MAX_QUEUES];
 
@@ -200,6 +202,8 @@ struct v3d_bo {
 	 * v3d_render_job->unref_list
 	 */
 	struct list_head unref_head;
+
+	void *vaddr;
 };
 
 static inline struct v3d_bo *
@@ -312,6 +316,112 @@ struct v3d_csd_job {
 	struct drm_v3d_submit_csd args;
 };
 
+enum v3d_cpu_job_type {
+	V3D_CPU_JOB_TYPE_INDIRECT_CSD = 1,
+	V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY,
+	V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY,
+	V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY,
+	V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY,
+	V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY,
+};
+
+struct v3d_timestamp_query {
+	/* Offset of this query's value within the timestamp BO. */
+	u32 offset;
+
+	/* Syncobj that indicates the timestamp availability */
+	struct drm_syncobj *syncobj;
+};
+
+/* Number of perfmons required to handle all supported performance counters */
+#define V3D_MAX_PERFMONS DIV_ROUND_UP(V3D_PERFCNT_NUM, \
+				      DRM_V3D_MAX_PERF_COUNTERS)
+
+struct v3d_performance_query {
+	/* Performance monitor IDs for this query */
+	u32 kperfmon_ids[V3D_MAX_PERFMONS];
+
+	/* Syncobj that indicates the query availability */
+	struct drm_syncobj *syncobj;
+};
+
+struct v3d_indirect_csd_info {
+	/* Indirect CSD */
+	struct v3d_csd_job *job;
+
+	/* Clean cache job associated to the Indirect CSD job */
+	struct v3d_job *clean_job;
+
+	/* Offset within the BO where the workgroup counts are stored */
+	u32 offset;
+
+	/* Workgroup size */
+	u32 wg_size;
+
+	/* Indices of the uniforms with the workgroup dispatch counts
+	 * in the uniform stream.
+	 */
+	u32 wg_uniform_offsets[3];
+
+	/* Indirect BO */
+	struct drm_gem_object *indirect;
+
+	/* Context of the Indirect CSD job */
+	struct ww_acquire_ctx acquire_ctx;
+};
+
+struct v3d_timestamp_query_info {
+	struct v3d_timestamp_query *queries;
+
+	u32 count;
+};
+
+struct v3d_performance_query_info {
+	struct v3d_performance_query *queries;
+
+	/* Number of performance queries */
+	u32 count;
+
+	/* Number of performance monitors related to that query pool */
+	u32 nperfmons;
+
+	/* Number of performance counters related to that query pool */
+	u32 ncounters;
+};
+
+struct v3d_copy_query_results_info {
+	/* Whether to write to the buffer using 64-bit or 32-bit values */
+	bool do_64bit;
+
+	/* Whether to write to the buffer even if the query is not available */
+	bool do_partial;
+
+	/* Whether to write an availability bit to the buffer */
+	bool availability_bit;
+
+	/* Offset of the copy buffer in the BO */
+	u32 offset;
+
+	/* Stride of the copy buffer in the BO */
+	u32 stride;
+};
+
+struct v3d_cpu_job {
+	struct v3d_job base;
+
+	enum v3d_cpu_job_type job_type;
+
+	struct v3d_indirect_csd_info indirect_csd;
+
+	struct v3d_timestamp_query_info timestamp_query;
+
+	struct v3d_copy_query_results_info copy;
+
+	struct v3d_performance_query_info performance_query;
+};
+
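+/* Handler that performs the actual work for a given CPU job type */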
+typedef void (*v3d_cpu_job_fn)(struct v3d_cpu_job *);
+
 struct v3d_submit_outsync {
 	struct drm_syncobj *syncobj;
 };
@@ -379,12 +489,16 @@ struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size);
 void v3d_free_object(struct drm_gem_object *gem_obj);
 struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
 			     size_t size);
+void v3d_get_bo_vaddr(struct v3d_bo *bo);
+void v3d_put_bo_vaddr(struct v3d_bo *bo);
 int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv);
 int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv);
+int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv);
 struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
 						 struct dma_buf_attachment *attach,
 						 struct sg_table *sgt);
@@ -399,19 +513,21 @@ struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);
 /* v3d_gem.c */
 int v3d_gem_init(struct drm_device *dev);
 void v3d_gem_destroy(struct drm_device *dev);
+void v3d_reset(struct v3d_dev *v3d);
+void v3d_invalidate_caches(struct v3d_dev *v3d);
+void v3d_clean_caches(struct v3d_dev *v3d);
+
+/* v3d_submit.c */
+void v3d_job_cleanup(struct v3d_job *job);
+void v3d_job_put(struct v3d_job *job);
 int v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 int v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
-int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv);
-void v3d_job_cleanup(struct v3d_job *job);
-void v3d_job_put(struct v3d_job *job);
-void v3d_reset(struct v3d_dev *v3d);
-void v3d_invalidate_caches(struct v3d_dev *v3d);
-void v3d_clean_caches(struct v3d_dev *v3d);
+int v3d_submit_cpu_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
 
 /* v3d_irq.c */
 int v3d_irq_init(struct v3d_dev *v3d);
@@ -420,8 +536,6 @@ void v3d_irq_disable(struct v3d_dev *v3d);
 void v3d_irq_reset(struct v3d_dev *v3d);
 
 /* v3d_mmu.c */
-int v3d_mmu_get_offset(struct drm_file *file_priv, struct v3d_bo *bo,
-		       u32 *offset);
 int v3d_mmu_set_page_table(struct v3d_dev *v3d);
 void v3d_mmu_insert_ptes(struct v3d_bo *bo);
 void v3d_mmu_remove_ptes(struct v3d_bo *bo);
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 9d2ac23c29e33e9283241ea96e3fa8f47a542fce..afc565078c785ef8415ba957f57a6f05e575169e 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -11,8 +11,6 @@
 #include <linux/uaccess.h>
 
 #include <drm/drm_managed.h>
-#include <drm/drm_syncobj.h>
-#include <uapi/drm/v3d_drm.h>
 
 #include "v3d_drv.h"
 #include "v3d_regs.h"
@@ -241,772 +239,6 @@ v3d_invalidate_caches(struct v3d_dev *v3d)
 	v3d_invalidate_slices(v3d, 0);
 }
 
-/* Takes the reservation lock on all the BOs being referenced, so that
- * at queue submit time we can update the reservations.
- *
- * We don't lock the RCL the tile alloc/state BOs, or overflow memory
- * (all of which are on exec->unref_list).  They're entirely private
- * to v3d, so we don't attach dma-buf fences to them.
- */
-static int
-v3d_lock_bo_reservations(struct v3d_job *job,
-			 struct ww_acquire_ctx *acquire_ctx)
-{
-	int i, ret;
-
-	ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < job->bo_count; i++) {
-		ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
-		if (ret)
-			goto fail;
-
-		ret = drm_sched_job_add_implicit_dependencies(&job->base,
-							      job->bo[i], true);
-		if (ret)
-			goto fail;
-	}
-
-	return 0;
-
-fail:
-	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
-	return ret;
-}
-
-/**
- * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
- * referenced by the job.
- * @dev: DRM device
- * @file_priv: DRM file for this fd
- * @job: V3D job being set up
- * @bo_handles: GEM handles
- * @bo_count: Number of GEM handles passed in
- *
- * The command validator needs to reference BOs by their index within
- * the submitted job's BO list.  This does the validation of the job's
- * BO list and reference counting for the lifetime of the job.
- *
- * Note that this function doesn't need to unreference the BOs on
- * failure, because that will happen at v3d_exec_cleanup() time.
- */
-static int
-v3d_lookup_bos(struct drm_device *dev,
-	       struct drm_file *file_priv,
-	       struct v3d_job *job,
-	       u64 bo_handles,
-	       u32 bo_count)
-{
-	job->bo_count = bo_count;
-
-	if (!job->bo_count) {
-		/* See comment on bo_index for why we have to check
-		 * this.
-		 */
-		DRM_DEBUG("Rendering requires BOs\n");
-		return -EINVAL;
-	}
-
-	return drm_gem_objects_lookup(file_priv,
-				      (void __user *)(uintptr_t)bo_handles,
-				      job->bo_count, &job->bo);
-}
-
-static void
-v3d_job_free(struct kref *ref)
-{
-	struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
-	int i;
-
-	if (job->bo) {
-		for (i = 0; i < job->bo_count; i++)
-			drm_gem_object_put(job->bo[i]);
-		kvfree(job->bo);
-	}
-
-	dma_fence_put(job->irq_fence);
-	dma_fence_put(job->done_fence);
-
-	if (job->perfmon)
-		v3d_perfmon_put(job->perfmon);
-
-	kfree(job);
-}
-
-static void
-v3d_render_job_free(struct kref *ref)
-{
-	struct v3d_render_job *job = container_of(ref, struct v3d_render_job,
-						  base.refcount);
-	struct v3d_bo *bo, *save;
-
-	list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
-		drm_gem_object_put(&bo->base.base);
-	}
-
-	v3d_job_free(ref);
-}
-
-void v3d_job_cleanup(struct v3d_job *job)
-{
-	if (!job)
-		return;
-
-	drm_sched_job_cleanup(&job->base);
-	v3d_job_put(job);
-}
-
-void v3d_job_put(struct v3d_job *job)
-{
-	kref_put(&job->refcount, job->free);
-}
-
-int
-v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
-		  struct drm_file *file_priv)
-{
-	int ret;
-	struct drm_v3d_wait_bo *args = data;
-	ktime_t start = ktime_get();
-	u64 delta_ns;
-	unsigned long timeout_jiffies =
-		nsecs_to_jiffies_timeout(args->timeout_ns);
-
-	if (args->pad != 0)
-		return -EINVAL;
-
-	ret = drm_gem_dma_resv_wait(file_priv, args->handle,
-				    true, timeout_jiffies);
-
-	/* Decrement the user's timeout, in case we got interrupted
-	 * such that the ioctl will be restarted.
-	 */
-	delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
-	if (delta_ns < args->timeout_ns)
-		args->timeout_ns -= delta_ns;
-	else
-		args->timeout_ns = 0;
-
-	/* Asked to wait beyond the jiffie/scheduler precision? */
-	if (ret == -ETIME && args->timeout_ns)
-		ret = -EAGAIN;
-
-	return ret;
-}
-
-static int
-v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
-	     void **container, size_t size, void (*free)(struct kref *ref),
-	     u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue)
-{
-	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
-	struct v3d_job *job;
-	bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
-	int ret, i;
-
-	*container = kcalloc(1, size, GFP_KERNEL);
-	if (!*container) {
-		DRM_ERROR("Cannot allocate memory for v3d job.");
-		return -ENOMEM;
-	}
-
-	job = *container;
-	job->v3d = v3d;
-	job->free = free;
-	job->file = file_priv;
-
-	ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
-				 1, v3d_priv);
-	if (ret)
-		goto fail;
-
-	if (has_multisync) {
-		if (se->in_sync_count && se->wait_stage == queue) {
-			struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs);
-
-			for (i = 0; i < se->in_sync_count; i++) {
-				struct drm_v3d_sem in;
-
-				if (copy_from_user(&in, handle++, sizeof(in))) {
-					ret = -EFAULT;
-					DRM_DEBUG("Failed to copy wait dep handle.\n");
-					goto fail_deps;
-				}
-				ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in.handle, 0);
-
-				// TODO: Investigate why this was filtered out for the IOCTL.
-				if (ret && ret != -ENOENT)
-					goto fail_deps;
-			}
-		}
-	} else {
-		ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in_sync, 0);
-
-		// TODO: Investigate why this was filtered out for the IOCTL.
-		if (ret && ret != -ENOENT)
-			goto fail_deps;
-	}
-
-	kref_init(&job->refcount);
-
-	return 0;
-
-fail_deps:
-	drm_sched_job_cleanup(&job->base);
-fail:
-	kfree(*container);
-	*container = NULL;
-
-	return ret;
-}
-
-static void
-v3d_push_job(struct v3d_job *job)
-{
-	drm_sched_job_arm(&job->base);
-
-	job->done_fence = dma_fence_get(&job->base.s_fence->finished);
-
-	/* put by scheduler job completion */
-	kref_get(&job->refcount);
-
-	drm_sched_entity_push_job(&job->base);
-}
-
-static void
-v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
-					 struct v3d_job *job,
-					 struct ww_acquire_ctx *acquire_ctx,
-					 u32 out_sync,
-					 struct v3d_submit_ext *se,
-					 struct dma_fence *done_fence)
-{
-	struct drm_syncobj *sync_out;
-	bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
-	int i;
-
-	for (i = 0; i < job->bo_count; i++) {
-		/* XXX: Use shared fences for read-only objects. */
-		dma_resv_add_fence(job->bo[i]->resv, job->done_fence,
-				   DMA_RESV_USAGE_WRITE);
-	}
-
-	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
-
-	/* Update the return sync object for the job */
-	/* If it only supports a single signal semaphore*/
-	if (!has_multisync) {
-		sync_out = drm_syncobj_find(file_priv, out_sync);
-		if (sync_out) {
-			drm_syncobj_replace_fence(sync_out, done_fence);
-			drm_syncobj_put(sync_out);
-		}
-		return;
-	}
-
-	/* If multiple semaphores extension is supported */
-	if (se->out_sync_count) {
-		for (i = 0; i < se->out_sync_count; i++) {
-			drm_syncobj_replace_fence(se->out_syncs[i].syncobj,
-						  done_fence);
-			drm_syncobj_put(se->out_syncs[i].syncobj);
-		}
-		kvfree(se->out_syncs);
-	}
-}
-
-static void
-v3d_put_multisync_post_deps(struct v3d_submit_ext *se)
-{
-	unsigned int i;
-
-	if (!(se && se->out_sync_count))
-		return;
-
-	for (i = 0; i < se->out_sync_count; i++)
-		drm_syncobj_put(se->out_syncs[i].syncobj);
-	kvfree(se->out_syncs);
-}
-
-static int
-v3d_get_multisync_post_deps(struct drm_file *file_priv,
-			    struct v3d_submit_ext *se,
-			    u32 count, u64 handles)
-{
-	struct drm_v3d_sem __user *post_deps;
-	int i, ret;
-
-	if (!count)
-		return 0;
-
-	se->out_syncs = (struct v3d_submit_outsync *)
-			kvmalloc_array(count,
-				       sizeof(struct v3d_submit_outsync),
-				       GFP_KERNEL);
-	if (!se->out_syncs)
-		return -ENOMEM;
-
-	post_deps = u64_to_user_ptr(handles);
-
-	for (i = 0; i < count; i++) {
-		struct drm_v3d_sem out;
-
-		if (copy_from_user(&out, post_deps++, sizeof(out))) {
-			ret = -EFAULT;
-			DRM_DEBUG("Failed to copy post dep handles\n");
-			goto fail;
-		}
-
-		se->out_syncs[i].syncobj = drm_syncobj_find(file_priv,
-							    out.handle);
-		if (!se->out_syncs[i].syncobj) {
-			ret = -EINVAL;
-			goto fail;
-		}
-	}
-	se->out_sync_count = count;
-
-	return 0;
-
-fail:
-	for (i--; i >= 0; i--)
-		drm_syncobj_put(se->out_syncs[i].syncobj);
-	kvfree(se->out_syncs);
-
-	return ret;
-}
-
-/* Get data for multiple binary semaphores synchronization. Parse syncobj
- * to be signaled when job completes (out_sync).
- */
-static int
-v3d_get_multisync_submit_deps(struct drm_file *file_priv,
-			      struct drm_v3d_extension __user *ext,
-			      void *data)
-{
-	struct drm_v3d_multi_sync multisync;
-	struct v3d_submit_ext *se = data;
-	int ret;
-
-	if (copy_from_user(&multisync, ext, sizeof(multisync)))
-		return -EFAULT;
-
-	if (multisync.pad)
-		return -EINVAL;
-
-	ret = v3d_get_multisync_post_deps(file_priv, data, multisync.out_sync_count,
-					  multisync.out_syncs);
-	if (ret)
-		return ret;
-
-	se->in_sync_count = multisync.in_sync_count;
-	se->in_syncs = multisync.in_syncs;
-	se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC;
-	se->wait_stage = multisync.wait_stage;
-
-	return 0;
-}
-
-/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
- * according to the extension id (name).
- */
-static int
-v3d_get_extensions(struct drm_file *file_priv,
-		   u64 ext_handles,
-		   void *data)
-{
-	struct drm_v3d_extension __user *user_ext;
-	int ret;
-
-	user_ext = u64_to_user_ptr(ext_handles);
-	while (user_ext) {
-		struct drm_v3d_extension ext;
-
-		if (copy_from_user(&ext, user_ext, sizeof(ext))) {
-			DRM_DEBUG("Failed to copy submit extension\n");
-			return -EFAULT;
-		}
-
-		switch (ext.id) {
-		case DRM_V3D_EXT_ID_MULTI_SYNC:
-			ret = v3d_get_multisync_submit_deps(file_priv, user_ext, data);
-			if (ret)
-				return ret;
-			break;
-		default:
-			DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id);
-			return -EINVAL;
-		}
-
-		user_ext = u64_to_user_ptr(ext.next);
-	}
-
-	return 0;
-}
-
-/**
- * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
- * @dev: DRM device
- * @data: ioctl argument
- * @file_priv: DRM file for this fd
- *
- * This is the main entrypoint for userspace to submit a 3D frame to
- * the GPU.  Userspace provides the binner command list (if
- * applicable), and the kernel sets up the render command list to draw
- * to the framebuffer described in the ioctl, using the command lists
- * that the 3D engine's binner will produce.
- */
-int
-v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv)
-{
-	struct v3d_dev *v3d = to_v3d_dev(dev);
-	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
-	struct drm_v3d_submit_cl *args = data;
-	struct v3d_submit_ext se = {0};
-	struct v3d_bin_job *bin = NULL;
-	struct v3d_render_job *render = NULL;
-	struct v3d_job *clean_job = NULL;
-	struct v3d_job *last_job;
-	struct ww_acquire_ctx acquire_ctx;
-	int ret = 0;
-
-	trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
-
-	if (args->pad)
-		return -EINVAL;
-
-	if (args->flags &&
-	    args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE |
-			    DRM_V3D_SUBMIT_EXTENSION)) {
-		DRM_INFO("invalid flags: %d\n", args->flags);
-		return -EINVAL;
-	}
-
-	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
-		ret = v3d_get_extensions(file_priv, args->extensions, &se);
-		if (ret) {
-			DRM_DEBUG("Failed to get extensions.\n");
-			return ret;
-		}
-	}
-
-	ret = v3d_job_init(v3d, file_priv, (void *)&render, sizeof(*render),
-			   v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
-	if (ret)
-		goto fail;
-
-	render->start = args->rcl_start;
-	render->end = args->rcl_end;
-	INIT_LIST_HEAD(&render->unref_list);
-
-	if (args->bcl_start != args->bcl_end) {
-		ret = v3d_job_init(v3d, file_priv, (void *)&bin, sizeof(*bin),
-				   v3d_job_free, args->in_sync_bcl, &se, V3D_BIN);
-		if (ret)
-			goto fail;
-
-		bin->start = args->bcl_start;
-		bin->end = args->bcl_end;
-		bin->qma = args->qma;
-		bin->qms = args->qms;
-		bin->qts = args->qts;
-		bin->render = render;
-	}
-
-	if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
-		ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job),
-				   v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
-		if (ret)
-			goto fail;
-
-		last_job = clean_job;
-	} else {
-		last_job = &render->base;
-	}
-
-	ret = v3d_lookup_bos(dev, file_priv, last_job,
-			     args->bo_handles, args->bo_handle_count);
-	if (ret)
-		goto fail;
-
-	ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
-	if (ret)
-		goto fail;
-
-	if (args->perfmon_id) {
-		render->base.perfmon = v3d_perfmon_find(v3d_priv,
-							args->perfmon_id);
-
-		if (!render->base.perfmon) {
-			ret = -ENOENT;
-			goto fail_perfmon;
-		}
-	}
-
-	mutex_lock(&v3d->sched_lock);
-	if (bin) {
-		bin->base.perfmon = render->base.perfmon;
-		v3d_perfmon_get(bin->base.perfmon);
-		v3d_push_job(&bin->base);
-
-		ret = drm_sched_job_add_dependency(&render->base.base,
-						   dma_fence_get(bin->base.done_fence));
-		if (ret)
-			goto fail_unreserve;
-	}
-
-	v3d_push_job(&render->base);
-
-	if (clean_job) {
-		struct dma_fence *render_fence =
-			dma_fence_get(render->base.done_fence);
-		ret = drm_sched_job_add_dependency(&clean_job->base,
-						   render_fence);
-		if (ret)
-			goto fail_unreserve;
-		clean_job->perfmon = render->base.perfmon;
-		v3d_perfmon_get(clean_job->perfmon);
-		v3d_push_job(clean_job);
-	}
-
-	mutex_unlock(&v3d->sched_lock);
-
-	v3d_attach_fences_and_unlock_reservation(file_priv,
-						 last_job,
-						 &acquire_ctx,
-						 args->out_sync,
-						 &se,
-						 last_job->done_fence);
-
-	if (bin)
-		v3d_job_put(&bin->base);
-	v3d_job_put(&render->base);
-	if (clean_job)
-		v3d_job_put(clean_job);
-
-	return 0;
-
-fail_unreserve:
-	mutex_unlock(&v3d->sched_lock);
-fail_perfmon:
-	drm_gem_unlock_reservations(last_job->bo,
-				    last_job->bo_count, &acquire_ctx);
-fail:
-	v3d_job_cleanup((void *)bin);
-	v3d_job_cleanup((void *)render);
-	v3d_job_cleanup(clean_job);
-	v3d_put_multisync_post_deps(&se);
-
-	return ret;
-}
-
-/**
- * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
- * @dev: DRM device
- * @data: ioctl argument
- * @file_priv: DRM file for this fd
- *
- * Userspace provides the register setup for the TFU, which we don't
- * need to validate since the TFU is behind the MMU.
- */
-int
-v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
-		     struct drm_file *file_priv)
-{
-	struct v3d_dev *v3d = to_v3d_dev(dev);
-	struct drm_v3d_submit_tfu *args = data;
-	struct v3d_submit_ext se = {0};
-	struct v3d_tfu_job *job = NULL;
-	struct ww_acquire_ctx acquire_ctx;
-	int ret = 0;
-
-	trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);
-
-	if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
-		DRM_DEBUG("invalid flags: %d\n", args->flags);
-		return -EINVAL;
-	}
-
-	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
-		ret = v3d_get_extensions(file_priv, args->extensions, &se);
-		if (ret) {
-			DRM_DEBUG("Failed to get extensions.\n");
-			return ret;
-		}
-	}
-
-	ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job),
-			   v3d_job_free, args->in_sync, &se, V3D_TFU);
-	if (ret)
-		goto fail;
-
-	job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
-			       sizeof(*job->base.bo), GFP_KERNEL);
-	if (!job->base.bo) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	job->args = *args;
-
-	for (job->base.bo_count = 0;
-	     job->base.bo_count < ARRAY_SIZE(args->bo_handles);
-	     job->base.bo_count++) {
-		struct drm_gem_object *bo;
-
-		if (!args->bo_handles[job->base.bo_count])
-			break;
-
-		bo = drm_gem_object_lookup(file_priv, args->bo_handles[job->base.bo_count]);
-		if (!bo) {
-			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
-				  job->base.bo_count,
-				  args->bo_handles[job->base.bo_count]);
-			ret = -ENOENT;
-			goto fail;
-		}
-		job->base.bo[job->base.bo_count] = bo;
-	}
-
-	ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
-	if (ret)
-		goto fail;
-
-	mutex_lock(&v3d->sched_lock);
-	v3d_push_job(&job->base);
-	mutex_unlock(&v3d->sched_lock);
-
-	v3d_attach_fences_and_unlock_reservation(file_priv,
-						 &job->base, &acquire_ctx,
-						 args->out_sync,
-						 &se,
-						 job->base.done_fence);
-
-	v3d_job_put(&job->base);
-
-	return 0;
-
-fail:
-	v3d_job_cleanup((void *)job);
-	v3d_put_multisync_post_deps(&se);
-
-	return ret;
-}
-
-/**
- * v3d_submit_csd_ioctl() - Submits a CSD (texture formatting) job to the V3D.
- * @dev: DRM device
- * @data: ioctl argument
- * @file_priv: DRM file for this fd
- *
- * Userspace provides the register setup for the CSD, which we don't
- * need to validate since the CSD is behind the MMU.
- */
-int
-v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
-		     struct drm_file *file_priv)
-{
-	struct v3d_dev *v3d = to_v3d_dev(dev);
-	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
-	struct drm_v3d_submit_csd *args = data;
-	struct v3d_submit_ext se = {0};
-	struct v3d_csd_job *job = NULL;
-	struct v3d_job *clean_job = NULL;
-	struct ww_acquire_ctx acquire_ctx;
-	int ret;
-
-	trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);
-
-	if (args->pad)
-		return -EINVAL;
-
-	if (!v3d_has_csd(v3d)) {
-		DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
-		return -EINVAL;
-	}
-
-	if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
-		DRM_INFO("invalid flags: %d\n", args->flags);
-		return -EINVAL;
-	}
-
-	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
-		ret = v3d_get_extensions(file_priv, args->extensions, &se);
-		if (ret) {
-			DRM_DEBUG("Failed to get extensions.\n");
-			return ret;
-		}
-	}
-
-	ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job),
-			   v3d_job_free, args->in_sync, &se, V3D_CSD);
-	if (ret)
-		goto fail;
-
-	ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job),
-			   v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
-	if (ret)
-		goto fail;
-
-	job->args = *args;
-
-	ret = v3d_lookup_bos(dev, file_priv, clean_job,
-			     args->bo_handles, args->bo_handle_count);
-	if (ret)
-		goto fail;
-
-	ret = v3d_lock_bo_reservations(clean_job, &acquire_ctx);
-	if (ret)
-		goto fail;
-
-	if (args->perfmon_id) {
-		job->base.perfmon = v3d_perfmon_find(v3d_priv,
-						     args->perfmon_id);
-		if (!job->base.perfmon) {
-			ret = -ENOENT;
-			goto fail_perfmon;
-		}
-	}
-
-	mutex_lock(&v3d->sched_lock);
-	v3d_push_job(&job->base);
-
-	ret = drm_sched_job_add_dependency(&clean_job->base,
-					   dma_fence_get(job->base.done_fence));
-	if (ret)
-		goto fail_unreserve;
-
-	v3d_push_job(clean_job);
-	mutex_unlock(&v3d->sched_lock);
-
-	v3d_attach_fences_and_unlock_reservation(file_priv,
-						 clean_job,
-						 &acquire_ctx,
-						 args->out_sync,
-						 &se,
-						 clean_job->done_fence);
-
-	v3d_job_put(&job->base);
-	v3d_job_put(clean_job);
-
-	return 0;
-
-fail_unreserve:
-	mutex_unlock(&v3d->sched_lock);
-fail_perfmon:
-	drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
-				    &acquire_ctx);
-fail:
-	v3d_job_cleanup((void *)job);
-	v3d_job_cleanup(clean_job);
-	v3d_put_multisync_post_deps(&se);
-
-	return ret;
-}
-
 int
 v3d_gem_init(struct drm_device *dev)
 {
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index fccbea2a5f2ebd5f5b6b100d710db1476544b16e..54015ad765c75b594132f80b32cc1a707b4cd7c9 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -21,10 +21,14 @@
 #include <linux/sched/clock.h>
 #include <linux/kthread.h>
 
+#include <drm/drm_syncobj.h>
+
 #include "v3d_drv.h"
 #include "v3d_regs.h"
 #include "v3d_trace.h"
 
+#define V3D_CSD_CFG012_WG_COUNT_SHIFT 16
+
 static struct v3d_job *
 to_v3d_job(struct drm_sched_job *sched_job)
 {
@@ -55,6 +59,12 @@ to_csd_job(struct drm_sched_job *sched_job)
 	return container_of(sched_job, struct v3d_csd_job, base.base);
 }
 
+static struct v3d_cpu_job *
+to_cpu_job(struct drm_sched_job *sched_job)
+{
+	return container_of(sched_job, struct v3d_cpu_job, base.base);
+}
+
 static void
 v3d_sched_job_free(struct drm_sched_job *sched_job)
 {
@@ -63,6 +73,28 @@ v3d_sched_job_free(struct drm_sched_job *sched_job)
 	v3d_job_cleanup(job);
 }
 
+static void
+v3d_cpu_job_free(struct drm_sched_job *sched_job)
+{
+	struct v3d_cpu_job *job = to_cpu_job(sched_job);
+	struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
+	struct v3d_performance_query_info *performance_query = &job->performance_query;
+
+	if (timestamp_query->queries) {
+		for (int i = 0; i < timestamp_query->count; i++)
+			drm_syncobj_put(timestamp_query->queries[i].syncobj);
+		kvfree(timestamp_query->queries);
+	}
+
+	if (performance_query->queries) {
+		for (int i = 0; i < performance_query->count; i++)
+			drm_syncobj_put(performance_query->queries[i].syncobj);
+		kvfree(performance_query->queries);
+	}
+
+	v3d_job_cleanup(&job->base);
+}
+
 static void
 v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
 {
@@ -262,6 +294,275 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
 	return fence;
 }
 
+static void
+v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
+{
+	struct v3d_indirect_csd_info *indirect_csd = &job->indirect_csd;
+	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
+	struct v3d_bo *indirect = to_v3d_bo(indirect_csd->indirect);
+	struct drm_v3d_submit_csd *args = &indirect_csd->job->args;
+	u32 *wg_counts;
+
+	v3d_get_bo_vaddr(bo);
+	v3d_get_bo_vaddr(indirect);
+
+	wg_counts = (uint32_t *)(bo->vaddr + indirect_csd->offset);
+
+	if (wg_counts[0] == 0 || wg_counts[1] == 0 || wg_counts[2] == 0) {
+		/* Nothing to dispatch; drop the vaddr references taken above. */
+		v3d_put_bo_vaddr(indirect);
+		v3d_put_bo_vaddr(bo);
+		return;
+	}
+
+	args->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
+	args->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
+	args->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
+	args->cfg[4] = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
+		       (wg_counts[0] * wg_counts[1] * wg_counts[2]) - 1;
+
+	for (int i = 0; i < 3; i++) {
+		/* 0xffffffff indicates that the uniform rewrite is not needed */
+		if (indirect_csd->wg_uniform_offsets[i] != 0xffffffff) {
+			u32 uniform_idx = indirect_csd->wg_uniform_offsets[i];
+			((uint32_t *)indirect->vaddr)[uniform_idx] = wg_counts[i];
+		}
+	}
+
+	v3d_put_bo_vaddr(indirect);
+	v3d_put_bo_vaddr(bo);
+}
+
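+/* Worked example of the cfg[] packing above (illustrative only, not driver
+ * code): with wg_size = 64 invocations and an indirect buffer holding
+ * wg_counts = {4, 2, 1}, each workgroup needs DIV_ROUND_UP(64, 16) = 4
+ * sixteen-lane batches, so:
+ *
+ *	cfg[0] = 4 << V3D_CSD_CFG012_WG_COUNT_SHIFT;	// 0x00040000
+ *	cfg[1] = 2 << V3D_CSD_CFG012_WG_COUNT_SHIFT;	// 0x00020000
+ *	cfg[2] = 1 << V3D_CSD_CFG012_WG_COUNT_SHIFT;	// 0x00010000
+ *	cfg[4] = 4 * (4 * 2 * 1) - 1;			// 31: total batches - 1
+ */
+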
+static void
+v3d_timestamp_query(struct v3d_cpu_job *job)
+{
+	struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
+	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
+	u8 *value_addr;
+
+	v3d_get_bo_vaddr(bo);
+
+	for (int i = 0; i < timestamp_query->count; i++) {
+		value_addr = ((u8 *)bo->vaddr) + timestamp_query->queries[i].offset;
+		*((u64 *)value_addr) = i == 0 ? ktime_get_ns() : 0ull;
+
+		drm_syncobj_replace_fence(timestamp_query->queries[i].syncobj,
+					  job->base.done_fence);
+	}
+
+	v3d_put_bo_vaddr(bo);
+}
+
+static void
+v3d_reset_timestamp_queries(struct v3d_cpu_job *job)
+{
+	struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
+	struct v3d_timestamp_query *queries = timestamp_query->queries;
+	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
+	u8 *value_addr;
+
+	v3d_get_bo_vaddr(bo);
+
+	for (int i = 0; i < timestamp_query->count; i++) {
+		value_addr = ((u8 *)bo->vaddr) + queries[i].offset;
+		*((u64 *)value_addr) = 0;
+
+		drm_syncobj_replace_fence(queries[i].syncobj, NULL);
+	}
+
+	v3d_put_bo_vaddr(bo);
+}
+
+static void
+write_to_buffer(void *dst, u32 idx, bool do_64bit, u64 value)
+{
+	if (do_64bit) {
+		u64 *dst64 = (u64 *)dst;
+
+		dst64[idx] = value;
+	} else {
+		u32 *dst32 = (u32 *)dst;
+
+		dst32[idx] = (u32)value;
+	}
+}
+
+static void
+v3d_copy_query_results(struct v3d_cpu_job *job)
+{
+	struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
+	struct v3d_timestamp_query *queries = timestamp_query->queries;
+	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
+	struct v3d_bo *timestamp = to_v3d_bo(job->base.bo[1]);
+	struct v3d_copy_query_results_info *copy = &job->copy;
+	struct dma_fence *fence;
+	u8 *query_addr;
+	bool available, write_result;
+	u8 *data;
+	int i;
+
+	v3d_get_bo_vaddr(bo);
+	v3d_get_bo_vaddr(timestamp);
+
+	data = ((u8 *)bo->vaddr) + copy->offset;
+
+	for (i = 0; i < timestamp_query->count; i++) {
+		fence = drm_syncobj_fence_get(queries[i].syncobj);
+		available = fence ? dma_fence_is_signaled(fence) : false;
+
+		write_result = available || copy->do_partial;
+		if (write_result) {
+			query_addr = ((u8 *)timestamp->vaddr) + queries[i].offset;
+			write_to_buffer(data, 0, copy->do_64bit, *((u64 *)query_addr));
+		}
+
+		if (copy->availability_bit)
+			write_to_buffer(data, 1, copy->do_64bit, available ? 1u : 0u);
+
+		data += copy->stride;
+
+		dma_fence_put(fence);
+	}
+
+	v3d_put_bo_vaddr(timestamp);
+	v3d_put_bo_vaddr(bo);
+}
+
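+/* The layout written above mirrors Vulkan's vkGetQueryPoolResults(): per
+ * query, the timestamp is element 0 and the optional availability word is
+ * element 1 of a slot copy->stride bytes wide. Illustrative userspace-side
+ * decode of a 64-bit result buffer (names are hypothetical):
+ *
+ *	for (i = 0; i < count; i++) {
+ *		u64 *slot = (u64 *)(buf + i * stride);
+ *		u64 value = slot[0], available = slot[1];
+ *	}
+ */
+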
+static void
+v3d_reset_performance_queries(struct v3d_cpu_job *job)
+{
+	struct v3d_performance_query_info *performance_query = &job->performance_query;
+	struct v3d_file_priv *v3d_priv = job->base.file->driver_priv;
+	struct v3d_dev *v3d = job->base.v3d;
+	struct v3d_perfmon *perfmon;
+
+	for (int i = 0; i < performance_query->count; i++) {
+		for (int j = 0; j < performance_query->nperfmons; j++) {
+			perfmon = v3d_perfmon_find(v3d_priv,
+						   performance_query->queries[i].kperfmon_ids[j]);
+			if (!perfmon) {
+				DRM_DEBUG("Failed to find perfmon.");
+				continue;
+			}
+
+			v3d_perfmon_stop(v3d, perfmon, false);
+
+			memset(perfmon->values, 0, perfmon->ncounters * sizeof(u64));
+
+			v3d_perfmon_put(perfmon);
+		}
+
+		drm_syncobj_replace_fence(performance_query->queries[i].syncobj, NULL);
+	}
+}
+
+static void
+v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data, u32 query)
+{
+	struct v3d_performance_query_info *performance_query = &job->performance_query;
+	struct v3d_copy_query_results_info *copy = &job->copy;
+	struct v3d_file_priv *v3d_priv = job->base.file->driver_priv;
+	struct v3d_dev *v3d = job->base.v3d;
+	struct v3d_perfmon *perfmon;
+	u64 counter_values[V3D_PERFCNT_NUM];
+
+	for (int i = 0; i < performance_query->nperfmons; i++) {
+		perfmon = v3d_perfmon_find(v3d_priv,
+					   performance_query->queries[query].kperfmon_ids[i]);
+		if (!perfmon) {
+			DRM_DEBUG("Failed to find perfmon.");
+			continue;
+		}
+
+		v3d_perfmon_stop(v3d, perfmon, true);
+
+		memcpy(&counter_values[i * DRM_V3D_MAX_PERF_COUNTERS], perfmon->values,
+		       perfmon->ncounters * sizeof(u64));
+
+		v3d_perfmon_put(perfmon);
+	}
+
+	for (int i = 0; i < performance_query->ncounters; i++)
+		write_to_buffer(data, i, copy->do_64bit, counter_values[i]);
+}
+
+static void
+v3d_copy_performance_query(struct v3d_cpu_job *job)
+{
+	struct v3d_performance_query_info *performance_query = &job->performance_query;
+	struct v3d_copy_query_results_info *copy = &job->copy;
+	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
+	struct dma_fence *fence;
+	bool available, write_result;
+	u8 *data;
+
+	v3d_get_bo_vaddr(bo);
+
+	data = ((u8 *)bo->vaddr) + copy->offset;
+
+	for (int i = 0; i < performance_query->count; i++) {
+		fence = drm_syncobj_fence_get(performance_query->queries[i].syncobj);
+		available = fence ? dma_fence_is_signaled(fence) : false;
+
+		write_result = available || copy->do_partial;
+		if (write_result)
+			v3d_write_performance_query_result(job, data, i);
+
+		if (copy->availability_bit)
+			write_to_buffer(data, performance_query->ncounters,
+					copy->do_64bit, available ? 1u : 0u);
+
+		data += copy->stride;
+
+		dma_fence_put(fence);
+	}
+
+	v3d_put_bo_vaddr(bo);
+}
+
+static const v3d_cpu_job_fn cpu_job_function[] = {
+	[V3D_CPU_JOB_TYPE_INDIRECT_CSD] = v3d_rewrite_csd_job_wg_counts_from_indirect,
+	[V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = v3d_timestamp_query,
+	[V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = v3d_reset_timestamp_queries,
+	[V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = v3d_copy_query_results,
+	[V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = v3d_reset_performance_queries,
+	[V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY] = v3d_copy_performance_query,
+};
+
+static struct dma_fence *
+v3d_cpu_job_run(struct drm_sched_job *sched_job)
+{
+	struct v3d_cpu_job *job = to_cpu_job(sched_job);
+	struct v3d_dev *v3d = job->base.v3d;
+	struct v3d_file_priv *file = job->base.file->driver_priv;
+	u64 runtime;
+
+	v3d->cpu_job = job;
+
+	if (job->job_type >= ARRAY_SIZE(cpu_job_function)) {
+		DRM_DEBUG_DRIVER("Unknown CPU job: %d\n", job->job_type);
+		return NULL;
+	}
+
+	file->start_ns[V3D_CPU] = local_clock();
+	v3d->queue[V3D_CPU].start_ns = file->start_ns[V3D_CPU];
+
+	trace_v3d_cpu_job_begin(&v3d->drm, job->job_type);
+
+	cpu_job_function[job->job_type](job);
+
+	trace_v3d_cpu_job_end(&v3d->drm, job->job_type);
+
+	runtime = local_clock() - file->start_ns[V3D_CPU];
+
+	file->enabled_ns[V3D_CPU] += runtime;
+	v3d->queue[V3D_CPU].enabled_ns += runtime;
+
+	file->jobs_sent[V3D_CPU]++;
+	v3d->queue[V3D_CPU].jobs_sent++;
+
+	file->start_ns[V3D_CPU] = 0;
+	v3d->queue[V3D_CPU].start_ns = 0;
+
+	return NULL;
+}
+
 static struct dma_fence *
 v3d_cache_clean_job_run(struct drm_sched_job *sched_job)
 {
@@ -416,6 +717,12 @@ static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
 	.free_job = v3d_sched_job_free
 };
 
+static const struct drm_sched_backend_ops v3d_cpu_sched_ops = {
+	.run_job = v3d_cpu_job_run,
+	.timedout_job = v3d_generic_job_timedout,
+	.free_job = v3d_cpu_job_free
+};
+
 int
 v3d_sched_init(struct v3d_dev *v3d)
 {
@@ -471,6 +778,15 @@ v3d_sched_init(struct v3d_dev *v3d)
 			goto fail;
 	}
 
+	ret = drm_sched_init(&v3d->queue[V3D_CPU].sched,
+			     &v3d_cpu_sched_ops, NULL,
+			     DRM_SCHED_PRIORITY_COUNT,
+			     1, job_hang_limit,
+			     msecs_to_jiffies(hang_limit_ms), NULL,
+			     NULL, "v3d_cpu", v3d->drm.dev);
+	if (ret)
+		goto fail;
+
 	return 0;
 
 fail:
diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
new file mode 100644
index 0000000000000000000000000000000000000000..fcff41dd2315b710dc9de6ccdb361922c61d2602
--- /dev/null
+++ b/drivers/gpu/drm/v3d/v3d_submit.c
@@ -0,0 +1,1320 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2014-2018 Broadcom
+ * Copyright (C) 2023 Raspberry Pi
+ */
+
+#include <drm/drm_syncobj.h>
+
+#include "v3d_drv.h"
+#include "v3d_regs.h"
+#include "v3d_trace.h"
+
+/* Takes the reservation lock on all the BOs being referenced, so that
+ * at queue submit time we can update the reservations.
+ *
+ * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
+ * (all of which are on exec->unref_list).  They're entirely private
+ * to v3d, so we don't attach dma-buf fences to them.
+ */
+static int
+v3d_lock_bo_reservations(struct v3d_job *job,
+			 struct ww_acquire_ctx *acquire_ctx)
+{
+	int i, ret;
+
+	ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < job->bo_count; i++) {
+		ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
+		if (ret)
+			goto fail;
+
+		ret = drm_sched_job_add_implicit_dependencies(&job->base,
+							      job->bo[i], true);
+		if (ret)
+			goto fail;
+	}
+
+	return 0;
+
+fail:
+	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
+	return ret;
+}
+
+/**
+ * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
+ * referenced by the job.
+ * @dev: DRM device
+ * @file_priv: DRM file for this fd
+ * @job: V3D job being set up
+ * @bo_handles: GEM handles
+ * @bo_count: Number of GEM handles passed in
+ *
+ * The command validator needs to reference BOs by their index within
+ * the submitted job's BO list.  This does the validation of the job's
+ * BO list and reference counting for the lifetime of the job.
+ *
+ * Note that this function doesn't need to unreference the BOs on
+ * failure, because that will happen at v3d_exec_cleanup() time.
+ */
+static int
+v3d_lookup_bos(struct drm_device *dev,
+	       struct drm_file *file_priv,
+	       struct v3d_job *job,
+	       u64 bo_handles,
+	       u32 bo_count)
+{
+	job->bo_count = bo_count;
+
+	if (!job->bo_count) {
+		/* See comment on bo_index for why we have to check
+		 * this.
+		 */
+		DRM_DEBUG("Rendering requires BOs\n");
+		return -EINVAL;
+	}
+
+	return drm_gem_objects_lookup(file_priv,
+				      (void __user *)(uintptr_t)bo_handles,
+				      job->bo_count, &job->bo);
+}
+
+static void
+v3d_job_free(struct kref *ref)
+{
+	struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
+	int i;
+
+	if (job->bo) {
+		for (i = 0; i < job->bo_count; i++)
+			drm_gem_object_put(job->bo[i]);
+		kvfree(job->bo);
+	}
+
+	dma_fence_put(job->irq_fence);
+	dma_fence_put(job->done_fence);
+
+	if (job->perfmon)
+		v3d_perfmon_put(job->perfmon);
+
+	kfree(job);
+}
+
+static void
+v3d_render_job_free(struct kref *ref)
+{
+	struct v3d_render_job *job = container_of(ref, struct v3d_render_job,
+						  base.refcount);
+	struct v3d_bo *bo, *save;
+
+	list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
+		drm_gem_object_put(&bo->base.base);
+	}
+
+	v3d_job_free(ref);
+}
+
+void v3d_job_cleanup(struct v3d_job *job)
+{
+	if (!job)
+		return;
+
+	drm_sched_job_cleanup(&job->base);
+	v3d_job_put(job);
+}
+
+void v3d_job_put(struct v3d_job *job)
+{
+	if (!job)
+		return;
+
+	kref_put(&job->refcount, job->free);
+}
+
+static int
+v3d_job_allocate(void **container, size_t size)
+{
+	*container = kcalloc(1, size, GFP_KERNEL);
+	if (!*container) {
+		DRM_ERROR("Cannot allocate memory for V3D job.\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int
+v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
+	     struct v3d_job *job, void (*free)(struct kref *ref),
+	     u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue)
+{
+	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+	bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
+	int ret, i;
+
+	job->v3d = v3d;
+	job->free = free;
+	job->file = file_priv;
+
+	ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
+				 1, v3d_priv);
+	if (ret)
+		return ret;
+
+	if (has_multisync) {
+		if (se->in_sync_count && se->wait_stage == queue) {
+			struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs);
+
+			for (i = 0; i < se->in_sync_count; i++) {
+				struct drm_v3d_sem in;
+
+				if (copy_from_user(&in, handle++, sizeof(in))) {
+					ret = -EFAULT;
+					DRM_DEBUG("Failed to copy wait dep handle.\n");
+					goto fail_deps;
+				}
+				ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in.handle, 0);
+
+				// TODO: Investigate why this was filtered out for the IOCTL.
+				if (ret && ret != -ENOENT)
+					goto fail_deps;
+			}
+		}
+	} else {
+		ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in_sync, 0);
+
+		// TODO: Investigate why this was filtered out for the IOCTL.
+		if (ret && ret != -ENOENT)
+			goto fail_deps;
+	}
+
+	kref_init(&job->refcount);
+
+	return 0;
+
+fail_deps:
+	drm_sched_job_cleanup(&job->base);
+	return ret;
+}
+
+static void
+v3d_push_job(struct v3d_job *job)
+{
+	drm_sched_job_arm(&job->base);
+
+	job->done_fence = dma_fence_get(&job->base.s_fence->finished);
+
+	/* put by scheduler job completion */
+	kref_get(&job->refcount);
+
+	drm_sched_entity_push_job(&job->base);
+}
+
+static void
+v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
+					 struct v3d_job *job,
+					 struct ww_acquire_ctx *acquire_ctx,
+					 u32 out_sync,
+					 struct v3d_submit_ext *se,
+					 struct dma_fence *done_fence)
+{
+	struct drm_syncobj *sync_out;
+	bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
+	int i;
+
+	for (i = 0; i < job->bo_count; i++) {
+		/* XXX: Use shared fences for read-only objects. */
+		dma_resv_add_fence(job->bo[i]->resv, job->done_fence,
+				   DMA_RESV_USAGE_WRITE);
+	}
+
+	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
+
+	/* Update the return sync object for the job, for the case where
+	 * only a single signal semaphore is supported.
+	 */
+	if (!has_multisync) {
+		sync_out = drm_syncobj_find(file_priv, out_sync);
+		if (sync_out) {
+			drm_syncobj_replace_fence(sync_out, done_fence);
+			drm_syncobj_put(sync_out);
+		}
+		return;
+	}
+
+	/* If multiple semaphores extension is supported */
+	if (se->out_sync_count) {
+		for (i = 0; i < se->out_sync_count; i++) {
+			drm_syncobj_replace_fence(se->out_syncs[i].syncobj,
+						  done_fence);
+			drm_syncobj_put(se->out_syncs[i].syncobj);
+		}
+		kvfree(se->out_syncs);
+	}
+}
+
+static int
+v3d_setup_csd_jobs_and_bos(struct drm_file *file_priv,
+			   struct v3d_dev *v3d,
+			   struct drm_v3d_submit_csd *args,
+			   struct v3d_csd_job **job,
+			   struct v3d_job **clean_job,
+			   struct v3d_submit_ext *se,
+			   struct ww_acquire_ctx *acquire_ctx)
+{
+	int ret;
+
+	ret = v3d_job_allocate((void *)job, sizeof(**job));
+	if (ret)
+		return ret;
+
+	ret = v3d_job_init(v3d, file_priv, &(*job)->base,
+			   v3d_job_free, args->in_sync, se, V3D_CSD);
+	if (ret)
+		return ret;
+
+	ret = v3d_job_allocate((void *)clean_job, sizeof(**clean_job));
+	if (ret)
+		return ret;
+
+	ret = v3d_job_init(v3d, file_priv, *clean_job,
+			   v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
+	if (ret)
+		return ret;
+
+	(*job)->args = *args;
+
+	ret = v3d_lookup_bos(&v3d->drm, file_priv, *clean_job,
+			     args->bo_handles, args->bo_handle_count);
+	if (ret)
+		return ret;
+
+	return v3d_lock_bo_reservations(*clean_job, acquire_ctx);
+}
+
+static void
+v3d_put_multisync_post_deps(struct v3d_submit_ext *se)
+{
+	unsigned int i;
+
+	if (!(se && se->out_sync_count))
+		return;
+
+	for (i = 0; i < se->out_sync_count; i++)
+		drm_syncobj_put(se->out_syncs[i].syncobj);
+	kvfree(se->out_syncs);
+}
+
+static int
+v3d_get_multisync_post_deps(struct drm_file *file_priv,
+			    struct v3d_submit_ext *se,
+			    u32 count, u64 handles)
+{
+	struct drm_v3d_sem __user *post_deps;
+	int i, ret;
+
+	if (!count)
+		return 0;
+
+	se->out_syncs = (struct v3d_submit_outsync *)
+			kvmalloc_array(count,
+				       sizeof(struct v3d_submit_outsync),
+				       GFP_KERNEL);
+	if (!se->out_syncs)
+		return -ENOMEM;
+
+	post_deps = u64_to_user_ptr(handles);
+
+	for (i = 0; i < count; i++) {
+		struct drm_v3d_sem out;
+
+		if (copy_from_user(&out, post_deps++, sizeof(out))) {
+			ret = -EFAULT;
+			DRM_DEBUG("Failed to copy post dep handles\n");
+			goto fail;
+		}
+
+		se->out_syncs[i].syncobj = drm_syncobj_find(file_priv,
+							    out.handle);
+		if (!se->out_syncs[i].syncobj) {
+			ret = -EINVAL;
+			goto fail;
+		}
+	}
+	se->out_sync_count = count;
+
+	return 0;
+
+fail:
+	for (i--; i >= 0; i--)
+		drm_syncobj_put(se->out_syncs[i].syncobj);
+	kvfree(se->out_syncs);
+
+	return ret;
+}
+
+/* Get data for multiple binary semaphore synchronization. Parse the
+ * syncobjs to be signaled when the job completes (out_sync).
+ */
+static int
+v3d_get_multisync_submit_deps(struct drm_file *file_priv,
+			      struct drm_v3d_extension __user *ext,
+			      struct v3d_submit_ext *se)
+{
+	struct drm_v3d_multi_sync multisync;
+	int ret;
+
+	if (se->in_sync_count || se->out_sync_count) {
+		DRM_DEBUG("Two multisync extensions were added to the same job.");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&multisync, ext, sizeof(multisync)))
+		return -EFAULT;
+
+	if (multisync.pad)
+		return -EINVAL;
+
+	ret = v3d_get_multisync_post_deps(file_priv, se, multisync.out_sync_count,
+					  multisync.out_syncs);
+	if (ret)
+		return ret;
+
+	se->in_sync_count = multisync.in_sync_count;
+	se->in_syncs = multisync.in_syncs;
+	se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC;
+	se->wait_stage = multisync.wait_stage;
+
+	return 0;
+}
+
+/* Get data for the indirect CSD job submission. */
+static int
+v3d_get_cpu_indirect_csd_params(struct drm_file *file_priv,
+				struct drm_v3d_extension __user *ext,
+				struct v3d_cpu_job *job)
+{
+	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+	struct v3d_dev *v3d = v3d_priv->v3d;
+	struct drm_v3d_indirect_csd indirect_csd;
+	struct v3d_indirect_csd_info *info = &job->indirect_csd;
+
+	if (!job) {
+		DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+		return -EINVAL;
+	}
+
+	if (job->job_type) {
+		DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&indirect_csd, ext, sizeof(indirect_csd)))
+		return -EFAULT;
+
+	if (!v3d_has_csd(v3d)) {
+		DRM_DEBUG("Attempting CSD submit on non-CSD hardware.\n");
+		return -EINVAL;
+	}
+
+	job->job_type = V3D_CPU_JOB_TYPE_INDIRECT_CSD;
+	info->offset = indirect_csd.offset;
+	info->wg_size = indirect_csd.wg_size;
+	memcpy(&info->wg_uniform_offsets, &indirect_csd.wg_uniform_offsets,
+	       sizeof(indirect_csd.wg_uniform_offsets));
+
+	info->indirect = drm_gem_object_lookup(file_priv, indirect_csd.indirect);
+	if (!info->indirect) {
+		DRM_DEBUG("Failed to look up the indirect CSD BO.\n");
+		return -ENOENT;
+	}
+
+	return v3d_setup_csd_jobs_and_bos(file_priv, v3d, &indirect_csd.submit,
+					  &info->job, &info->clean_job,
+					  NULL, &info->acquire_ctx);
+}
+
+/* Get data for the query timestamp job submission. */
+static int
+v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
+				   struct drm_v3d_extension __user *ext,
+				   struct v3d_cpu_job *job)
+{
+	u32 __user *offsets, *syncs;
+	struct drm_v3d_timestamp_query timestamp;
+
+	if (!job) {
+		DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+		return -EINVAL;
+	}
+
+	if (job->job_type) {
+		DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&timestamp, ext, sizeof(timestamp)))
+		return -EFAULT;
+
+	if (timestamp.pad)
+		return -EINVAL;
+
+	job->job_type = V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY;
+
+	job->timestamp_query.queries = kvmalloc_array(timestamp.count,
+						      sizeof(struct v3d_timestamp_query),
+						      GFP_KERNEL);
+	if (!job->timestamp_query.queries)
+		return -ENOMEM;
+
+	offsets = u64_to_user_ptr(timestamp.offsets);
+	syncs = u64_to_user_ptr(timestamp.syncs);
+
+	for (int i = 0; i < timestamp.count; i++) {
+		u32 offset, sync;
+
+		if (copy_from_user(&offset, offsets++, sizeof(offset))) {
+			kvfree(job->timestamp_query.queries);
+			return -EFAULT;
+		}
+
+		job->timestamp_query.queries[i].offset = offset;
+
+		if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+			kvfree(job->timestamp_query.queries);
+			return -EFAULT;
+		}
+
+		job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+	}
+	job->timestamp_query.count = timestamp.count;
+
+	return 0;
+}
+
+static int
+v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
+				   struct drm_v3d_extension __user *ext,
+				   struct v3d_cpu_job *job)
+{
+	u32 __user *syncs;
+	struct drm_v3d_reset_timestamp_query reset;
+
+	if (!job) {
+		DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+		return -EINVAL;
+	}
+
+	if (job->job_type) {
+		DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&reset, ext, sizeof(reset)))
+		return -EFAULT;
+
+	job->job_type = V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY;
+
+	job->timestamp_query.queries = kvmalloc_array(reset.count,
+						      sizeof(struct v3d_timestamp_query),
+						      GFP_KERNEL);
+	if (!job->timestamp_query.queries)
+		return -ENOMEM;
+
+	syncs = u64_to_user_ptr(reset.syncs);
+
+	for (int i = 0; i < reset.count; i++) {
+		u32 sync;
+
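+		/* Timestamp slots are 64 bits wide: query i lives at offset + 8 * i. */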
+		job->timestamp_query.queries[i].offset = reset.offset + 8 * i;
+
+		if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+			kvfree(job->timestamp_query.queries);
+			return -EFAULT;
+		}
+
+		job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+	}
+	job->timestamp_query.count = reset.count;
+
+	return 0;
+}
+
+/* Get data for the copy timestamp query results job submission. */
+static int
+v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
+				      struct drm_v3d_extension __user *ext,
+				      struct v3d_cpu_job *job)
+{
+	u32 __user *offsets, *syncs;
+	struct drm_v3d_copy_timestamp_query copy;
+	int i;
+
+	if (!job) {
+		DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+		return -EINVAL;
+	}
+
+	if (job->job_type) {
+		DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&copy, ext, sizeof(copy)))
+		return -EFAULT;
+
+	if (copy.pad)
+		return -EINVAL;
+
+	job->job_type = V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY;
+
+	job->timestamp_query.queries = kvmalloc_array(copy.count,
+						      sizeof(struct v3d_timestamp_query),
+						      GFP_KERNEL);
+	if (!job->timestamp_query.queries)
+		return -ENOMEM;
+
+	offsets = u64_to_user_ptr(copy.offsets);
+	syncs = u64_to_user_ptr(copy.syncs);
+
+	for (i = 0; i < copy.count; i++) {
+		u32 offset, sync;
+
+		if (copy_from_user(&offset, offsets++, sizeof(offset))) {
+			kvfree(job->timestamp_query.queries);
+			return -EFAULT;
+		}
+
+		job->timestamp_query.queries[i].offset = offset;
+
+		if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+			kvfree(job->timestamp_query.queries);
+			return -EFAULT;
+		}
+
+		job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+	}
+	job->timestamp_query.count = copy.count;
+
+	job->copy.do_64bit = copy.do_64bit;
+	job->copy.do_partial = copy.do_partial;
+	job->copy.availability_bit = copy.availability_bit;
+	job->copy.offset = copy.offset;
+	job->copy.stride = copy.stride;
+
+	return 0;
+}
+
+static int
+v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
+				     struct drm_v3d_extension __user *ext,
+				     struct v3d_cpu_job *job)
+{
+	u32 __user *syncs;
+	u64 __user *kperfmon_ids;
+	struct drm_v3d_reset_performance_query reset;
+
+	if (!job) {
+		DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+		return -EINVAL;
+	}
+
+	if (job->job_type) {
+		DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&reset, ext, sizeof(reset)))
+		return -EFAULT;
+
+	job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY;
+
+	job->performance_query.queries = kvmalloc_array(reset.count,
+							sizeof(struct v3d_performance_query),
+							GFP_KERNEL);
+	if (!job->performance_query.queries)
+		return -ENOMEM;
+
+	syncs = u64_to_user_ptr(reset.syncs);
+	kperfmon_ids = u64_to_user_ptr(reset.kperfmon_ids);
+
+	for (int i = 0; i < reset.count; i++) {
+		u32 sync;
+		u64 ids;
+		u32 __user *ids_pointer;
+		u32 id;
+
+		if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+			kvfree(job->performance_query.queries);
+			return -EFAULT;
+		}
+
+		job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+
+		if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
+			kvfree(job->performance_query.queries);
+			return -EFAULT;
+		}
+
+		ids_pointer = u64_to_user_ptr(ids);
+
+		for (int j = 0; j < reset.nperfmons; j++) {
+			if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
+				kvfree(job->performance_query.queries);
+				return -EFAULT;
+			}
+
+			job->performance_query.queries[i].kperfmon_ids[j] = id;
+		}
+	}
+	job->performance_query.count = reset.count;
+	job->performance_query.nperfmons = reset.nperfmons;
+
+	return 0;
+}
+
+static int
+v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
+					  struct drm_v3d_extension __user *ext,
+					  struct v3d_cpu_job *job)
+{
+	u32 __user *syncs;
+	u64 __user *kperfmon_ids;
+	struct drm_v3d_copy_performance_query copy;
+
+	if (!job) {
+		DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
+		return -EINVAL;
+	}
+
+	if (job->job_type) {
+		DRM_DEBUG("Two CPU job extensions were added to the same CPU job.\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&copy, ext, sizeof(copy)))
+		return -EFAULT;
+
+	if (copy.pad)
+		return -EINVAL;
+
+	job->job_type = V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY;
+
+	job->performance_query.queries = kvmalloc_array(copy.count,
+							sizeof(struct v3d_performance_query),
+							GFP_KERNEL);
+	if (!job->performance_query.queries)
+		return -ENOMEM;
+
+	syncs = u64_to_user_ptr(copy.syncs);
+	kperfmon_ids = u64_to_user_ptr(copy.kperfmon_ids);
+
+	for (int i = 0; i < copy.count; i++) {
+		u32 sync;
+		u64 ids;
+		u32 __user *ids_pointer;
+		u32 id;
+
+		if (copy_from_user(&sync, syncs++, sizeof(sync))) {
+			kvfree(job->performance_query.queries);
+			return -EFAULT;
+		}
+
+		job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+
+		if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
+			kvfree(job->performance_query.queries);
+			return -EFAULT;
+		}
+
+		ids_pointer = u64_to_user_ptr(ids);
+
+		for (int j = 0; j < copy.nperfmons; j++) {
+			if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
+				kvfree(job->performance_query.queries);
+				return -EFAULT;
+			}
+
+			job->performance_query.queries[i].kperfmon_ids[j] = id;
+		}
+	}
+	job->performance_query.count = copy.count;
+	job->performance_query.nperfmons = copy.nperfmons;
+	job->performance_query.ncounters = copy.ncounters;
+
+	job->copy.do_64bit = copy.do_64bit;
+	job->copy.do_partial = copy.do_partial;
+	job->copy.availability_bit = copy.availability_bit;
+	job->copy.offset = copy.offset;
+	job->copy.stride = copy.stride;
+
+	return 0;
+}
+
+/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
+ * according to the extension id (name).
+ */
+static int
+v3d_get_extensions(struct drm_file *file_priv,
+		   u64 ext_handles,
+		   struct v3d_submit_ext *se,
+		   struct v3d_cpu_job *job)
+{
+	struct drm_v3d_extension __user *user_ext;
+	int ret;
+
+	user_ext = u64_to_user_ptr(ext_handles);
+	while (user_ext) {
+		struct drm_v3d_extension ext;
+
+		if (copy_from_user(&ext, user_ext, sizeof(ext))) {
+			DRM_DEBUG("Failed to copy submit extension\n");
+			return -EFAULT;
+		}
+
+		switch (ext.id) {
+		case DRM_V3D_EXT_ID_MULTI_SYNC:
+			ret = v3d_get_multisync_submit_deps(file_priv, user_ext, se);
+			break;
+		case DRM_V3D_EXT_ID_CPU_INDIRECT_CSD:
+			ret = v3d_get_cpu_indirect_csd_params(file_priv, user_ext, job);
+			break;
+		case DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY:
+			ret = v3d_get_cpu_timestamp_query_params(file_priv, user_ext, job);
+			break;
+		case DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY:
+			ret = v3d_get_cpu_reset_timestamp_params(file_priv, user_ext, job);
+			break;
+		case DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY:
+			ret = v3d_get_cpu_copy_query_results_params(file_priv, user_ext, job);
+			break;
+		case DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY:
+			ret = v3d_get_cpu_reset_performance_params(file_priv, user_ext, job);
+			break;
+		case DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY:
+			ret = v3d_get_cpu_copy_performance_query_params(file_priv, user_ext, job);
+			break;
+		default:
+			DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id);
+			return -EINVAL;
+		}
+
+		if (ret)
+			return ret;
+
+		user_ext = u64_to_user_ptr(ext.next);
+	}
+
+	return 0;
+}
+
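+/* Userspace chains extensions through the 'next' pointer walked above. A
+ * rough sketch of attaching a multisync extension to a submission (struct
+ * layout per this series' uapi header; illustrative only):
+ *
+ *	struct drm_v3d_multi_sync ms = {
+ *		.base.id = DRM_V3D_EXT_ID_MULTI_SYNC,
+ *		.base.next = 0,			// end of the chain
+ *		.in_syncs = (uintptr_t)in_syncs,
+ *		.in_sync_count = n,
+ *		.wait_stage = V3D_RENDER,
+ *	};
+ *	submit.extensions = (uintptr_t)&ms;
+ *	submit.flags |= DRM_V3D_SUBMIT_EXTENSION;
+ */
+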
+/**
+ * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * This is the main entrypoint for userspace to submit a 3D frame to
+ * the GPU.  Userspace provides the binner command list (if
+ * applicable), and the kernel sets up the render command list to draw
+ * to the framebuffer described in the ioctl, using the command lists
+ * that the 3D engine's binner will produce.
+ */
+int
+v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct v3d_dev *v3d = to_v3d_dev(dev);
+	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+	struct drm_v3d_submit_cl *args = data;
+	struct v3d_submit_ext se = {0};
+	struct v3d_bin_job *bin = NULL;
+	struct v3d_render_job *render = NULL;
+	struct v3d_job *clean_job = NULL;
+	struct v3d_job *last_job;
+	struct ww_acquire_ctx acquire_ctx;
+	int ret = 0;
+
+	trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
+
+	if (args->pad)
+		return -EINVAL;
+
+	if (args->flags &&
+	    args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE |
+			    DRM_V3D_SUBMIT_EXTENSION)) {
+		DRM_INFO("invalid flags: %d\n", args->flags);
+		return -EINVAL;
+	}
+
+	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+		ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
+		if (ret) {
+			DRM_DEBUG("Failed to get extensions.\n");
+			return ret;
+		}
+	}
+
+	ret = v3d_job_allocate((void *)&render, sizeof(*render));
+	if (ret)
+		return ret;
+
+	ret = v3d_job_init(v3d, file_priv, &render->base,
+			   v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
+	if (ret)
+		goto fail;
+
+	render->start = args->rcl_start;
+	render->end = args->rcl_end;
+	INIT_LIST_HEAD(&render->unref_list);
+
+	if (args->bcl_start != args->bcl_end) {
+		ret = v3d_job_allocate((void *)&bin, sizeof(*bin));
+		if (ret)
+			goto fail;
+
+		ret = v3d_job_init(v3d, file_priv, &bin->base,
+				   v3d_job_free, args->in_sync_bcl, &se, V3D_BIN);
+		if (ret)
+			goto fail;
+
+		bin->start = args->bcl_start;
+		bin->end = args->bcl_end;
+		bin->qma = args->qma;
+		bin->qms = args->qms;
+		bin->qts = args->qts;
+		bin->render = render;
+	}
+
+	if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
+		ret = v3d_job_allocate((void *)&clean_job, sizeof(*clean_job));
+		if (ret)
+			goto fail;
+
+		ret = v3d_job_init(v3d, file_priv, clean_job,
+				   v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
+		if (ret)
+			goto fail;
+
+		last_job = clean_job;
+	} else {
+		last_job = &render->base;
+	}
+
+	ret = v3d_lookup_bos(dev, file_priv, last_job,
+			     args->bo_handles, args->bo_handle_count);
+	if (ret)
+		goto fail;
+
+	ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
+	if (ret)
+		goto fail;
+
+	if (args->perfmon_id) {
+		render->base.perfmon = v3d_perfmon_find(v3d_priv,
+							args->perfmon_id);
+
+		if (!render->base.perfmon) {
+			ret = -ENOENT;
+			goto fail_perfmon;
+		}
+	}
+
+	mutex_lock(&v3d->sched_lock);
+	if (bin) {
+		bin->base.perfmon = render->base.perfmon;
+		v3d_perfmon_get(bin->base.perfmon);
+		v3d_push_job(&bin->base);
+
+		ret = drm_sched_job_add_dependency(&render->base.base,
+						   dma_fence_get(bin->base.done_fence));
+		if (ret)
+			goto fail_unreserve;
+	}
+
+	v3d_push_job(&render->base);
+
+	if (clean_job) {
+		struct dma_fence *render_fence =
+			dma_fence_get(render->base.done_fence);
+		ret = drm_sched_job_add_dependency(&clean_job->base,
+						   render_fence);
+		if (ret)
+			goto fail_unreserve;
+		clean_job->perfmon = render->base.perfmon;
+		v3d_perfmon_get(clean_job->perfmon);
+		v3d_push_job(clean_job);
+	}
+
+	mutex_unlock(&v3d->sched_lock);
+
+	v3d_attach_fences_and_unlock_reservation(file_priv,
+						 last_job,
+						 &acquire_ctx,
+						 args->out_sync,
+						 &se,
+						 last_job->done_fence);
+
+	v3d_job_put(&bin->base);
+	v3d_job_put(&render->base);
+	v3d_job_put(clean_job);
+
+	return 0;
+
+fail_unreserve:
+	mutex_unlock(&v3d->sched_lock);
+fail_perfmon:
+	drm_gem_unlock_reservations(last_job->bo,
+				    last_job->bo_count, &acquire_ctx);
+fail:
+	v3d_job_cleanup((void *)bin);
+	v3d_job_cleanup((void *)render);
+	v3d_job_cleanup(clean_job);
+	v3d_put_multisync_post_deps(&se);
+
+	return ret;
+}
+
+/**
+ * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * Userspace provides the register setup for the TFU, which we don't
+ * need to validate since the TFU is behind the MMU.
+ */
+int
+v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct v3d_dev *v3d = to_v3d_dev(dev);
+	struct drm_v3d_submit_tfu *args = data;
+	struct v3d_submit_ext se = {0};
+	struct v3d_tfu_job *job = NULL;
+	struct ww_acquire_ctx acquire_ctx;
+	int ret = 0;
+
+	trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);
+
+	if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
+		DRM_DEBUG("invalid flags: %d\n", args->flags);
+		return -EINVAL;
+	}
+
+	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+		ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
+		if (ret) {
+			DRM_DEBUG("Failed to get extensions.\n");
+			return ret;
+		}
+	}
+
+	ret = v3d_job_allocate((void *)&job, sizeof(*job));
+	if (ret)
+		return ret;
+
+	ret = v3d_job_init(v3d, file_priv, &job->base,
+			   v3d_job_free, args->in_sync, &se, V3D_TFU);
+	if (ret)
+		goto fail;
+
+	job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
+			       sizeof(*job->base.bo), GFP_KERNEL);
+	if (!job->base.bo) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	job->args = *args;
+
+	for (job->base.bo_count = 0;
+	     job->base.bo_count < ARRAY_SIZE(args->bo_handles);
+	     job->base.bo_count++) {
+		struct drm_gem_object *bo;
+
+		if (!args->bo_handles[job->base.bo_count])
+			break;
+
+		bo = drm_gem_object_lookup(file_priv, args->bo_handles[job->base.bo_count]);
+		if (!bo) {
+			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
+				  job->base.bo_count,
+				  args->bo_handles[job->base.bo_count]);
+			ret = -ENOENT;
+			goto fail;
+		}
+		job->base.bo[job->base.bo_count] = bo;
+	}
+
+	ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
+	if (ret)
+		goto fail;
+
+	mutex_lock(&v3d->sched_lock);
+	v3d_push_job(&job->base);
+	mutex_unlock(&v3d->sched_lock);
+
+	v3d_attach_fences_and_unlock_reservation(file_priv,
+						 &job->base, &acquire_ctx,
+						 args->out_sync,
+						 &se,
+						 job->base.done_fence);
+
+	v3d_job_put(&job->base);
+
+	return 0;
+
+fail:
+	v3d_job_cleanup((void *)job);
+	v3d_put_multisync_post_deps(&se);
+
+	return ret;
+}
+
+/**
+ * v3d_submit_csd_ioctl() - Submits a CSD (compute shader) job to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * Userspace provides the register setup for the CSD, which we don't
+ * need to validate since the CSD is behind the MMU.
+ */
+int
+v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct v3d_dev *v3d = to_v3d_dev(dev);
+	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+	struct drm_v3d_submit_csd *args = data;
+	struct v3d_submit_ext se = {0};
+	struct v3d_csd_job *job = NULL;
+	struct v3d_job *clean_job = NULL;
+	struct ww_acquire_ctx acquire_ctx;
+	int ret;
+
+	trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);
+
+	if (args->pad)
+		return -EINVAL;
+
+	if (!v3d_has_csd(v3d)) {
+		DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
+		return -EINVAL;
+	}
+
+	if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
+		DRM_INFO("invalid flags: %d\n", args->flags);
+		return -EINVAL;
+	}
+
+	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+		ret = v3d_get_extensions(file_priv, args->extensions, &se, NULL);
+		if (ret) {
+			DRM_DEBUG("Failed to get extensions.\n");
+			return ret;
+		}
+	}
+
+	ret = v3d_setup_csd_jobs_and_bos(file_priv, v3d, args,
+					 &job, &clean_job, &se,
+					 &acquire_ctx);
+	if (ret)
+		goto fail;
+
+	if (args->perfmon_id) {
+		job->base.perfmon = v3d_perfmon_find(v3d_priv,
+						     args->perfmon_id);
+		if (!job->base.perfmon) {
+			ret = -ENOENT;
+			goto fail_perfmon;
+		}
+	}
+
+	mutex_lock(&v3d->sched_lock);
+	v3d_push_job(&job->base);
+
+	ret = drm_sched_job_add_dependency(&clean_job->base,
+					   dma_fence_get(job->base.done_fence));
+	if (ret)
+		goto fail_unreserve;
+
+	v3d_push_job(clean_job);
+	mutex_unlock(&v3d->sched_lock);
+
+	v3d_attach_fences_and_unlock_reservation(file_priv,
+						 clean_job,
+						 &acquire_ctx,
+						 args->out_sync,
+						 &se,
+						 clean_job->done_fence);
+
+	v3d_job_put(&job->base);
+	v3d_job_put(clean_job);
+
+	return 0;
+
+fail_unreserve:
+	mutex_unlock(&v3d->sched_lock);
+fail_perfmon:
+	drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
+				    &acquire_ctx);
+fail:
+	v3d_job_cleanup((void *)job);
+	v3d_job_cleanup(clean_job);
+	v3d_put_multisync_post_deps(&se);
+
+	return ret;
+}
+
+static const unsigned int cpu_job_bo_handle_count[] = {
+	[V3D_CPU_JOB_TYPE_INDIRECT_CSD] = 1,
+	[V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = 1,
+	[V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = 1,
+	[V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = 2,
+	[V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = 0,
+	[V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY] = 1,
+};
+
+/**
+ * v3d_submit_cpu_ioctl() - Submits a CPU job to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * Userspace specifies the CPU job type and data required to perform its
+ * operations through the drm_v3d_extension struct.
+ */
+int
+v3d_submit_cpu_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct v3d_dev *v3d = to_v3d_dev(dev);
+	struct drm_v3d_submit_cpu *args = data;
+	struct v3d_submit_ext se = {0};
+	struct v3d_submit_ext *out_se = NULL;
+	struct v3d_cpu_job *cpu_job = NULL;
+	struct v3d_csd_job *csd_job = NULL;
+	struct v3d_job *clean_job = NULL;
+	struct ww_acquire_ctx acquire_ctx;
+	int ret;
+
+	if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
+		DRM_INFO("Invalid flags: %d\n", args->flags);
+		return -EINVAL;
+	}
+
+	ret = v3d_job_allocate((void *)&cpu_job, sizeof(*cpu_job));
+	if (ret)
+		return ret;
+
+	if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+		ret = v3d_get_extensions(file_priv, args->extensions, &se, cpu_job);
+		if (ret) {
+			DRM_DEBUG("Failed to get extensions.\n");
+			goto fail;
+		}
+	}
+
+	/* Every CPU job must have a CPU job user extension */
+	if (!cpu_job->job_type) {
+		DRM_DEBUG("CPU job must have a CPU job user extension.\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (args->bo_handle_count != cpu_job_bo_handle_count[cpu_job->job_type]) {
+		DRM_DEBUG("This CPU job was not submitted with the proper number of BOs.\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	trace_v3d_submit_cpu_ioctl(&v3d->drm, cpu_job->job_type);
+
+	ret = v3d_job_init(v3d, file_priv, &cpu_job->base,
+			   v3d_job_free, 0, &se, V3D_CPU);
+	if (ret)
+		goto fail;
+
+	clean_job = cpu_job->indirect_csd.clean_job;
+	csd_job = cpu_job->indirect_csd.job;
+
+	if (args->bo_handle_count) {
+		ret = v3d_lookup_bos(dev, file_priv, &cpu_job->base,
+				     args->bo_handles, args->bo_handle_count);
+		if (ret)
+			goto fail;
+
+		ret = v3d_lock_bo_reservations(&cpu_job->base, &acquire_ctx);
+		if (ret)
+			goto fail;
+	}
+
+	mutex_lock(&v3d->sched_lock);
+	v3d_push_job(&cpu_job->base);
+
+	switch (cpu_job->job_type) {
+	case V3D_CPU_JOB_TYPE_INDIRECT_CSD:
+		ret = drm_sched_job_add_dependency(&csd_job->base.base,
+						   dma_fence_get(cpu_job->base.done_fence));
+		if (ret)
+			goto fail_unreserve;
+
+		v3d_push_job(&csd_job->base);
+
+		ret = drm_sched_job_add_dependency(&clean_job->base,
+						   dma_fence_get(csd_job->base.done_fence));
+		if (ret)
+			goto fail_unreserve;
+
+		v3d_push_job(clean_job);
+
+		break;
+	default:
+		break;
+	}
+	mutex_unlock(&v3d->sched_lock);
+
+	out_se = (cpu_job->job_type == V3D_CPU_JOB_TYPE_INDIRECT_CSD) ? NULL : &se;
+
+	v3d_attach_fences_and_unlock_reservation(file_priv,
+						 &cpu_job->base,
+						 &acquire_ctx, 0,
+						 out_se, cpu_job->base.done_fence);
+
+	switch (cpu_job->job_type) {
+	case V3D_CPU_JOB_TYPE_INDIRECT_CSD:
+		v3d_attach_fences_and_unlock_reservation(file_priv,
+							 clean_job,
+							 &cpu_job->indirect_csd.acquire_ctx,
+							 0, &se, clean_job->done_fence);
+		break;
+	default:
+		break;
+	}
+
+	v3d_job_put(&cpu_job->base);
+	v3d_job_put(&csd_job->base);
+	v3d_job_put(clean_job);
+
+	return 0;
+
+fail_unreserve:
+	mutex_unlock(&v3d->sched_lock);
+
+	drm_gem_unlock_reservations(cpu_job->base.bo, cpu_job->base.bo_count,
+				    &acquire_ctx);
+
+	drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
+				    &cpu_job->indirect_csd.acquire_ctx);
+
+fail:
+	/* Free the query arrays before v3d_job_cleanup() can drop the last
+	 * reference on cpu_job and free it under us.
+	 */
+	kvfree(cpu_job->timestamp_query.queries);
+	kvfree(cpu_job->performance_query.queries);
+	v3d_job_cleanup((void *)cpu_job);
+	v3d_job_cleanup((void *)csd_job);
+	v3d_job_cleanup(clean_job);
+	v3d_put_multisync_post_deps(&se);
+
+	return ret;
+}
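To tie the CPU-job path together, here is a hedged userspace sketch of submitting a timestamp-query CPU job: one BO (the timestamp buffer), one extension describing the query, and the DRM_V3D_SUBMIT_EXTENSION flag. Struct and ioctl names follow this series' uapi additions in include/uapi/drm/v3d_drm.h; error handling and syncobj creation are elided.

```c
#include <stdint.h>
#include <xf86drm.h>
#include <drm/v3d_drm.h>

/* Sketch: record one timestamp into @bo_handle at offset 0, signalling
 * @syncobj when the CPU job completes.
 */
static int submit_timestamp_query(int fd, uint32_t bo_handle, uint32_t syncobj)
{
	uint32_t offsets[1] = { 0 };
	uint32_t syncs[1] = { syncobj };
	struct drm_v3d_timestamp_query ext = {
		.base.id = DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY,
		.offsets = (uintptr_t)offsets,
		.syncs = (uintptr_t)syncs,
		.count = 1,
	};
	struct drm_v3d_submit_cpu submit = {
		.bo_handle_count = 1,	/* must match cpu_job_bo_handle_count[] */
		.flags = DRM_V3D_SUBMIT_EXTENSION,
		.bo_handles = (uintptr_t)&bo_handle,
		.extensions = (uintptr_t)&ext,
	};

	return drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_CPU, &submit);
}
```

Userspace then waits on the syncobj before reading the timestamp back out of the BO.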
diff --git a/drivers/gpu/drm/v3d/v3d_trace.h b/drivers/gpu/drm/v3d/v3d_trace.h
index 7aa8dc356e54c2226991f4534f37b3f29ff3c721..5917b94148f5a2425b3d6ad2babcfe2d6458750a 100644
--- a/drivers/gpu/drm/v3d/v3d_trace.h
+++ b/drivers/gpu/drm/v3d/v3d_trace.h
@@ -225,6 +225,63 @@ TRACE_EVENT(v3d_submit_csd,
 		      __entry->seqno)
 );
 
+TRACE_EVENT(v3d_submit_cpu_ioctl,
+	    TP_PROTO(struct drm_device *dev, enum v3d_cpu_job_type job_type),
+	    TP_ARGS(dev, job_type),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(enum v3d_cpu_job_type, job_type)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->job_type = job_type;
+			   ),
+
+	    TP_printk("dev=%u, job_type=%d",
+		      __entry->dev,
+		      __entry->job_type)
+);
+
+TRACE_EVENT(v3d_cpu_job_begin,
+	    TP_PROTO(struct drm_device *dev, enum v3d_cpu_job_type job_type),
+	    TP_ARGS(dev, job_type),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(enum v3d_cpu_job_type, job_type)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->job_type = job_type;
+			   ),
+
+	    TP_printk("dev=%u, job_type=%d",
+		      __entry->dev,
+		      __entry->job_type)
+);
+
+TRACE_EVENT(v3d_cpu_job_end,
+	    TP_PROTO(struct drm_device *dev, enum v3d_cpu_job_type job_type),
+	    TP_ARGS(dev, job_type),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(enum v3d_cpu_job_type, job_type)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->job_type = job_type;
+			   ),
+
+	    TP_printk("dev=%u, job_type=%d",
+		      __entry->dev,
+		      __entry->job_type)
+);
+
 TRACE_EVENT(v3d_cache_clean_begin,
 	    TP_PROTO(struct drm_device *dev),
 	    TP_ARGS(dev),
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index 047b958123341bf86ee83a5f1ef89548da34afea..cd9e66a06596a7a834af4917aa9f1f59811827a3 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -182,7 +182,7 @@ DEFINE_DRM_GEM_FOPS(vbox_fops);
 
 static const struct drm_driver driver = {
 	.driver_features =
-	    DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+	    DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC | DRIVER_CURSOR_HOTSPOT,
 
 	.fops = &vbox_fops,
 	.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index 341edd982cb3be10054266989d3728d897e3e3a3..9ff3bade979577df0bc2e6e9653b3fb8fd486062 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -429,8 +429,8 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane,
 	flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE |
 		VBOX_MOUSE_POINTER_ALPHA;
 	hgsmi_update_pointer_shape(vbox->guest_pool, flags,
-				   min_t(u32, max(fb->hot_x, 0), width),
-				   min_t(u32, max(fb->hot_y, 0), height),
+				   min_t(u32, max(new_state->hotspot_x, 0), width),
+				   min_t(u32, max(new_state->hotspot_y, 0), height),
 				   width, height, vbox->cursor_data, data_size);
 
 	mutex_unlock(&vbox->hw_mutex);
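The hotspot conversions above depend on the new DRIVER_CURSOR_HOTSPOT feature: the DRM core now exposes per-plane "HOTSPOT_X"/"HOTSPOT_Y" properties and mirrors their values into drm_plane_state, so drivers stop reaching into the framebuffer. A rough sketch of the userspace (VM compositor) side, assuming the property IDs were already looked up via drmModeObjectGetProperties():

```c
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Sketch: stage cursor hotspot properties in an atomic request. */
static int set_cursor_hotspot(int fd, drmModeAtomicReq *req, uint32_t plane_id,
			      uint32_t hotspot_x_prop, uint32_t hotspot_y_prop,
			      uint64_t x, uint64_t y)
{
	if (drmModeAtomicAddProperty(req, plane_id, hotspot_x_prop, x) < 0)
		return -1;
	if (drmModeAtomicAddProperty(req, plane_id, hotspot_y_prop, y) < 0)
		return -1;
	return drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
}
```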
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 4334c76084084284ab0ff7d61375efe6b2953e83..f8e9abe647b927b211abb4bbc0751ea318d80369 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -177,7 +177,7 @@ static const struct drm_driver driver = {
 	 * out via drm_device::driver_features:
 	 */
 	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC |
-			   DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
+			   DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE | DRIVER_CURSOR_HOTSPOT,
 	.open = virtio_gpu_driver_open,
 	.postclose = virtio_gpu_driver_postclose,
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a2e045f3a0004a1be3d7681fff95c79107cc4432..a72a2dbda031c220e62d018f3b77aab4bb0401d3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -79,6 +79,8 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
 {
 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
 										 plane);
+	struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
+										 plane);
 	bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
 	struct drm_crtc_state *crtc_state;
 	int ret;
@@ -86,6 +88,14 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
 	if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
 		return 0;
 
+	/*
+	 * Ignore damage clips if the framebuffer attached to the plane's state
+	 * has changed since the last plane update (page-flip). In this case, a
+	 * full plane update should happen because uploads are done per-buffer.
+	 */
+	if (old_plane_state->fb != new_plane_state->fb)
+		new_plane_state->ignore_damage_clips = true;
+
 	crtc_state = drm_atomic_get_crtc_state(state,
 					       new_plane_state->crtc);
 	if (IS_ERR(crtc_state))
@@ -323,16 +333,16 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 		DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
 			  plane->state->crtc_x,
 			  plane->state->crtc_y,
-			  plane->state->fb ? plane->state->fb->hot_x : 0,
-			  plane->state->fb ? plane->state->fb->hot_y : 0);
+			  plane->state->hotspot_x,
+			  plane->state->hotspot_y);
 		output->cursor.hdr.type =
 			cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
 		output->cursor.resource_id = cpu_to_le32(handle);
 		if (plane->state->fb) {
 			output->cursor.hot_x =
-				cpu_to_le32(plane->state->fb->hot_x);
+				cpu_to_le32(plane->state->hotspot_x);
 			output->cursor.hot_y =
-				cpu_to_le32(plane->state->fb->hot_y);
+				cpu_to_le32(plane->state->hotspot_y);
 		} else {
 			output->cursor.hot_x = cpu_to_le32(0);
 			output->cursor.hot_y = cpu_to_le32(0);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 8b24ecf60e3ec5aba4da7a5fd6bcc94f1e6d6e23..d3e308fdfd5be80bb5c8a606eccceae82810da9a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1611,7 +1611,7 @@ static const struct file_operations vmwgfx_driver_fops = {
 
 static const struct drm_driver driver = {
 	.driver_features =
-	DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM,
+	DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM | DRIVER_CURSOR_HOTSPOT,
 	.ioctls = vmw_ioctls,
 	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
 	.master_set = vmw_master_set,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 818b7f109f5380edc379f0b80e00649721abf31c..5fd0ccaa0b41b71adb6cdeacab7eae3062fcadf7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -768,13 +768,8 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
 	struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
 	s32 hotspot_x, hotspot_y;
 
-	hotspot_x = du->hotspot_x;
-	hotspot_y = du->hotspot_y;
-
-	if (new_state->fb) {
-		hotspot_x += new_state->fb->hot_x;
-		hotspot_y += new_state->fb->hot_y;
-	}
+	hotspot_x = du->hotspot_x + new_state->hotspot_x;
+	hotspot_y = du->hotspot_y + new_state->hotspot_y;
 
 	du->cursor_surface = vps->surf;
 	du->cursor_bo = vps->bo;
@@ -837,10 +832,21 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
 {
 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
 									   plane);
+	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+									   plane);
 	struct drm_crtc_state *crtc_state = NULL;
 	struct drm_framebuffer *new_fb = new_state->fb;
+	struct drm_framebuffer *old_fb = old_state->fb;
 	int ret;
 
+	/*
+	 * Ignore damage clips if the framebuffer attached to the plane's state
+	 * has changed since the last plane update (page-flip). In this case, a
+	 * full plane update should happen because uploads are done per-buffer.
+	 */
+	if (old_fb != new_fb)
+		new_state->ignore_damage_clips = true;
+
 	if (new_state->crtc)
 		crtc_state = drm_atomic_get_new_crtc_state(state,
 							   new_state->crtc);
diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c
index a7f8611be6f420a7473726f0b2b9185fe4286e8c..db3bb4afbfc46f9280f513a53391f4bb8a6f9b6a 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_kms.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c
@@ -27,7 +27,6 @@
 #include <drm/drm_managed.h>
 #include <drm/drm_mode_config.h>
 #include <drm/drm_plane.h>
-#include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_simple_kms_helper.h>
 #include <drm/drm_vblank.h>
diff --git a/drivers/hid/hid-picolcd_fb.c b/drivers/hid/hid-picolcd_fb.c
index a4dccdcda26fcbfa44979cedc31bb6c2e0bc038f..d7dddd99d325e89dac318519de5178978485d980 100644
--- a/drivers/hid/hid-picolcd_fb.c
+++ b/drivers/hid/hid-picolcd_fb.c
@@ -505,6 +505,7 @@ int picolcd_init_framebuffer(struct picolcd_data *data)
 		dev_err(dev, "can't get a free page for framebuffer\n");
 		goto err_nomem;
 	}
+	info->flags |= FBINFO_VIRTFB;
 	info->screen_buffer = fbdata->bitmap;
 	info->fix.smem_start = (unsigned long)fbdata->bitmap;
 	memset(fbdata->vbitmap, 0xff, PICOLCDFB_SIZE);
diff --git a/drivers/media/pci/ivtv/Kconfig b/drivers/media/pci/ivtv/Kconfig
index 9be52101bc4f2d8254da5a925faea2d3df0461b4..2498f9079b756eefa99075b2b34b602665408507 100644
--- a/drivers/media/pci/ivtv/Kconfig
+++ b/drivers/media/pci/ivtv/Kconfig
@@ -48,9 +48,7 @@ config VIDEO_IVTV_ALSA
 config VIDEO_FB_IVTV
 	tristate "Conexant cx23415 framebuffer support"
 	depends on VIDEO_IVTV && FB
-	select FB_CFB_FILLRECT
-	select FB_CFB_COPYAREA
-	select FB_CFB_IMAGEBLIT
+	select FB_IOMEM_HELPERS
 	help
 	  This is a framebuffer driver for the Conexant cx23415 MPEG
 	  encoder/decoder.
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index 23c8c094e791b9ee5a6e7ab1dafd36166eaa4cf0..410477e3e621697f40c61fae9a3a1568c6174ab3 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -927,17 +927,17 @@ static int ivtvfb_blank(int blank_mode, struct fb_info *info)
 
 static const struct fb_ops ivtvfb_ops = {
 	.owner = THIS_MODULE,
+	.fb_read        = fb_io_read,
 	.fb_write       = ivtvfb_write,
 	.fb_check_var   = ivtvfb_check_var,
 	.fb_set_par     = ivtvfb_set_par,
 	.fb_setcolreg   = ivtvfb_setcolreg,
-	.fb_fillrect    = cfb_fillrect,
-	.fb_copyarea    = cfb_copyarea,
-	.fb_imageblit   = cfb_imageblit,
+	__FB_DEFAULT_IOMEM_OPS_DRAW,
 	.fb_cursor      = NULL,
 	.fb_ioctl       = ivtvfb_ioctl,
 	.fb_pan_display = ivtvfb_pan_display,
 	.fb_blank       = ivtvfb_blank,
+	__FB_DEFAULT_IOMEM_OPS_MMAP,
 };
 
 /* Restore hardware after firmware restart */
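For reference, the FB_IOMEM_HELPERS conversion above is purely mechanical; the two grouped initializers expand to the same cfb_* defaults that were listed explicitly before (paraphrased from include/linux/fb.h; consult the header for the authoritative form):

```c
#define __FB_DEFAULT_IOMEM_OPS_DRAW \
	.fb_fillrect	= cfb_fillrect, \
	.fb_copyarea	= cfb_copyarea, \
	.fb_imageblit	= cfb_imageblit

#define __FB_DEFAULT_IOMEM_OPS_MMAP \
	.fb_mmap	= NULL	/* use the fbdev core's default I/O-memory mmap */
```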
diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig
index d891058b7c39a1683e778f0af804e2b53ef8cfba..846f8c99547fd5132feaa1e41093b8eab51714f9 100644
--- a/drivers/phy/qualcomm/Kconfig
+++ b/drivers/phy/qualcomm/Kconfig
@@ -63,7 +63,7 @@ config PHY_QCOM_QMP_COMBO
 	depends on DRM || DRM=n
 	select GENERIC_PHY
 	select MFD_SYSCON
-	select DRM_PANEL_BRIDGE if DRM
+	select DRM_AUX_BRIDGE if DRM_BRIDGE
 	help
 	  Enable this to support the QMP Combo PHY transceiver that is used
 	  with USB3 and DisplayPort controllers on Qualcomm chips.
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 9c87845c78ec24aa14f0993a7b35e4b0034a565f..f6c727249104fdd3dfc016ec908d792f3dd65316 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -21,7 +21,7 @@
 #include <linux/usb/typec.h>
 #include <linux/usb/typec_mux.h>
 
-#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
 
 #include <dt-bindings/phy/phy-qcom-qmp.h>
 
@@ -1419,8 +1419,6 @@ struct qmp_combo {
 	struct clk_hw dp_link_hw;
 	struct clk_hw dp_pixel_hw;
 
-	struct drm_bridge bridge;
-
 	struct typec_switch_dev *sw;
 	enum typec_orientation orientation;
 };
@@ -3191,44 +3189,6 @@ static int qmp_combo_typec_switch_register(struct qmp_combo *qmp)
 }
 #endif
 
-#if IS_ENABLED(CONFIG_DRM)
-static int qmp_combo_bridge_attach(struct drm_bridge *bridge,
-				   enum drm_bridge_attach_flags flags)
-{
-	struct qmp_combo *qmp = container_of(bridge, struct qmp_combo, bridge);
-	struct drm_bridge *next_bridge;
-
-	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
-		return -EINVAL;
-
-	next_bridge = devm_drm_of_get_bridge(qmp->dev, qmp->dev->of_node, 0, 0);
-	if (IS_ERR(next_bridge)) {
-		dev_err(qmp->dev, "failed to acquire drm_bridge: %pe\n", next_bridge);
-		return PTR_ERR(next_bridge);
-	}
-
-	return drm_bridge_attach(bridge->encoder, next_bridge, bridge,
-				 DRM_BRIDGE_ATTACH_NO_CONNECTOR);
-}
-
-static const struct drm_bridge_funcs qmp_combo_bridge_funcs = {
-	.attach	= qmp_combo_bridge_attach,
-};
-
-static int qmp_combo_dp_register_bridge(struct qmp_combo *qmp)
-{
-	qmp->bridge.funcs = &qmp_combo_bridge_funcs;
-	qmp->bridge.of_node = qmp->dev->of_node;
-
-	return devm_drm_bridge_add(qmp->dev, &qmp->bridge);
-}
-#else
-static int qmp_combo_dp_register_bridge(struct qmp_combo *qmp)
-{
-	return 0;
-}
-#endif
-
 static int qmp_combo_parse_dt_lecacy_dp(struct qmp_combo *qmp, struct device_node *np)
 {
 	struct device *dev = qmp->dev;
@@ -3440,7 +3400,7 @@ static int qmp_combo_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
-	ret = qmp_combo_dp_register_bridge(qmp);
+	ret = drm_aux_bridge_register(dev);
 	if (ret)
 		return ret;
 
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index b3634e10f6f5ec5c4476fa6ef6145220c7efa36e..c954001ae79ec2530c75caf7074e1cf947ce2358 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -86,6 +86,7 @@ config QCOM_PMIC_GLINK
 	depends on OF
 	select AUXILIARY_BUS
 	select QCOM_PDR_HELPERS
+	select DRM_AUX_HPD_BRIDGE
 	help
 	  The Qualcomm PMIC GLINK driver provides access, over GLINK, to the
 	  USB and battery firmware running on one of the coprocessors in
diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
index b78279e2f54cfd06bc35c8fd63551c1092625ec7..053b7393e26a8eaa8b1c47f441f791d3bc7476a6 100644
--- a/drivers/soc/qcom/pmic_glink_altmode.c
+++ b/drivers/soc/qcom/pmic_glink_altmode.c
@@ -11,7 +11,7 @@
 #include <linux/mutex.h>
 #include <linux/property.h>
 #include <linux/soc/qcom/pdr.h>
-#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
 
 #include <linux/usb/typec_altmode.h>
 #include <linux/usb/typec_dp.h>
@@ -76,7 +76,7 @@ struct pmic_glink_altmode_port {
 
 	struct work_struct work;
 
-	struct drm_bridge bridge;
+	struct device *bridge;
 
 	enum typec_orientation orientation;
 	u16 svid;
@@ -230,10 +230,10 @@ static void pmic_glink_altmode_worker(struct work_struct *work)
 	else
 		pmic_glink_altmode_enable_usb(altmode, alt_port);
 
-	if (alt_port->hpd_state)
-		drm_bridge_hpd_notify(&alt_port->bridge, connector_status_connected);
-	else
-		drm_bridge_hpd_notify(&alt_port->bridge, connector_status_disconnected);
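+	/* forward the current hot-plug state through the aux HPD bridge */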
+	drm_aux_hpd_bridge_notify(alt_port->bridge,
+				  alt_port->hpd_state ?
+				  connector_status_connected :
+				  connector_status_disconnected);
 
 	pmic_glink_altmode_request(altmode, ALTMODE_PAN_ACK, alt_port->index);
 };
@@ -365,16 +365,6 @@ static void pmic_glink_altmode_callback(const void *data, size_t len, void *priv
 	}
 }
 
-static int pmic_glink_altmode_attach(struct drm_bridge *bridge,
-				     enum drm_bridge_attach_flags flags)
-{
-	return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
-}
-
-static const struct drm_bridge_funcs pmic_glink_altmode_bridge_funcs = {
-	.attach = pmic_glink_altmode_attach,
-};
-
 static void pmic_glink_altmode_put_retimer(void *data)
 {
 	typec_retimer_put(data);
@@ -464,15 +454,10 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
 		alt_port->index = port;
 		INIT_WORK(&alt_port->work, pmic_glink_altmode_worker);
 
-		alt_port->bridge.funcs = &pmic_glink_altmode_bridge_funcs;
-		alt_port->bridge.of_node = to_of_node(fwnode);
-		alt_port->bridge.ops = DRM_BRIDGE_OP_HPD;
-		alt_port->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
-
-		ret = devm_drm_bridge_add(dev, &alt_port->bridge);
-		if (ret) {
+		alt_port->bridge = drm_dp_hpd_bridge_register(dev, to_of_node(fwnode));
+		if (IS_ERR(alt_port->bridge)) {
 			fwnode_handle_put(fwnode);
-			return ret;
+			return PTR_ERR(alt_port->bridge);
 		}
 
 		alt_port->dp_alt.svid = USB_TYPEC_DP_SID;
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 79bcd5bd49380ae3a00096e39eb326b5ecc669d2..04c1b32a22c5e1944687608227658f8f0d77d4b4 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -663,17 +663,54 @@ static int sm750fb_set_drv(struct lynxfb_par *par)
 	return ret;
 }
 
-static struct fb_ops lynxfb_ops = {
+static const struct fb_ops lynxfb_ops = {
 	.owner = THIS_MODULE,
+	FB_DEFAULT_IOMEM_OPS,
 	.fb_check_var =  lynxfb_ops_check_var,
 	.fb_set_par = lynxfb_ops_set_par,
 	.fb_setcolreg = lynxfb_ops_setcolreg,
 	.fb_blank = lynxfb_ops_blank,
-	.fb_fillrect = cfb_fillrect,
-	.fb_imageblit = cfb_imageblit,
-	.fb_copyarea = cfb_copyarea,
-	/* cursor */
+	.fb_pan_display = lynxfb_ops_pan_display,
+};
+
+static const struct fb_ops lynxfb_ops_with_cursor = {
+	.owner = THIS_MODULE,
+	FB_DEFAULT_IOMEM_OPS,
+	.fb_check_var =  lynxfb_ops_check_var,
+	.fb_set_par = lynxfb_ops_set_par,
+	.fb_setcolreg = lynxfb_ops_setcolreg,
+	.fb_blank = lynxfb_ops_blank,
+	.fb_pan_display = lynxfb_ops_pan_display,
+	.fb_cursor = lynxfb_ops_cursor,
+};
+
+static const struct fb_ops lynxfb_ops_accel = {
+	.owner = THIS_MODULE,
+	__FB_DEFAULT_IOMEM_OPS_RDWR,
+	.fb_check_var =  lynxfb_ops_check_var,
+	.fb_set_par = lynxfb_ops_set_par,
+	.fb_setcolreg = lynxfb_ops_setcolreg,
+	.fb_blank = lynxfb_ops_blank,
+	.fb_pan_display = lynxfb_ops_pan_display,
+	.fb_fillrect = lynxfb_ops_fillrect,
+	.fb_copyarea = lynxfb_ops_copyarea,
+	.fb_imageblit = lynxfb_ops_imageblit,
+	__FB_DEFAULT_IOMEM_OPS_MMAP,
+};
+
+static const struct fb_ops lynxfb_ops_accel_with_cursor = {
+	.owner = THIS_MODULE,
+	__FB_DEFAULT_IOMEM_OPS_RDWR,
+	.fb_check_var =  lynxfb_ops_check_var,
+	.fb_set_par = lynxfb_ops_set_par,
+	.fb_setcolreg = lynxfb_ops_setcolreg,
+	.fb_blank = lynxfb_ops_blank,
+	.fb_pan_display = lynxfb_ops_pan_display,
+	.fb_fillrect = lynxfb_ops_fillrect,
+	.fb_copyarea = lynxfb_ops_copyarea,
+	.fb_imageblit = lynxfb_ops_imageblit,
 	.fb_cursor = lynxfb_ops_cursor,
+	__FB_DEFAULT_IOMEM_OPS_MMAP,
 };
 
 static int lynxfb_set_fbinfo(struct fb_info *info, int index)
@@ -714,7 +751,6 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
 	par->index = index;
 	output->channel = &crtc->channel;
 	sm750fb_set_drv(par);
-	lynxfb_ops.fb_pan_display = lynxfb_ops_pan_display;
 
 	/*
 	 * set current cursor variable and proc pointer,
@@ -731,19 +767,22 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
 	crtc->cursor.vstart = sm750_dev->pvMem + crtc->cursor.offset;
 
 	memset_io(crtc->cursor.vstart, 0, crtc->cursor.size);
-	if (!g_hwcursor) {
-		lynxfb_ops.fb_cursor = NULL;
+	if (!g_hwcursor)
 		sm750_hw_cursor_disable(&crtc->cursor);
-	}
 
 	/* set info->fbops, must be set before fb_find_mode */
 	if (!sm750_dev->accel_off) {
 		/* use 2d acceleration */
-		lynxfb_ops.fb_fillrect = lynxfb_ops_fillrect;
-		lynxfb_ops.fb_copyarea = lynxfb_ops_copyarea;
-		lynxfb_ops.fb_imageblit = lynxfb_ops_imageblit;
+		if (!g_hwcursor)
+			info->fbops = &lynxfb_ops_accel;
+		else
+			info->fbops = &lynxfb_ops_accel_with_cursor;
+	} else {
+		if (!g_hwcursor)
+			info->fbops = &lynxfb_ops;
+		else
+			info->fbops = &lynxfb_ops_with_cursor;
 	}
-	info->fbops = &lynxfb_ops;
 
 	if (!g_fbmode[index]) {
 		g_fbmode[index] = g_def_fbmode;
diff --git a/drivers/usb/typec/mux/Kconfig b/drivers/usb/typec/mux/Kconfig
index 816b9bd08355ff60bf73187a65fe985134a37710..5120942f309d4d3224a6094d06f219401de77e01 100644
--- a/drivers/usb/typec/mux/Kconfig
+++ b/drivers/usb/typec/mux/Kconfig
@@ -40,7 +40,7 @@ config TYPEC_MUX_NB7VPQ904M
 	tristate "On Semiconductor NB7VPQ904M Type-C redriver driver"
 	depends on I2C
 	depends on DRM || DRM=n
-	select DRM_PANEL_BRIDGE if DRM
+	select DRM_AUX_BRIDGE if DRM_BRIDGE
 	select REGMAP_I2C
 	help
 	  Say Y or M if your system has a On Semiconductor NB7VPQ904M Type-C
diff --git a/drivers/usb/typec/mux/nb7vpq904m.c b/drivers/usb/typec/mux/nb7vpq904m.c
index cda206cf0c38760fd28286b2062a6d324c462a67..b17826713753a62496d4af1507281b609b0ec0da 100644
--- a/drivers/usb/typec/mux/nb7vpq904m.c
+++ b/drivers/usb/typec/mux/nb7vpq904m.c
@@ -11,7 +11,7 @@
 #include <linux/regmap.h>
 #include <linux/bitfield.h>
 #include <linux/of_graph.h>
-#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
 #include <linux/usb/typec_dp.h>
 #include <linux/usb/typec_mux.h>
 #include <linux/usb/typec_retimer.h>
@@ -70,8 +70,6 @@ struct nb7vpq904m {
 	bool swap_data_lanes;
 	struct typec_switch *typec_switch;
 
-	struct drm_bridge bridge;
-
 	struct mutex lock; /* protect non-concurrent retimer & switch */
 
 	enum typec_orientation orientation;
@@ -297,44 +295,6 @@ static int nb7vpq904m_retimer_set(struct typec_retimer *retimer, struct typec_re
 	return ret;
 }
 
-#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE)
-static int nb7vpq904m_bridge_attach(struct drm_bridge *bridge,
-				    enum drm_bridge_attach_flags flags)
-{
-	struct nb7vpq904m *nb7 = container_of(bridge, struct nb7vpq904m, bridge);
-	struct drm_bridge *next_bridge;
-
-	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
-		return -EINVAL;
-
-	next_bridge = devm_drm_of_get_bridge(&nb7->client->dev, nb7->client->dev.of_node, 0, 0);
-	if (IS_ERR(next_bridge)) {
-		dev_err(&nb7->client->dev, "failed to acquire drm_bridge: %pe\n", next_bridge);
-		return PTR_ERR(next_bridge);
-	}
-
-	return drm_bridge_attach(bridge->encoder, next_bridge, bridge,
-				 DRM_BRIDGE_ATTACH_NO_CONNECTOR);
-}
-
-static const struct drm_bridge_funcs nb7vpq904m_bridge_funcs = {
-	.attach	= nb7vpq904m_bridge_attach,
-};
-
-static int nb7vpq904m_register_bridge(struct nb7vpq904m *nb7)
-{
-	nb7->bridge.funcs = &nb7vpq904m_bridge_funcs;
-	nb7->bridge.of_node = nb7->client->dev.of_node;
-
-	return devm_drm_bridge_add(&nb7->client->dev, &nb7->bridge);
-}
-#else
-static int nb7vpq904m_register_bridge(struct nb7vpq904m *nb7)
-{
-	return 0;
-}
-#endif
-
 static const struct regmap_config nb7_regmap = {
 	.max_register = 0x1f,
 	.reg_bits = 8,
@@ -461,7 +421,7 @@ static int nb7vpq904m_probe(struct i2c_client *client)
 
 	gpiod_set_value(nb7->enable_gpio, 1);
 
-	ret = nb7vpq904m_register_bridge(nb7);
+	ret = drm_aux_bridge_register(dev);
 	if (ret)
 		goto err_disable_gpio;
 
diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig
index 0b2993fef564bc255fe44806fcc1a01f3f4b982c..64d5421c69e690b8d1cdc46aebf34be9f1f01ed4 100644
--- a/drivers/usb/typec/tcpm/Kconfig
+++ b/drivers/usb/typec/tcpm/Kconfig
@@ -80,6 +80,7 @@ config TYPEC_QCOM_PMIC
 	tristate "Qualcomm PMIC USB Type-C Port Controller Manager driver"
 	depends on ARCH_QCOM || COMPILE_TEST
 	depends on DRM || DRM=n
+	select DRM_AUX_HPD_BRIDGE if DRM_BRIDGE
 	help
 	  A Type-C port and Power Delivery driver which aggregates two
 	  discrete pieces of silicon in the PM8150b PMIC block: the
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
index 581199d37b49daaceb130b378a9e690ab8233c5d..1a2b4bddaa97e86da87b18ae5174413e3238af6a 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
@@ -18,7 +18,7 @@
 #include <linux/usb/tcpm.h>
 #include <linux/usb/typec_mux.h>
 
-#include <drm/drm_bridge.h>
+#include <drm/bridge/aux-bridge.h>
 
 #include "qcom_pmic_typec_pdphy.h"
 #include "qcom_pmic_typec_port.h"
@@ -36,7 +36,6 @@ struct pmic_typec {
 	struct pmic_typec_port	*pmic_typec_port;
 	bool			vbus_enabled;
 	struct mutex		lock;		/* VBUS state serialization */
-	struct drm_bridge	bridge;
 };
 
 #define tcpc_to_tcpm(_tcpc_) container_of(_tcpc_, struct pmic_typec, tcpc)
@@ -150,35 +149,6 @@ static int qcom_pmic_typec_init(struct tcpc_dev *tcpc)
 	return 0;
 }
 
-#if IS_ENABLED(CONFIG_DRM)
-static int qcom_pmic_typec_attach(struct drm_bridge *bridge,
-				     enum drm_bridge_attach_flags flags)
-{
-	return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
-}
-
-static const struct drm_bridge_funcs qcom_pmic_typec_bridge_funcs = {
-	.attach = qcom_pmic_typec_attach,
-};
-
-static int qcom_pmic_typec_init_drm(struct pmic_typec *tcpm)
-{
-	tcpm->bridge.funcs = &qcom_pmic_typec_bridge_funcs;
-#ifdef CONFIG_OF
-	tcpm->bridge.of_node = of_get_child_by_name(tcpm->dev->of_node, "connector");
-#endif
-	tcpm->bridge.ops = DRM_BRIDGE_OP_HPD;
-	tcpm->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
-
-	return devm_drm_bridge_add(tcpm->dev, &tcpm->bridge);
-}
-#else
-static int qcom_pmic_typec_init_drm(struct pmic_typec *tcpm)
-{
-	return 0;
-}
-#endif
-
 static int qcom_pmic_typec_probe(struct platform_device *pdev)
 {
 	struct pmic_typec *tcpm;
@@ -186,6 +156,7 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
 	struct device_node *np = dev->of_node;
 	const struct pmic_typec_resources *res;
 	struct regmap *regmap;
+	struct device *bridge_dev;
 	u32 base[2];
 	int ret;
 
@@ -241,14 +212,14 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
 	mutex_init(&tcpm->lock);
 	platform_set_drvdata(pdev, tcpm);
 
-	ret = qcom_pmic_typec_init_drm(tcpm);
-	if (ret)
-		return ret;
-
 	tcpm->tcpc.fwnode = device_get_named_child_node(tcpm->dev, "connector");
 	if (!tcpm->tcpc.fwnode)
 		return -EINVAL;
 
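+	/* register a DRM aux HPD bridge for the Type-C connector node */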
+	bridge_dev = drm_dp_hpd_bridge_register(tcpm->dev, to_of_node(tcpm->tcpc.fwnode));
+	if (IS_ERR(bridge_dev))
+		return PTR_ERR(bridge_dev);
+
 	tcpm->tcpm_port = tcpm_register_port(tcpm->dev, &tcpm->tcpc);
 	if (IS_ERR(tcpm->tcpm_port)) {
 		ret = PTR_ERR(tcpm->tcpm_port);
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 53693c826ebdd21dabbdcbc585aef4b42eee6d4b..d5909a9206ffd7087a7e5ddb5043d62e36c43f64 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -146,10 +146,8 @@ config FB_ACORN
 config FB_CLPS711X
 	tristate "CLPS711X LCD support"
 	depends on FB && (ARCH_CLPS711X || COMPILE_TEST)
+	select FB_IOMEM_HELPERS
 	select FB_MODE_HELPERS
-	select FB_SYS_FILLRECT
-	select FB_SYS_COPYAREA
-	select FB_SYS_IMAGEBLIT
 	select LCD_CLASS_DEVICE
 	select VIDEOMODE_HELPERS
 	help
@@ -180,10 +178,7 @@ config FB_IMX
 config FB_CYBER2000
 	tristate "CyberPro 2000/2010/5000 support"
 	depends on FB && PCI && (BROKEN || !SPARC64)
-	select FB_CFB_FILLRECT
-	select FB_CFB_COPYAREA
-	select FB_CFB_IMAGEBLIT
-	select FB_IOMEM_FOPS
+	select FB_IOMEM_HELPERS
 	select VIDEO_NOMODESET
 	help
 	  This enables support for the Integraphics CyberPro 20x0 and 5000
@@ -272,10 +267,7 @@ config FB_FM2
 config FB_ARC
 	tristate "Arc Monochrome LCD board support"
 	depends on FB && (X86 || COMPILE_TEST)
-	select FB_SYS_FILLRECT
-	select FB_SYS_COPYAREA
-	select FB_SYS_IMAGEBLIT
-	select FB_SYS_FOPS
+	select FB_SYSMEM_HELPERS_DEFERRED
 	help
 	  This enables support for the Arc Monochrome LCD board. The board
 	  is based on the KS-108 lcd controller and is typically a matrix
@@ -1460,10 +1452,7 @@ config FB_AU1100
 config FB_AU1200
 	bool "Au1200/Au1300 LCD Driver"
 	depends on (FB = y) && MIPS_ALCHEMY
-	select FB_SYS_FILLRECT
-	select FB_SYS_COPYAREA
-	select FB_SYS_IMAGEBLIT
-	select FB_SYS_FOPS
+	select FB_DMAMEM_HELPERS
 	help
 	  This is the framebuffer driver for the Au1200/Au1300 SOCs.
 	  It can drive various panels and CRTs by passing in kernel cmd line
@@ -1475,6 +1464,7 @@ config FB_VT8500
 	select FB_SYS_FILLRECT if (!FB_WMT_GE_ROPS)
 	select FB_SYS_COPYAREA if (!FB_WMT_GE_ROPS)
 	select FB_SYS_IMAGEBLIT
+	select FB_SYSMEM_FOPS
 	select FB_MODE_HELPERS
 	select VIDEOMODE_HELPERS
 	help
@@ -1487,6 +1477,7 @@ config FB_WM8505
 	select FB_SYS_FILLRECT if (!FB_WMT_GE_ROPS)
 	select FB_SYS_COPYAREA if (!FB_WMT_GE_ROPS)
 	select FB_SYS_IMAGEBLIT
+	select FB_SYSMEM_FOPS
 	select FB_MODE_HELPERS
 	select VIDEOMODE_HELPERS
 	help
@@ -1637,12 +1628,9 @@ config FB_SH_MOBILE_LCDC
 	depends on FB && HAVE_CLK && HAS_IOMEM
 	depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
 	depends on FB_DEVICE
-	select FB_SYS_FILLRECT
-	select FB_SYS_COPYAREA
-	select FB_SYS_IMAGEBLIT
-	select FB_SYS_FOPS
-	select FB_DEFERRED_IO
 	select FB_BACKLIGHT
+	select FB_DEFERRED_IO
+	select FB_DMAMEM_HELPERS
 	help
 	  Frame buffer driver for the on-chip SH-Mobile LCD controller.
 
@@ -1689,11 +1677,7 @@ config FB_SMSCUFX
 	tristate "SMSC UFX6000/7000 USB Framebuffer support"
 	depends on FB && USB
 	select FB_MODE_HELPERS
-	select FB_SYS_FILLRECT
-	select FB_SYS_COPYAREA
-	select FB_SYS_IMAGEBLIT
-	select FB_SYS_FOPS
-	select FB_DEFERRED_IO
+	select FB_SYSMEM_HELPERS_DEFERRED
 	help
 	  This is a kernel framebuffer driver for SMSC UFX USB devices.
 	  Supports fbdev clients like xf86-video-fbdev, kdrive, fbi, and
@@ -1706,11 +1690,7 @@ config FB_UDL
 	depends on FB && USB
 	depends on FB_DEVICE
 	select FB_MODE_HELPERS
-	select FB_SYS_FILLRECT
-	select FB_SYS_COPYAREA
-	select FB_SYS_IMAGEBLIT
-	select FB_SYS_FOPS
-	select FB_DEFERRED_IO
+	select FB_SYSMEM_HELPERS_DEFERRED
 	help
 	  This is a kernel framebuffer driver for DisplayLink USB devices.
 	  Supports fbdev clients like xf86-video-fbdev, kdrive, fbi, and
@@ -1732,10 +1712,7 @@ config FB_IBM_GXT4500
 config FB_PS3
 	tristate "PS3 GPU framebuffer driver"
 	depends on FB && PS3_PS3AV
-	select FB_SYS_FILLRECT
-	select FB_SYS_COPYAREA
-	select FB_SYS_IMAGEBLIT
-	select FB_SYS_FOPS
+	select FB_SYSMEM_HELPERS
 	help
 	  Include support for the virtual frame buffer in the PS3 platform.
 
@@ -1800,10 +1777,7 @@ config FB_DA8XX
 config FB_VIRTUAL
 	tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)"
 	depends on FB
-	select FB_SYS_FILLRECT
-	select FB_SYS_COPYAREA
-	select FB_SYS_IMAGEBLIT
-	select FB_SYS_FOPS
+	select FB_SYSMEM_HELPERS
 	help
 	  This is a `virtual' frame buffer device. It operates on a chunk of
 	  unswappable kernel memory instead of on the memory of a graphics
diff --git a/drivers/video/fbdev/acornfb.c b/drivers/video/fbdev/acornfb.c
index 163d2c9f951c3a1610f7841a6e5d5a81a2211678..f0600f6ca2548e2113b2c2081b8d30d250478a8a 100644
--- a/drivers/video/fbdev/acornfb.c
+++ b/drivers/video/fbdev/acornfb.c
@@ -605,7 +605,7 @@ acornfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
 
 static const struct fb_ops acornfb_ops = {
 	.owner		= THIS_MODULE,
-	FB_IOMEM_DEFAULT_OPS,
+	FB_DEFAULT_IOMEM_OPS,
 	.fb_check_var	= acornfb_check_var,
 	.fb_set_par	= acornfb_set_par,
 	.fb_setcolreg	= acornfb_setcolreg,
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 0399db369e709d102617be637f29e3a4f25b0ef1..47d373f04f3f0367e51672db57241e8fdbcb1079 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -829,6 +829,8 @@ static int clcdfb_of_dma_setup(struct clcd_fb *fb)
 
 static int clcdfb_of_dma_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
 {
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
 	return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base,
 			   fb->fb.fix.smem_start, fb->fb.fix.smem_len);
 }
diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
index cff11cb04a551aee1c6becc3d9506d1f5d8bb3f4..b2408543277cee9040d1d7027ae337b821619caf 100644
--- a/drivers/video/fbdev/arcfb.c
+++ b/drivers/video/fbdev/arcfb.c
@@ -363,39 +363,6 @@ static void arcfb_lcd_update(struct arcfb_par *par, unsigned int dx,
 	}
 }
 
-static void arcfb_fillrect(struct fb_info *info,
-			   const struct fb_fillrect *rect)
-{
-	struct arcfb_par *par = info->par;
-
-	sys_fillrect(info, rect);
-
-	/* update the physical lcd */
-	arcfb_lcd_update(par, rect->dx, rect->dy, rect->width, rect->height);
-}
-
-static void arcfb_copyarea(struct fb_info *info,
-			   const struct fb_copyarea *area)
-{
-	struct arcfb_par *par = info->par;
-
-	sys_copyarea(info, area);
-
-	/* update the physical lcd */
-	arcfb_lcd_update(par, area->dx, area->dy, area->width, area->height);
-}
-
-static void arcfb_imageblit(struct fb_info *info, const struct fb_image *image)
-{
-	struct arcfb_par *par = info->par;
-
-	sys_imageblit(info, image);
-
-	/* update the physical lcd */
-	arcfb_lcd_update(par, image->dx, image->dy, image->width,
-				image->height);
-}
-
 static int arcfb_ioctl(struct fb_info *info,
 			  unsigned int cmd, unsigned long arg)
 {
@@ -436,76 +403,48 @@ static int arcfb_ioctl(struct fb_info *info,
 	}
 }
 
-/*
- * this is the access path from userspace. they can seek and write to
- * the fb. it's inefficient for them to do anything less than 64*8
- * writes since we update the lcd in each write() anyway.
- */
-static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
-			   size_t count, loff_t *ppos)
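+/* Flush a damaged linear range of the framebuffer to the LCD as full scanlines */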
+static void arcfb_damage_range(struct fb_info *info, off_t off, size_t len)
 {
-	/* modded from epson 1355 */
-
-	unsigned long p;
-	int err;
-	unsigned int fbmemlength,x,y,w,h, bitppos, startpos, endpos, bitcount;
-	struct arcfb_par *par;
-	unsigned int xres;
-
-	if (!info->screen_buffer)
-		return -ENODEV;
-
-	p = *ppos;
-	par = info->par;
-	xres = info->var.xres;
-	fbmemlength = (xres * info->var.yres)/8;
-
-	if (p > fbmemlength)
-		return -ENOSPC;
-
-	err = 0;
-	if ((count + p) > fbmemlength) {
-		count = fbmemlength - p;
-		err = -ENOSPC;
-	}
-
-	if (count) {
-		char *base_addr;
-
-		base_addr = info->screen_buffer;
-		count -= copy_from_user(base_addr + p, buf, count);
-		*ppos += count;
-		err = -EFAULT;
-	}
-
+	struct arcfb_par *par = info->par;
+	unsigned int xres = info->var.xres;
+	unsigned int bitppos, startpos, endpos, bitcount;
+	unsigned int x, y, width, height;
 
-	bitppos = p*8;
+	bitppos = off * 8;
 	startpos = floorXres(bitppos, xres);
-	endpos = ceilXres((bitppos + (count*8)), xres);
+	endpos = ceilXres((bitppos + (len * 8)), xres);
 	bitcount = endpos - startpos;
 
 	x = startpos % xres;
 	y = startpos / xres;
-	w = xres;
-	h = bitcount / xres;
-	arcfb_lcd_update(par, x, y, w, h);
+	width = xres;
+	height = bitcount / xres;
+
+	arcfb_lcd_update(par, x, y, width, height);
+}
 
-	if (count)
-		return count;
-	return err;
+static void arcfb_damage_area(struct fb_info *info, u32 x, u32 y,
+			      u32 width, u32 height)
+{
+	struct arcfb_par *par = info->par;
+
+	/* update the physical lcd */
+	arcfb_lcd_update(par, x, y, width, height);
 }
 
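+/* generate read/write and drawing helpers that track damage for deferred I/O */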
+FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(arcfb,
+				   arcfb_damage_range,
+				   arcfb_damage_area)
+
 static const struct fb_ops arcfb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_open	= arcfb_open,
-	.fb_read        = fb_sys_read,
-	.fb_write	= arcfb_write,
+	__FB_DEFAULT_DEFERRED_OPS_RDWR(arcfb),
 	.fb_release	= arcfb_release,
 	.fb_pan_display	= arcfb_pan_display,
-	.fb_fillrect	= arcfb_fillrect,
-	.fb_copyarea	= arcfb_copyarea,
-	.fb_imageblit	= arcfb_imageblit,
+	__FB_DEFAULT_DEFERRED_OPS_DRAW(arcfb),
 	.fb_ioctl 	= arcfb_ioctl,
+	// .fb_mmap requires deferred I/O
 };
 
 static int arcfb_probe(struct platform_device *dev)
@@ -529,6 +468,7 @@ static int arcfb_probe(struct platform_device *dev)
 	if (!info)
 		goto err_fb_alloc;
 
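+	/* the framebuffer is allocated from system memory, not I/O memory */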
+	info->flags |= FBINFO_VIRTFB;
 	info->screen_buffer = videomemory;
 	info->fbops = &arcfb_ops;
 
diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c
index a9c8d33a6ef71f2bb119dcca108304ba455cb84f..08109ce535cd4e9a8396bfd62ca751572ecb58c4 100644
--- a/drivers/video/fbdev/au1100fb.c
+++ b/drivers/video/fbdev/au1100fb.c
@@ -342,6 +342,8 @@ int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
 {
 	struct au1100fb_device *fbdev = to_au1100fb_device(fbi);
 
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
 	pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
 
 	return dma_mmap_coherent(fbdev->dev, vma, fbdev->fb_mem, fbdev->fb_phys,
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index 98afd385c49cda8491d012979382c41c7229a61d..6f20efc663d7e4b4e6971503c8d6682355f86e47 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -1236,6 +1236,8 @@ static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
 	struct au1200fb_device *fbdev = info->par;
 
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
 	return dma_mmap_coherent(fbdev->dev, vma,
 				 fbdev->fb_mem, fbdev->fb_phys, fbdev->fb_len);
 }
@@ -1488,15 +1490,12 @@ static int au1200fb_ioctl(struct fb_info *info, unsigned int cmd,
 
 static const struct fb_ops au1200fb_fb_ops = {
 	.owner		= THIS_MODULE,
+	__FB_DEFAULT_DMAMEM_OPS_RDWR,
 	.fb_check_var	= au1200fb_fb_check_var,
 	.fb_set_par	= au1200fb_fb_set_par,
 	.fb_setcolreg	= au1200fb_fb_setcolreg,
 	.fb_blank	= au1200fb_fb_blank,
-	.fb_fillrect	= sys_fillrect,
-	.fb_copyarea	= sys_copyarea,
-	.fb_imageblit	= sys_imageblit,
-	.fb_read	= fb_sys_read,
-	.fb_write	= fb_sys_write,
+	__FB_DEFAULT_DMAMEM_OPS_DRAW,
 	.fb_sync	= NULL,
 	.fb_ioctl	= au1200fb_ioctl,
 	.fb_mmap	= au1200fb_fb_mmap,
@@ -1568,6 +1567,8 @@ static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev)
 	fbi->fix.mmio_len = 0;
 	fbi->fix.accel = FB_ACCEL_NONE;
 
+	fbi->flags |= FBINFO_VIRTFB;
+
 	fbi->screen_buffer = fbdev->fb_mem;
 
 	au1200fb_update_fbinfo(fbi);
diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c
index e956c90efcdcc98cfbf8ea82317618ed78094117..dcfd1fbbc7e101eca4c6852c20e1d18887777f44 100644
--- a/drivers/video/fbdev/clps711x-fb.c
+++ b/drivers/video/fbdev/clps711x-fb.c
@@ -155,13 +155,11 @@ static int clps711x_fb_blank(int blank, struct fb_info *info)
 
 static const struct fb_ops clps711x_fb_ops = {
 	.owner		= THIS_MODULE,
+	FB_DEFAULT_IOMEM_OPS,
 	.fb_setcolreg	= clps711x_fb_setcolreg,
 	.fb_check_var	= clps711x_fb_check_var,
 	.fb_set_par	= clps711x_fb_set_par,
 	.fb_blank	= clps711x_fb_blank,
-	.fb_fillrect	= sys_fillrect,
-	.fb_copyarea	= sys_copyarea,
-	.fb_imageblit	= sys_imageblit,
 };
 
 static int clps711x_lcd_check_fb(struct lcd_device *lcddev, struct fb_info *fi)
diff --git a/drivers/video/fbdev/core/Kconfig b/drivers/video/fbdev/core/Kconfig
index 7a3ed13bed7085641f28b995cd138070b18ce47f..21053bf00dc58cbe9e67d6780ccf66007e7b0817 100644
--- a/drivers/video/fbdev/core/Kconfig
+++ b/drivers/video/fbdev/core/Kconfig
@@ -4,7 +4,6 @@
 #
 
 config FB_CORE
-	select FB_IOMEM_FOPS
 	select VIDEO_CMDLINE
 	tristate
 
@@ -129,7 +128,7 @@ config FB_LITTLE_ENDIAN
 
 endchoice
 
-config FB_SYS_FOPS
+config FB_SYSMEM_FOPS
 	tristate
 	depends on FB_CORE
 
@@ -142,8 +141,8 @@ config FB_DMAMEM_HELPERS
 	depends on FB_CORE
 	select FB_SYS_COPYAREA
 	select FB_SYS_FILLRECT
-	select FB_SYS_FOPS
 	select FB_SYS_IMAGEBLIT
+	select FB_SYSMEM_FOPS
 
 config FB_IOMEM_FOPS
 	tristate
@@ -168,8 +167,8 @@ config FB_SYSMEM_HELPERS
 	depends on FB_CORE
 	select FB_SYS_COPYAREA
 	select FB_SYS_FILLRECT
-	select FB_SYS_FOPS
 	select FB_SYS_IMAGEBLIT
+	select FB_SYSMEM_FOPS
 
 config FB_SYSMEM_HELPERS_DEFERRED
 	bool
diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile
index c1d657601b2bec531bee7be4c8c58e16117947d2..d1597475908692c5c002363a813d61fd831d2dc1 100644
--- a/drivers/video/fbdev/core/Makefile
+++ b/drivers/video/fbdev/core/Makefile
@@ -32,6 +32,6 @@ obj-$(CONFIG_FB_IOMEM_FOPS)    += fb_io_fops.o
 obj-$(CONFIG_FB_SYS_FILLRECT)  += sysfillrect.o
 obj-$(CONFIG_FB_SYS_COPYAREA)  += syscopyarea.o
 obj-$(CONFIG_FB_SYS_IMAGEBLIT) += sysimgblt.o
-obj-$(CONFIG_FB_SYS_FOPS)      += fb_sys_fops.o
+obj-$(CONFIG_FB_SYSMEM_FOPS)   += fb_sys_fops.o
 obj-$(CONFIG_FB_SVGALIB)       += svgalib.o
 obj-$(CONFIG_FB_DDC)           += fb_ddc.o
diff --git a/drivers/video/fbdev/core/cfbcopyarea.c b/drivers/video/fbdev/core/cfbcopyarea.c
index 5b80bf3dae504850d9ebc12de3ac1df32341ccee..a271f57d9c6c12ba5ac9a4df0290a158f8e6254f 100644
--- a/drivers/video/fbdev/core/cfbcopyarea.c
+++ b/drivers/video/fbdev/core/cfbcopyarea.c
@@ -391,6 +391,9 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
 	if (p->state != FBINFO_STATE_RUNNING)
 		return;
 
+	if (p->flags & FBINFO_VIRTFB)
+		fb_warn_once(p, "Framebuffer is not in I/O address space.");
+
 	/* if the beginning of the target area might overlap with the end of
 	the source area, be have to copy the area reverse. */
 	if ((dy == sy && dx > sx) || (dy > sy)) {
diff --git a/drivers/video/fbdev/core/cfbfillrect.c b/drivers/video/fbdev/core/cfbfillrect.c
index ba9f58b2a5e86e65a29aa3cabc67d45087b933b9..cbaa4c9e2355a9c3a65d289c2c0a821a30bc0a11 100644
--- a/drivers/video/fbdev/core/cfbfillrect.c
+++ b/drivers/video/fbdev/core/cfbfillrect.c
@@ -287,6 +287,9 @@ void cfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
 	if (p->state != FBINFO_STATE_RUNNING)
 		return;
 
+	if (p->flags & FBINFO_VIRTFB)
+		fb_warn_once(p, "Framebuffer is not in I/O address space.");
+
 	if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
 	    p->fix.visual == FB_VISUAL_DIRECTCOLOR )
 		fg = ((u32 *) (p->pseudo_palette))[rect->color];
diff --git a/drivers/video/fbdev/core/cfbimgblt.c b/drivers/video/fbdev/core/cfbimgblt.c
index 9ebda4e0dc7abc13187e01c99230f0ee9fe78777..7d1d2f1a627dcb36178973f697ef2b09ed4e6617 100644
--- a/drivers/video/fbdev/core/cfbimgblt.c
+++ b/drivers/video/fbdev/core/cfbimgblt.c
@@ -326,6 +326,9 @@ void cfb_imageblit(struct fb_info *p, const struct fb_image *image)
 	if (p->state != FBINFO_STATE_RUNNING)
 		return;
 
+	if (p->flags & FBINFO_VIRTFB)
+		fb_warn_once(p, "Framebuffer is not in I/O address space.");
+
 	bitstart = (dy * p->fix.line_length * 8) + (dx * bpp);
 	start_index = bitstart & (32 - 1);
 	pitch_index = (p->fix.line_length & (bpl - 1)) * 8;
diff --git a/drivers/video/fbdev/core/fb_chrdev.c b/drivers/video/fbdev/core/fb_chrdev.c
index 32a7315b4b6dd32147414643170006bd6fc8a759..4ebd16b7e3b8d677928f53866eb794ae3f534868 100644
--- a/drivers/video/fbdev/core/fb_chrdev.c
+++ b/drivers/video/fbdev/core/fb_chrdev.c
@@ -34,13 +34,13 @@ static ssize_t fb_read(struct file *file, char __user *buf, size_t count, loff_t
 	if (!info)
 		return -ENODEV;
 
+	if (fb_WARN_ON_ONCE(info, !info->fbops->fb_read))
+		return -EINVAL;
+
 	if (info->state != FBINFO_STATE_RUNNING)
 		return -EPERM;
 
-	if (info->fbops->fb_read)
-		return info->fbops->fb_read(info, buf, count, ppos);
-
-	return fb_io_read(info, buf, count, ppos);
+	return info->fbops->fb_read(info, buf, count, ppos);
 }
 
 static ssize_t fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
@@ -50,13 +50,13 @@ static ssize_t fb_write(struct file *file, const char __user *buf, size_t count,
 	if (!info)
 		return -ENODEV;
 
+	if (fb_WARN_ON_ONCE(info, !info->fbops->fb_write))
+		return -EINVAL;
+
 	if (info->state != FBINFO_STATE_RUNNING)
 		return -EPERM;
 
-	if (info->fbops->fb_write)
-		return info->fbops->fb_write(info, buf, count, ppos);
-
-	return fb_io_write(info, buf, count, ppos);
+	return info->fbops->fb_write(info, buf, count, ppos);
 }
 
 static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
@@ -314,61 +314,19 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd,
 static int fb_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct fb_info *info = file_fb_info(file);
-	unsigned long mmio_pgoff;
-	unsigned long start;
-	u32 len;
+	int res;
 
 	if (!info)
 		return -ENODEV;
-	mutex_lock(&info->mm_lock);
 
-	if (info->fbops->fb_mmap) {
-		int res;
-
-		/*
-		 * The framebuffer needs to be accessed decrypted, be sure
-		 * SME protection is removed ahead of the call
-		 */
-		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
-		res = info->fbops->fb_mmap(info, vma);
-		mutex_unlock(&info->mm_lock);
-		return res;
-#if IS_ENABLED(CONFIG_FB_DEFERRED_IO)
-	} else if (info->fbdefio) {
-		/*
-		 * FB deferred I/O wants you to handle mmap in your drivers. At a
-		 * minimum, point struct fb_ops.fb_mmap to fb_deferred_io_mmap().
-		 */
-		dev_warn_once(info->dev, "fbdev mmap not set up for deferred I/O.\n");
-		mutex_unlock(&info->mm_lock);
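+	/* drivers now provide fb_mmap themselves, e.g. via fb_io_mmap() */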
+	if (fb_WARN_ON_ONCE(info, !info->fbops->fb_mmap))
 		return -ENODEV;
-#endif
-	}
-
-	/*
-	 * Ugh. This can be either the frame buffer mapping, or
-	 * if pgoff points past it, the mmio mapping.
-	 */
-	start = info->fix.smem_start;
-	len = info->fix.smem_len;
-	mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
-	if (vma->vm_pgoff >= mmio_pgoff) {
-		if (info->var.accel_flags) {
-			mutex_unlock(&info->mm_lock);
-			return -EINVAL;
-		}
 
-		vma->vm_pgoff -= mmio_pgoff;
-		start = info->fix.mmio_start;
-		len = info->fix.mmio_len;
-	}
+	mutex_lock(&info->mm_lock);
+	res = info->fbops->fb_mmap(info, vma);
 	mutex_unlock(&info->mm_lock);
 
-	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-	vma->vm_page_prot = pgprot_framebuffer(vma->vm_page_prot, vma->vm_start,
-					       vma->vm_end, start);
-
-	return vm_iomap_memory(vma, start, len);
+	return res;
 }
 
 static int fb_open(struct inode *inode, struct file *file)
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 274f5d0fa24714ab31a7a6fcb1aa1667bda78c8a..1b0b85e59e5e130c29ac6e333942a1e040e637d0 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -227,6 +227,8 @@ static const struct address_space_operations fb_deferred_io_aops = {
 
 int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
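+	/* the framebuffer must be accessed decrypted; clear SME protection */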
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
 	vma->vm_ops = &fb_deferred_io_vm_ops;
 	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
 	if (!(info->flags & FBINFO_VIRTFB))
diff --git a/drivers/video/fbdev/core/fb_io_fops.c b/drivers/video/fbdev/core/fb_io_fops.c
index 871b829521af356cbb2f203d766e8275ecf7e750..3408ff1b2b7a0a69757ec11111e74ceeac84be50 100644
--- a/drivers/video/fbdev/core/fb_io_fops.c
+++ b/drivers/video/fbdev/core/fb_io_fops.c
@@ -12,6 +12,9 @@ ssize_t fb_io_read(struct fb_info *info, char __user *buf, size_t count, loff_t
 	int c, cnt = 0, err = 0;
 	unsigned long total_size, trailing;
 
+	if (info->flags & FBINFO_VIRTFB)
+		fb_warn_once(info, "Framebuffer is not in I/O address space.");
+
 	if (!info->screen_base)
 		return -ENODEV;
 
@@ -73,6 +76,9 @@ ssize_t fb_io_write(struct fb_info *info, const char __user *buf, size_t count,
 	int c, cnt = 0, err = 0;
 	unsigned long total_size, trailing;
 
+	if (info->flags & FBINFO_VIRTFB)
+		fb_warn_once(info, "Framebuffer is not in I/O address space.");
+
 	if (!info->screen_base)
 		return -ENODEV;
 
@@ -132,5 +138,35 @@ ssize_t fb_io_write(struct fb_info *info, const char __user *buf, size_t count,
 }
 EXPORT_SYMBOL(fb_io_write);
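+/*
+ * Default .fb_mmap for framebuffers in I/O memory. Maps the video memory,
+ * or, if vm_pgoff points past it, the MMIO region.
+ */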
 
+int fb_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+	unsigned long start = info->fix.smem_start;
+	u32 len = info->fix.smem_len;
+	unsigned long mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
+
+	if (info->flags & FBINFO_VIRTFB)
+		fb_warn_once(info, "Framebuffer is not in I/O address space.");
+
+	/*
+	 * This can be either the framebuffer mapping, or if pgoff points
+	 * past it, the mmio mapping.
+	 */
+	if (vma->vm_pgoff >= mmio_pgoff) {
+		if (info->var.accel_flags)
+			return -EINVAL;
+
+		vma->vm_pgoff -= mmio_pgoff;
+		start = info->fix.mmio_start;
+		len = info->fix.mmio_len;
+	}
+
+	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+	vma->vm_page_prot = pgprot_framebuffer(vma->vm_page_prot, vma->vm_start,
+					       vma->vm_end, start);
+
+	return vm_iomap_memory(vma, start, len);
+}
+EXPORT_SYMBOL(fb_io_mmap);
+
 MODULE_DESCRIPTION("Fbdev helpers for framebuffers in I/O memory");
 MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/core/fb_sys_fops.c b/drivers/video/fbdev/core/fb_sys_fops.c
index 0cb0989abda61c5f4e3d0cb32b105bf6848a419c..a9aa6519a5b30be5e5d07de5ad6f96dad95b94cf 100644
--- a/drivers/video/fbdev/core/fb_sys_fops.c
+++ b/drivers/video/fbdev/core/fb_sys_fops.c
@@ -22,6 +22,9 @@ ssize_t fb_sys_read(struct fb_info *info, char __user *buf, size_t count,
 	unsigned long total_size, c;
 	ssize_t ret;
 
+	if (!(info->flags & FBINFO_VIRTFB))
+		fb_warn_once(info, "Framebuffer is not in virtual address space.");
+
 	if (!info->screen_buffer)
 		return -ENODEV;
 
@@ -64,6 +67,9 @@ ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
 	unsigned long total_size, c;
 	size_t ret;
 
+	if (!(info->flags & FBINFO_VIRTFB))
+		fb_warn_once(info, "Framebuffer is not in virtual address space.");
+
 	if (!info->screen_buffer)
 		return -ENODEV;
 
diff --git a/drivers/video/fbdev/core/syscopyarea.c b/drivers/video/fbdev/core/syscopyarea.c
index 7b8bd3a2bedc50c5ca23cb180f52dd99229d9cfc..75e7001e8450faa306ff55388174373daf223d88 100644
--- a/drivers/video/fbdev/core/syscopyarea.c
+++ b/drivers/video/fbdev/core/syscopyarea.c
@@ -324,6 +324,9 @@ void sys_copyarea(struct fb_info *p, const struct fb_copyarea *area)
 	if (p->state != FBINFO_STATE_RUNNING)
 		return;
 
+	if (!(p->flags & FBINFO_VIRTFB))
+		fb_warn_once(p, "Framebuffer is not in virtual address space.");
+
 	/* if the beginning of the target area might overlap with the end of
 	the source area, be have to copy the area reverse. */
 	if ((dy == sy && dx > sx) || (dy > sy)) {
diff --git a/drivers/video/fbdev/core/sysfillrect.c b/drivers/video/fbdev/core/sysfillrect.c
index bcdcaeae6538c328cc2de4b2c48bbb6b5307959b..e49221a88ccc727fa1b2fbe3684ba0bb2e0b73ad 100644
--- a/drivers/video/fbdev/core/sysfillrect.c
+++ b/drivers/video/fbdev/core/sysfillrect.c
@@ -242,6 +242,9 @@ void sys_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
 	if (p->state != FBINFO_STATE_RUNNING)
 		return;
 
+	if (!(p->flags & FBINFO_VIRTFB))
+		fb_warn_once(p, "Framebuffer is not in virtual address space.");
+
 	if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
 	    p->fix.visual == FB_VISUAL_DIRECTCOLOR )
 		fg = ((u32 *) (p->pseudo_palette))[rect->color];
diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c
index 665ef7a0a2495bde34d6306d268cdb41353f9061..6949bbd51d9209b7ced72903438361300870606b 100644
--- a/drivers/video/fbdev/core/sysimgblt.c
+++ b/drivers/video/fbdev/core/sysimgblt.c
@@ -296,6 +296,9 @@ void sys_imageblit(struct fb_info *p, const struct fb_image *image)
 	if (p->state != FBINFO_STATE_RUNNING)
 		return;
 
+	if (!(p->flags & FBINFO_VIRTFB))
+		fb_warn_once(p, "Framebuffer is not in virtual address space.");
+
 	bitstart = (dy * p->fix.line_length * 8) + (dx * bpp);
 	start_index = bitstart & (32 - 1);
 	pitch_index = (p->fix.line_length & (bpl - 1)) * 8;
diff --git a/drivers/video/fbdev/cyber2000fb.c b/drivers/video/fbdev/cyber2000fb.c
index 52105dc1a72f3f916bb7313f54fe7d72e9aa05bf..abb87d3576db0f178c86d480dc6973f86ca8e453 100644
--- a/drivers/video/fbdev/cyber2000fb.c
+++ b/drivers/video/fbdev/cyber2000fb.c
@@ -227,13 +227,6 @@ cyber2000fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 			   CO_REG_CMD_H, cfb);
 }
 
-static void
-cyber2000fb_imageblit(struct fb_info *info, const struct fb_image *image)
-{
-	cfb_imageblit(info, image);
-	return;
-}
-
 static int cyber2000fb_sync(struct fb_info *info)
 {
 	struct cfb_info *cfb = container_of(info, struct cfb_info, fb);
@@ -1069,7 +1062,7 @@ static const struct fb_ops cyber2000fb_ops = {
 	.fb_pan_display	= cyber2000fb_pan_display,
 	.fb_fillrect	= cyber2000fb_fillrect,
 	.fb_copyarea	= cyber2000fb_copyarea,
-	.fb_imageblit	= cyber2000fb_imageblit,
+	.fb_imageblit	= cfb_imageblit,
 	.fb_sync	= cyber2000fb_sync,
 	__FB_DEFAULT_IOMEM_OPS_MMAP,
 };
diff --git a/drivers/video/fbdev/ep93xx-fb.c b/drivers/video/fbdev/ep93xx-fb.c
index cae00deee00148cf8ec732cafa7a01efddd5b101..3e378874ccc79ea673d340c8a183cddbd227cb78 100644
--- a/drivers/video/fbdev/ep93xx-fb.c
+++ b/drivers/video/fbdev/ep93xx-fb.c
@@ -311,6 +311,8 @@ static int ep93xxfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
 	unsigned int offset = vma->vm_pgoff << PAGE_SHIFT;
 
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
 	if (offset < info->fix.smem_len) {
 		return dma_mmap_wc(info->device, vma, info->screen_base,
 				   info->fix.smem_start, info->fix.smem_len);
diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c
index e89e5579258efd70706ff146b532cba6aa54c605..8463de833d1e19efb27b2c4d2b6d1952e139fb71 100644
--- a/drivers/video/fbdev/gbefb.c
+++ b/drivers/video/fbdev/gbefb.c
@@ -1000,6 +1000,8 @@ static int gbefb_mmap(struct fb_info *info,
 	unsigned long phys_addr, phys_size;
 	u16 *tile;
 
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
 	/* check range */
 	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
 		return -EINVAL;
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 694cf6318782bc8e60fe6dfd4445894099ba74ff..aa31c0d26e92531db9245249bd51ffd527dc0856 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -1203,6 +1203,8 @@ static int omapfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 	struct omapfb_device *fbdev = plane->fbdev;
 	int r;
 
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
 	omapfb_rqueue_lock(fbdev);
 	r = fbdev->ctrl->mmap(info, vma);
 	omapfb_rqueue_unlock(fbdev);
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index c9fd0ad352d7fec0b177359b47a7aa48909a3ff1..0db9c55fce5a281b21aeadb386a77be469638908 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -1095,6 +1095,8 @@ static int omapfb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
 	u32 len;
 	int r;
 
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
 	rg = omapfb_get_mem_region(ofbi->region);
 
 	start = omapfb_get_region_paddr(ofbi);
diff --git a/drivers/video/fbdev/ps3fb.c b/drivers/video/fbdev/ps3fb.c
index 64d291d6b1532eab1d605056dab77e94df126c83..dbcda307f6a671dd5070ad15a497bde1307de90f 100644
--- a/drivers/video/fbdev/ps3fb.c
+++ b/drivers/video/fbdev/ps3fb.c
@@ -708,6 +708,8 @@ static int ps3fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
 	int r;
 
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
 	r = vm_iomap_memory(vma, info->fix.smem_start, info->fix.smem_len);
 
 	dev_dbg(info->device, "ps3fb: mmap framebuffer P(%lx)->V(%lx)\n",
@@ -939,15 +941,12 @@ static const struct fb_ops ps3fb_ops = {
 	.owner		= THIS_MODULE,
 	.fb_open	= ps3fb_open,
 	.fb_release	= ps3fb_release,
-	.fb_read        = fb_sys_read,
-	.fb_write       = fb_sys_write,
+	__FB_DEFAULT_SYSMEM_OPS_RDWR,
 	.fb_check_var	= ps3fb_check_var,
 	.fb_set_par	= ps3fb_set_par,
 	.fb_setcolreg	= ps3fb_setcolreg,
 	.fb_pan_display	= ps3fb_pan_display,
-	.fb_fillrect	= sys_fillrect,
-	.fb_copyarea	= sys_copyarea,
-	.fb_imageblit	= sys_imageblit,
+	__FB_DEFAULT_SYSMEM_OPS_DRAW,
 	.fb_mmap	= ps3fb_mmap,
 	.fb_blank	= ps3fb_blank,
 	.fb_ioctl	= ps3fb_ioctl,
@@ -1145,7 +1144,7 @@ static int ps3fb_probe(struct ps3_system_bus_device *dev)
 	info->fix.smem_len = ps3fb_videomemory.size - GPU_FB_START;
 
 	info->pseudo_palette = par->pseudo_palette;
-	info->flags = FBINFO_READS_FAST |
+	info->flags = FBINFO_VIRTFB | FBINFO_READS_FAST |
 		      FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
 
 	retval = fb_alloc_cmap(&info->cmap, 256, 0);
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index befd3fe2f6596179c5d5f319b843b023321b0d5f..0d362d2bf0e38982d9d4afb424fa2a04844d438b 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -562,6 +562,8 @@ static int sa1100fb_mmap(struct fb_info *info,
 		container_of(info, struct sa1100fb_info, fb);
 	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
 
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
 	if (off < info->fix.smem_len) {
 		vma->vm_pgoff += 1; /* skip over the palette */
 		return dma_mmap_wc(fbi->dev, vma, fbi->map_cpu, fbi->map_dma,
diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c
index 21e9fd8e69e268444acb2c29bce9bb447c335f54..634e3d159452c1619a311b13f22fedd787478563 100644
--- a/drivers/video/fbdev/sbuslib.c
+++ b/drivers/video/fbdev/sbuslib.c
@@ -48,7 +48,7 @@ int sbusfb_mmap_helper(struct sbus_mmap_map *map,
 	unsigned long map_offset = 0;
 	unsigned long off;
 	int i;
-                                        
+
 	if (!(vma->vm_flags & (VM_SHARED | VM_MAYSHARE)))
 		return -EINVAL;
 
@@ -60,6 +60,7 @@ int sbusfb_mmap_helper(struct sbus_mmap_map *map,
 
 	/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
 
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
 	/* Each page, see which map applies */
@@ -72,7 +73,7 @@ int sbusfb_mmap_helper(struct sbus_mmap_map *map,
 #define POFF_MASK	(PAGE_MASK|0x1UL)
 #else
 #define POFF_MASK	(PAGE_MASK)
-#endif				
+#endif
 				map_offset = (physbase + map[i].poff) & POFF_MASK;
 				break;
 			}
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index 1364dafaadb1d4632d95ca8be37a84d341734336..eb2297b37504c90437f8f09eb6c970a357865f57 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -1482,19 +1482,18 @@ sh_mobile_lcdc_overlay_mmap(struct fb_info *info, struct vm_area_struct *vma)
 	if (info->fbdefio)
 		return fb_deferred_io_mmap(info, vma);
 
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
 	return dma_mmap_coherent(ovl->channel->lcdc->dev, vma, ovl->fb_mem,
 				 ovl->dma_handle, ovl->fb_size);
 }
 
 static const struct fb_ops sh_mobile_lcdc_overlay_ops = {
 	.owner          = THIS_MODULE,
-	.fb_read        = fb_sys_read,
-	.fb_write       = fb_sys_write,
-	.fb_fillrect	= sys_fillrect,
-	.fb_copyarea	= sys_copyarea,
-	.fb_imageblit	= sys_imageblit,
+	__FB_DEFAULT_DMAMEM_OPS_RDWR,
 	.fb_blank	= sh_mobile_lcdc_overlay_blank,
 	.fb_pan_display = sh_mobile_lcdc_overlay_pan,
+	__FB_DEFAULT_DMAMEM_OPS_DRAW,
 	.fb_ioctl       = sh_mobile_lcdc_overlay_ioctl,
 	.fb_check_var	= sh_mobile_lcdc_overlay_check_var,
 	.fb_set_par	= sh_mobile_lcdc_overlay_set_par,
@@ -1567,6 +1566,7 @@ sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl)
 
 	info->fbops = &sh_mobile_lcdc_overlay_ops;
 	info->device = priv->dev;
+	info->flags |= FBINFO_VIRTFB;
 	info->screen_buffer = ovl->fb_mem;
 	info->par = ovl;
 
@@ -1958,6 +1958,8 @@ sh_mobile_lcdc_mmap(struct fb_info *info, struct vm_area_struct *vma)
 	if (info->fbdefio)
 		return fb_deferred_io_mmap(info, vma);
 
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
 	return dma_mmap_coherent(ch->lcdc->dev, vma, ch->fb_mem,
 				 ch->dma_handle, ch->fb_size);
 }
@@ -1965,8 +1967,7 @@ sh_mobile_lcdc_mmap(struct fb_info *info, struct vm_area_struct *vma)
 static const struct fb_ops sh_mobile_lcdc_ops = {
 	.owner          = THIS_MODULE,
 	.fb_setcolreg	= sh_mobile_lcdc_setcolreg,
-	.fb_read        = fb_sys_read,
-	.fb_write       = fb_sys_write,
+	__FB_DEFAULT_DMAMEM_OPS_RDWR,
 	.fb_fillrect	= sh_mobile_lcdc_fillrect,
 	.fb_copyarea	= sh_mobile_lcdc_copyarea,
 	.fb_imageblit	= sh_mobile_lcdc_imageblit,
@@ -2053,6 +2054,7 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch,
 
 	info->fbops = &sh_mobile_lcdc_ops;
 	info->device = priv->dev;
+	info->flags |= FBINFO_VIRTFB;
 	info->screen_buffer = ch->fb_mem;
 	info->pseudo_palette = &ch->pseudo_palette;
 	info->par = ch;
diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
index 3f8ef50e32095d19548c77b1dd193a005e51d892..104f122e0f273d52129b3e2427db26601721e225 100644
--- a/drivers/video/fbdev/sm712fb.c
+++ b/drivers/video/fbdev/sm712fb.c
@@ -1347,16 +1347,14 @@ static int smtc_set_par(struct fb_info *info)
 
 static const struct fb_ops smtcfb_ops = {
 	.owner        = THIS_MODULE,
-	FB_DEFAULT_IOMEM_OPS,
 	.fb_check_var = smtc_check_var,
 	.fb_set_par   = smtc_set_par,
 	.fb_setcolreg = smtc_setcolreg,
 	.fb_blank     = smtc_blank,
-	.fb_fillrect  = cfb_fillrect,
-	.fb_imageblit = cfb_imageblit,
-	.fb_copyarea  = cfb_copyarea,
+	__FB_DEFAULT_IOMEM_OPS_DRAW,
 	.fb_read      = smtcfb_read,
 	.fb_write     = smtcfb_write,
+	__FB_DEFAULT_IOMEM_OPS_MMAP,
 };
 
 /*
diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
index 90a77d19b236baa922535519597af33c8e015673..35d682b110c428aaf8ed6e39f386bb678f033d75 100644
--- a/drivers/video/fbdev/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
@@ -783,6 +783,8 @@ static int ufx_ops_mmap(struct fb_info *info, struct vm_area_struct *vma)
 	if (info->fbdefio)
 		return fb_deferred_io_mmap(info, vma);
 
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
 	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
 		return -EINVAL;
 	if (size > info->fix.smem_len)
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index 2460ff4ac86b427e8985c81cd305b068f657e36a..1514ddac4cafc4b4b9483d707dc6bb7dc07a2c79 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -331,6 +331,8 @@ static int dlfb_ops_mmap(struct fb_info *info, struct vm_area_struct *vma)
 	if (info->fbdefio)
 		return fb_deferred_io_mmap(info, vma);
 
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
 	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
 		return -EINVAL;
 	if (size > info->fix.smem_len)
diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c
index 840ead69654b85ea2e2927fc315dc3e7b90cec0d..a087b42ca652f9aa55d4a545a650d35a63ec6deb 100644
--- a/drivers/video/fbdev/vermilion/vermilion.c
+++ b/drivers/video/fbdev/vermilion/vermilion.c
@@ -998,6 +998,8 @@ static int vmlfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 	int ret;
 	unsigned long prot;
 
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
 	ret = vmlfb_vram_offset(vinfo, offset);
 	if (ret)
 		return -EINVAL;
diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c
index 1b7c338f99560fc4a3bca8cdfe9ba2d495ca3eaf..f86149ba383523c1f3c1fede4dcb3f7dd3d47783 100644
--- a/drivers/video/fbdev/vfb.c
+++ b/drivers/video/fbdev/vfb.c
@@ -80,15 +80,12 @@ static int vfb_mmap(struct fb_info *info,
 
 static const struct fb_ops vfb_ops = {
 	.owner		= THIS_MODULE,
-	.fb_read        = fb_sys_read,
-	.fb_write       = fb_sys_write,
+	__FB_DEFAULT_SYSMEM_OPS_RDWR,
 	.fb_check_var	= vfb_check_var,
 	.fb_set_par	= vfb_set_par,
 	.fb_setcolreg	= vfb_setcolreg,
 	.fb_pan_display	= vfb_pan_display,
-	.fb_fillrect	= sys_fillrect,
-	.fb_copyarea	= sys_copyarea,
-	.fb_imageblit	= sys_imageblit,
+	__FB_DEFAULT_SYSMEM_OPS_DRAW,
 	.fb_mmap	= vfb_mmap,
 };
 
@@ -385,6 +382,8 @@ static int vfb_pan_display(struct fb_var_screeninfo *var,
 static int vfb_mmap(struct fb_info *info,
 		    struct vm_area_struct *vma)
 {
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
 	return remap_vmalloc_range(vma, (void *)info->fix.smem_start, vma->vm_pgoff);
 }
 
@@ -440,6 +439,7 @@ static int vfb_probe(struct platform_device *dev)
 	if (!info)
 		goto err;
 
+	info->flags |= FBINFO_VIRTFB;
 	info->screen_buffer = videomemory;
 	info->fbops = &vfb_ops;
 
diff --git a/drivers/video/fbdev/vt8500lcdfb.c b/drivers/video/fbdev/vt8500lcdfb.c
index 42d39a9d5130f98fe9beb56774b0225e0240e2da..42c25dc851976c5fa823b89fc4f72e5826d17459 100644
--- a/drivers/video/fbdev/vt8500lcdfb.c
+++ b/drivers/video/fbdev/vt8500lcdfb.c
@@ -241,6 +241,7 @@ static int vt8500lcd_blank(int blank, struct fb_info *info)
 
 static const struct fb_ops vt8500lcd_ops = {
 	.owner		= THIS_MODULE,
+	__FB_DEFAULT_DMAMEM_OPS_RDWR,
 	.fb_set_par	= vt8500lcd_set_par,
 	.fb_setcolreg	= vt8500lcd_setcolreg,
 	.fb_fillrect	= wmt_ge_fillrect,
@@ -250,6 +251,7 @@ static const struct fb_ops vt8500lcd_ops = {
 	.fb_ioctl	= vt8500lcd_ioctl,
 	.fb_pan_display	= vt8500lcd_pan_display,
 	.fb_blank	= vt8500lcd_blank,
+	// .fb_mmap needs a DMA-memory mmap helper
 };
 
 static irqreturn_t vt8500lcd_handle_irq(int irq, void *dev_id)
@@ -357,7 +359,7 @@ static int vt8500lcd_probe(struct platform_device *pdev)
 
 	fbi->fb.fix.smem_start	= fb_mem_phys;
 	fbi->fb.fix.smem_len	= fb_mem_len;
-	fbi->fb.screen_base	= fb_mem_virt;
+	fbi->fb.screen_buffer	= fb_mem_virt;
 
 	fbi->palette_size	= PAGE_ALIGN(512);
 	fbi->palette_cpu	= dma_alloc_coherent(&pdev->dev,
diff --git a/drivers/video/fbdev/wm8505fb.c b/drivers/video/fbdev/wm8505fb.c
index 5833147aa43d176a1d7f26449a66405899b1a16c..00952e9c880287abf7fe17240e70aa15f9c20c6a 100644
--- a/drivers/video/fbdev/wm8505fb.c
+++ b/drivers/video/fbdev/wm8505fb.c
@@ -248,6 +248,7 @@ static int wm8505fb_blank(int blank, struct fb_info *info)
 
 static const struct fb_ops wm8505fb_ops = {
 	.owner		= THIS_MODULE,
+	__FB_DEFAULT_DMAMEM_OPS_RDWR,
 	.fb_set_par	= wm8505fb_set_par,
 	.fb_setcolreg	= wm8505fb_setcolreg,
 	.fb_fillrect	= wmt_ge_fillrect,
@@ -256,6 +257,7 @@ static const struct fb_ops wm8505fb_ops = {
 	.fb_sync	= wmt_ge_sync,
 	.fb_pan_display	= wm8505fb_pan_display,
 	.fb_blank	= wm8505fb_blank,
+	__FB_DEFAULT_IOMEM_OPS_MMAP,
 };
 
 static int wm8505fb_probe(struct platform_device *pdev)
diff --git a/include/drm/bridge/aux-bridge.h b/include/drm/bridge/aux-bridge.h
new file mode 100644
index 0000000000000000000000000000000000000000..66249ff0858e75c0a03cb49921da4d62b0cb7831
--- /dev/null
+++ b/include/drm/bridge/aux-bridge.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Linaro Ltd.
+ *
+ * Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+ */
+#ifndef DRM_AUX_BRIDGE_H
+#define DRM_AUX_BRIDGE_H
+
+#include <drm/drm_connector.h>
+
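+/* Transparent DRM bridges registered on behalf of non-DRM devices (PHYs, Type-C muxes) */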
+#if IS_ENABLED(CONFIG_DRM_AUX_BRIDGE)
+int drm_aux_bridge_register(struct device *parent);
+#else
+static inline int drm_aux_bridge_register(struct device *parent)
+{
+	return 0;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_AUX_HPD_BRIDGE)
+struct device *drm_dp_hpd_bridge_register(struct device *parent,
+					  struct device_node *np);
+void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status);
+#else
+static inline struct device *drm_dp_hpd_bridge_register(struct device *parent,
+							struct device_node *np)
+{
+	return NULL;
+}
+
+static inline void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status)
+{
+}
+#endif
+
+#endif
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index 83d2039c018b4d5c5be508901e2c6d97927b9a7b..3731828825bdf53f1953f977f17208ddd6812e79 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -651,6 +651,9 @@
 # define DP_LINK_QUAL_PATTERN_PRSBS31       0x38
 # define DP_LINK_QUAL_PATTERN_CUSTOM        0x40
 # define DP_LINK_QUAL_PATTERN_SQUARE        0x48
+# define DP_LINK_QUAL_PATTERN_SQUARE_PRESHOOT_DISABLED                   0x49
+# define DP_LINK_QUAL_PATTERN_SQUARE_DEEMPHASIS_DISABLED                 0x4a
+# define DP_LINK_QUAL_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED        0x4b
 
 #define DP_TRAINING_LANE0_1_SET2	    0x10f
 #define DP_TRAINING_LANE2_3_SET2	    0x110
diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h
index ba248ca8866f58091e2bff7f3a860defc5cacdc3..50131383ed8146835d656c0d61c0a8a756cbaf5d 100644
--- a/include/drm/drm_auth.h
+++ b/include/drm/drm_auth.h
@@ -33,24 +33,6 @@
 #include <linux/wait.h>
 
 struct drm_file;
-struct drm_hw_lock;
-
-/*
- * Legacy DRI1 locking data structure. Only here instead of in drm_legacy.h for
- * include ordering reasons.
- *
- * DO NOT USE.
- */
-struct drm_lock_data {
-	struct drm_hw_lock *hw_lock;
-	struct drm_file *file_priv;
-	wait_queue_head_t lock_queue;
-	unsigned long lock_time;
-	spinlock_t spinlock;
-	uint32_t kernel_waiters;
-	uint32_t user_waiters;
-	int idle_has_lock;
-};
 
 /**
  * struct drm_master - drm master structure
@@ -145,10 +127,6 @@ struct drm_master {
 	 * Protected by &drm_device.mode_config's &drm_mode_config.idr_mutex.
 	 */
 	struct idr lessee_idr;
-	/* private: */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-	struct drm_lock_data lock;
-#endif
 };
 
 struct drm_master *drm_master_get(struct drm_master *master);
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index cfb7dcdb66c4b0b5121042604236c347e9a262c4..e39da5807ba71c2da4fc3b5039b103838721cd67 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -194,7 +194,7 @@ struct drm_bridge_funcs {
 	 * or &drm_encoder_helper_funcs.dpms hook.
 	 *
 	 * The bridge must assume that the display pipe (i.e. clocks and timing
-	 * singals) feeding it is no longer running when this callback is
+	 * signals) feeding it is no longer running when this callback is
 	 * called.
 	 *
 	 * The @post_disable callback is optional.
@@ -950,6 +950,4 @@ static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm,
 }
 #endif
 
-void drm_bridge_debugfs_init(struct drm_device *dev);
-
 #endif
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index c490977ee250c956117ab286d8cfe742a6030096..63767cf24371b9dbb40af09a73362ef5f3ff8eb3 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -6,7 +6,6 @@
 #include <linux/mutex.h>
 #include <linux/idr.h>
 
-#include <drm/drm_legacy.h>
 #include <drm/drm_mode_config.h>
 
 struct drm_driver;
@@ -153,8 +152,8 @@ struct drm_device {
 	 *
 	 * Lock for others (not &drm_minor.master and &drm_file.is_master)
 	 *
-	 * WARNING:
-	 * Only drivers annotated with DRIVER_LEGACY should be using this.
+	 * TODO: This lock used to be the BKL of the DRM subsystem. Move the
+	 *       lock into i915, which is the only remaining user.
 	 */
 	struct mutex struct_mutex;
 
@@ -317,72 +316,6 @@ struct drm_device {
 	 * Root directory for debugfs files.
 	 */
 	struct dentry *debugfs_root;
-
-	/* Everything below here is for legacy driver, never use! */
-	/* private: */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-	/* List of devices per driver for stealth attach cleanup */
-	struct list_head legacy_dev_list;
-
-#ifdef __alpha__
-	/** @hose: PCI hose, only used on ALPHA platforms. */
-	struct pci_controller *hose;
-#endif
-
-	/* AGP data */
-	struct drm_agp_head *agp;
-
-	/* Context handle management - linked list of context handles */
-	struct list_head ctxlist;
-
-	/* Context handle management - mutex for &ctxlist */
-	struct mutex ctxlist_mutex;
-
-	/* Context handle management */
-	struct idr ctx_idr;
-
-	/* Memory management - linked list of regions */
-	struct list_head maplist;
-
-	/* Memory management - user token hash table for maps */
-	struct drm_open_hash map_hash;
-
-	/* Context handle management - list of vmas (for debugging) */
-	struct list_head vmalist;
-
-	/* Optional pointer for DMA support */
-	struct drm_device_dma *dma;
-
-	/* Context swapping flag */
-	__volatile__ long context_flag;
-
-	/* Last current context */
-	int last_context;
-
-	/* Lock for &buf_use and a few other things. */
-	spinlock_t buf_lock;
-
-	/* Usage counter for buffers in use -- cannot alloc */
-	int buf_use;
-
-	/* Buffer allocation in progress */
-	atomic_t buf_alloc;
-
-	struct {
-		int context;
-		struct drm_hw_lock *lock;
-	} sigdata;
-
-	struct drm_local_map *agp_buffer_map;
-	unsigned int agp_buffer_token;
-
-	/* Scatter gather memory */
-	struct drm_sg_mem *sg;
-
-	/* IRQs */
-	bool irq_enabled;
-	int irq;
-#endif
 };
 
 #endif
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index e2640dc64e081cc9c82f4c15db3da40bfd26c6fe..8878260d7529dcbe9af6f9ad3e530b30f02c3e22 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -110,6 +110,15 @@ enum drm_driver_feature {
 	 * Driver supports user defined GPU VA bindings for GEM objects.
 	 */
 	DRIVER_GEM_GPUVA		= BIT(8),
+	/**
+	 * @DRIVER_CURSOR_HOTSPOT:
+	 *
+	 * Driver supports and requires cursor hotspot information in the
+	 * cursor plane (i.e. the cursor plane has to actually track the mouse
+	 * cursor and clients are required to set the hotspot in order for
+	 * the cursor planes to work correctly).
+	 */
+	DRIVER_CURSOR_HOTSPOT           = BIT(9),
 
 	/* IMPORTANT: Below are all the legacy flags, add new ones above. */
 
@@ -433,25 +442,6 @@ struct drm_driver {
 	 * some examples.
 	 */
 	const struct file_operations *fops;
-
-#ifdef CONFIG_DRM_LEGACY
-	/* Everything below here is for legacy driver, never use! */
-	/* private: */
-
-	int (*firstopen) (struct drm_device *);
-	void (*preclose) (struct drm_device *, struct drm_file *file_priv);
-	int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
-	int (*dma_quiescent) (struct drm_device *);
-	int (*context_dtor) (struct drm_device *dev, int context);
-	irqreturn_t (*irq_handler)(int irq, void *arg);
-	void (*irq_preinstall)(struct drm_device *dev);
-	int (*irq_postinstall)(struct drm_device *dev);
-	void (*irq_uninstall)(struct drm_device *dev);
-	u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe);
-	int (*enable_vblank)(struct drm_device *dev, unsigned int pipe);
-	void (*disable_vblank)(struct drm_device *dev, unsigned int pipe);
-	int dev_priv_size;
-#endif
 };
 
 void *__devm_drm_dev_alloc(struct device *parent,
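
For reference, a para-virtualized driver opts in to the hotspot contract by adding the new DRIVER_CURSOR_HOTSPOT bit to its feature mask. A hedged sketch; the driver name and the rest of the feature mask are illustrative:

static const struct drm_driver foo_virt_driver = {
	/* DRIVER_CURSOR_HOTSPOT only makes sense together with atomic
	 * cursor planes, hence DRIVER_ATOMIC below.
	 */
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
			   DRIVER_CURSOR_HOTSPOT,
	.name = "foo-virt",
	.desc = "illustrative para-virtualized KMS driver",
};
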
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index 3a09682af685a36ba42565e926fe2e0ec6955a1d..977a9381c8ba943b4d3e021635ea14856df8a17d 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -60,7 +60,7 @@ struct drm_encoder_funcs {
 	 * @late_register:
 	 *
 	 * This optional hook can be used to register additional userspace
-	 * interfaces attached to the encoder like debugfs interfaces.
+	 * interfaces attached to the encoder.
 	 * It is called late in the driver load sequence from drm_dev_register().
 	 * Everything added from this callback should be unregistered in
 	 * the early_unregister callback.
@@ -81,6 +81,13 @@ struct drm_encoder_funcs {
 	 * before data structures are torndown.
 	 */
 	void (*early_unregister)(struct drm_encoder *encoder);
+
+	/**
+	 * @debugfs_init:
+	 *
+	 * Allows encoders to create encoder-specific debugfs files.
+	 */
+	void (*debugfs_init)(struct drm_encoder *encoder, struct dentry *root);
 };
 
 /**
@@ -184,6 +191,13 @@ struct drm_encoder {
 
 	const struct drm_encoder_funcs *funcs;
 	const struct drm_encoder_helper_funcs *helper_private;
+
+	/**
+	 * @debugfs_entry:
+	 *
+	 * Debugfs directory for this encoder.
+	 */
+	struct dentry *debugfs_entry;
 };
 
 #define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
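
A sketch of the new hook in use: the DRM core creates the per-encoder directory and passes it in as @root, so the driver only populates it. All foo_* names are hypothetical:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int foo_encoder_state_show(struct seq_file *m, void *data)
{
	/* dump driver-specific encoder state here */
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo_encoder_state);

static void foo_encoder_debugfs_init(struct drm_encoder *encoder,
				     struct dentry *root)
{
	debugfs_create_file("state", 0444, root, encoder,
			    &foo_encoder_state_fops);
}

static const struct drm_encoder_funcs foo_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
	.debugfs_init = foo_encoder_debugfs_init,
};
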
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index e1b5b4282f75d16dde18ed18830c83fa5dfd0fe7..ab230d3af138db72cd29c97bd792993d0ebb7995 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -226,6 +226,18 @@ struct drm_file {
 	 */
 	bool is_master;
 
+	/**
+	 * @supports_virtualized_cursor_plane:
+	 *
+	 * This client is capable of handling the cursor plane with the
+	 * restrictions imposed on it by the virtualized drivers.
+	 *
+	 * This implies that the cursor plane has to behave like a cursor,
+	 * i.e. track cursor movement. It also requires the client to set
+	 * the hotspot properties on the cursor plane.
+	 */
+	bool supports_virtualized_cursor_plane;
+
 	/**
 	 * @master:
 	 *
@@ -374,11 +386,6 @@ struct drm_file {
 	 * Per-file buffer caches used by the PRIME buffer sharing code.
 	 */
 	struct drm_prime_file_private prime;
-
-	/* private: */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
-	unsigned long lock_count; /* DRI1 legacy lock count */
-#endif
 };
 
 /**
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index 80ece7b6dd9b59a81626276d849009c22d1b4b9a..668077009fced0e325a69db1882ef3a851f28647 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -188,18 +188,6 @@ struct drm_framebuffer {
 	 * DRM_MODE_FB_MODIFIERS.
 	 */
 	int flags;
-	/**
-	 * @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor
-	 * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR
-	 * universal plane.
-	 */
-	int hot_x;
-	/**
-	 * @hot_y: Y coordinate of the cursor hotspot. Used by the legacy cursor
-	 * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR
-	 * universal plane.
-	 */
-	int hot_y;
 	/**
 	 * @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock.
 	 */
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index f94fec9a8517bb273b8f261acd370bd80221dce6..6258849382e165e01eacb2568d9b441fd31c5297 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -544,26 +544,9 @@ struct drm_gpuvm_exec {
 	} extra;
 };
 
-/**
- * drm_gpuvm_prepare_vm() - prepare the GPUVMs common dma-resv
- * @gpuvm: the &drm_gpuvm
- * @exec: the &drm_exec context
- * @num_fences: the amount of &dma_fences to reserve
- *
- * Calls drm_exec_prepare_obj() for the GPUVMs dummy &drm_gem_object.
- *
- * Using this function directly, it is the drivers responsibility to call
- * drm_exec_init() and drm_exec_fini() accordingly.
- *
- * Returns: 0 on success, negative error code on failure.
- */
-static inline int
-drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
-		     struct drm_exec *exec,
-		     unsigned int num_fences)
-{
-	return drm_exec_prepare_obj(exec, gpuvm->r_obj, num_fences);
-}
+int drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
+			 struct drm_exec *exec,
+			 unsigned int num_fences);
 
 int drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
 			      struct drm_exec *exec,
@@ -738,7 +721,7 @@ drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
 	return vm_bo;
 }
 
-void drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo);
+bool drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo);
 
 struct drm_gpuvm_bo *
 drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
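
drm_gpuvm_prepare_vm() is now a real exported symbol rather than a static inline, but the calling pattern is unchanged. A hedged locking sketch under the usual drm_exec retry loop; the foo_ name is hypothetical and the three-argument drm_exec_init() (with a trailing object-count hint) is the signature assumed by this series:

static int foo_lock_vm_and_bos(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
{
	int ret = 0;

	drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(exec) {
		/* Lock/reserve the VM's common dma-resv first ... */
		ret = drm_gpuvm_prepare_vm(gpuvm, exec, 1);
		drm_exec_retry_on_contention(exec);
		if (ret)
			break;

		/* ... then all external objects tracked by the GPUVM. */
		ret = drm_gpuvm_prepare_objects(gpuvm, exec, 1);
		drm_exec_retry_on_contention(exec);
		if (ret)
			break;
	}
	if (ret)
		drm_exec_fini(exec);

	return ret;
}

The other visible change in this hunk is that drm_gpuvm_bo_put() now returns whether the final reference was dropped, letting callers know when the vm_bo and its bookkeeping are actually gone.
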
diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h
index 6ed61c371f6ce9e376500b7b924a518a9c01b18e..171760b6c4a1472d3e1f08412af575dd5c15153d 100644
--- a/include/drm/drm_ioctl.h
+++ b/include/drm/drm_ioctl.h
@@ -109,17 +109,6 @@ enum drm_ioctl_flags {
 	 * This is equivalent to callers with the SYSADMIN capability.
 	 */
 	DRM_ROOT_ONLY		= BIT(2),
-	/**
-	 * @DRM_UNLOCKED:
-	 *
-	 * Whether &drm_ioctl_desc.func should be called with the DRM BKL held
-	 * or not. Enforced as the default for all modern drivers, hence there
-	 * should never be a need to set this flag.
-	 *
-	 * Do not use anywhere else than for the VBLANK_WAIT IOCTL, which is the
-	 * only legacy IOCTL which needs this.
-	 */
-	DRM_UNLOCKED		= BIT(4),
 	/**
 	 * @DRM_RENDER_ALLOW:
 	 *
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
deleted file mode 100644
index 0fc85418aad8c07d723f271244fefc8e6fcde527..0000000000000000000000000000000000000000
--- a/include/drm/drm_legacy.h
+++ /dev/null
@@ -1,331 +0,0 @@
-#ifndef __DRM_DRM_LEGACY_H__
-#define __DRM_DRM_LEGACY_H__
-/*
- * Legacy driver interfaces for the Direct Rendering Manager
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009-2010, Code Aurora Forum.
- * All rights reserved.
- * Copyright © 2014 Intel Corporation
- *   Daniel Vetter <daniel.vetter@ffwll.ch>
- *
- * Author: Rickard E. (Rik) Faith <faith@valinux.com>
- * Author: Gareth Hughes <gareth@valinux.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/agp_backend.h>
-
-#include <drm/drm.h>
-#include <drm/drm_auth.h>
-
-struct drm_device;
-struct drm_driver;
-struct file;
-struct pci_driver;
-
-/*
- * Legacy Support for palateontologic DRM drivers
- *
- * If you add a new driver and it uses any of these functions or structures,
- * you're doing it terribly wrong.
- */
-
-/*
- * Hash-table Support
- */
-
-struct drm_hash_item {
-	struct hlist_node head;
-	unsigned long key;
-};
-
-struct drm_open_hash {
-	struct hlist_head *table;
-	u8 order;
-};
-
-/**
- * DMA buffer.
- */
-struct drm_buf {
-	int idx;		       /**< Index into master buflist */
-	int total;		       /**< Buffer size */
-	int order;		       /**< log-base-2(total) */
-	int used;		       /**< Amount of buffer in use (for DMA) */
-	unsigned long offset;	       /**< Byte offset (used internally) */
-	void *address;		       /**< Address of buffer */
-	unsigned long bus_address;     /**< Bus address of buffer */
-	struct drm_buf *next;	       /**< Kernel-only: used for free list */
-	__volatile__ int waiting;      /**< On kernel DMA queue */
-	__volatile__ int pending;      /**< On hardware DMA queue */
-	struct drm_file *file_priv;    /**< Private of holding file descr */
-	int context;		       /**< Kernel queue for this buffer */
-	int while_locked;	       /**< Dispatch this buffer while locked */
-	enum {
-		DRM_LIST_NONE = 0,
-		DRM_LIST_FREE = 1,
-		DRM_LIST_WAIT = 2,
-		DRM_LIST_PEND = 3,
-		DRM_LIST_PRIO = 4,
-		DRM_LIST_RECLAIM = 5
-	} list;			       /**< Which list we're on */
-
-	int dev_priv_size;		 /**< Size of buffer private storage */
-	void *dev_private;		 /**< Per-buffer private storage */
-};
-
-typedef struct drm_dma_handle {
-	dma_addr_t busaddr;
-	void *vaddr;
-	size_t size;
-} drm_dma_handle_t;
-
-/**
- * Buffer entry.  There is one of this for each buffer size order.
- */
-struct drm_buf_entry {
-	int buf_size;			/**< size */
-	int buf_count;			/**< number of buffers */
-	struct drm_buf *buflist;		/**< buffer list */
-	int seg_count;
-	int page_order;
-	struct drm_dma_handle **seglist;
-
-	int low_mark;			/**< Low water mark */
-	int high_mark;			/**< High water mark */
-};
-
-/**
- * DMA data.
- */
-struct drm_device_dma {
-
-	struct drm_buf_entry bufs[DRM_MAX_ORDER + 1];	/**< buffers, grouped by their size order */
-	int buf_count;			/**< total number of buffers */
-	struct drm_buf **buflist;		/**< Vector of pointers into drm_device_dma::bufs */
-	int seg_count;
-	int page_count;			/**< number of pages */
-	unsigned long *pagelist;	/**< page list */
-	unsigned long byte_count;
-	enum {
-		_DRM_DMA_USE_AGP = 0x01,
-		_DRM_DMA_USE_SG = 0x02,
-		_DRM_DMA_USE_FB = 0x04,
-		_DRM_DMA_USE_PCI_RO = 0x08
-	} flags;
-
-};
-
-/**
- * Scatter-gather memory.
- */
-struct drm_sg_mem {
-	unsigned long handle;
-	void *virtual;
-	int pages;
-	struct page **pagelist;
-	dma_addr_t *busaddr;
-};
-
-/**
- * Kernel side of a mapping
- */
-struct drm_local_map {
-	dma_addr_t offset;	 /**< Requested physical address (0 for SAREA)*/
-	unsigned long size;	 /**< Requested physical size (bytes) */
-	enum drm_map_type type;	 /**< Type of memory to map */
-	enum drm_map_flags flags;	 /**< Flags */
-	void *handle;		 /**< User-space: "Handle" to pass to mmap() */
-				 /**< Kernel-space: kernel-virtual address */
-	int mtrr;		 /**< MTRR slot used */
-};
-
-typedef struct drm_local_map drm_local_map_t;
-
-/**
- * Mappings list
- */
-struct drm_map_list {
-	struct list_head head;		/**< list head */
-	struct drm_hash_item hash;
-	struct drm_local_map *map;	/**< mapping */
-	uint64_t user_token;
-	struct drm_master *master;
-};
-
-int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
-		      unsigned int size, enum drm_map_type type,
-		      enum drm_map_flags flags, struct drm_local_map **map_p);
-struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, unsigned int token);
-void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
-int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
-struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
-int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);
-
-int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req);
-int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req);
-
-/**
- * Test that the hardware lock is held by the caller, returning otherwise.
- *
- * \param dev DRM device.
- * \param filp file pointer of the caller.
- */
-#define LOCK_TEST_WITH_RETURN( dev, _file_priv )				\
-do {										\
-	if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) ||	\
-	    _file_priv->master->lock.file_priv != _file_priv)	{		\
-		DRM_ERROR( "%s called without lock held, held  %d owner %p %p\n",\
-			   __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
-			   _file_priv->master->lock.file_priv, _file_priv);	\
-		return -EINVAL;							\
-	}									\
-} while (0)
-
-void drm_legacy_idlelock_take(struct drm_lock_data *lock);
-void drm_legacy_idlelock_release(struct drm_lock_data *lock);
-
-/* drm_irq.c */
-int drm_legacy_irq_uninstall(struct drm_device *dev);
-
-/* drm_pci.c */
-
-#ifdef CONFIG_PCI
-
-int drm_legacy_pci_init(const struct drm_driver *driver,
-			struct pci_driver *pdriver);
-void drm_legacy_pci_exit(const struct drm_driver *driver,
-			 struct pci_driver *pdriver);
-
-#else
-
-static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev,
-						   size_t size, size_t align)
-{
-	return NULL;
-}
-
-static inline void drm_pci_free(struct drm_device *dev,
-				struct drm_dma_handle *dmah)
-{
-}
-
-static inline int drm_legacy_pci_init(const struct drm_driver *driver,
-				      struct pci_driver *pdriver)
-{
-	return -EINVAL;
-}
-
-static inline void drm_legacy_pci_exit(const struct drm_driver *driver,
-				       struct pci_driver *pdriver)
-{
-}
-
-#endif
-
-/*
- * AGP Support
- */
-
-struct drm_agp_head {
-	struct agp_kern_info agp_info;
-	struct list_head memory;
-	unsigned long mode;
-	struct agp_bridge_data *bridge;
-	int enabled;
-	int acquired;
-	unsigned long base;
-	int agp_mtrr;
-	int cant_use_aperture;
-	unsigned long page_mask;
-};
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP)
-struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev);
-int drm_legacy_agp_acquire(struct drm_device *dev);
-int drm_legacy_agp_release(struct drm_device *dev);
-int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
-int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info);
-int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
-int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
-int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
-int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
-#else
-static inline struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev)
-{
-	return NULL;
-}
-
-static inline int drm_legacy_agp_acquire(struct drm_device *dev)
-{
-	return -ENODEV;
-}
-
-static inline int drm_legacy_agp_release(struct drm_device *dev)
-{
-	return -ENODEV;
-}
-
-static inline int drm_legacy_agp_enable(struct drm_device *dev,
-					struct drm_agp_mode mode)
-{
-	return -ENODEV;
-}
-
-static inline int drm_legacy_agp_info(struct drm_device *dev,
-				      struct drm_agp_info *info)
-{
-	return -ENODEV;
-}
-
-static inline int drm_legacy_agp_alloc(struct drm_device *dev,
-				       struct drm_agp_buffer *request)
-{
-	return -ENODEV;
-}
-
-static inline int drm_legacy_agp_free(struct drm_device *dev,
-				      struct drm_agp_buffer *request)
-{
-	return -ENODEV;
-}
-
-static inline int drm_legacy_agp_unbind(struct drm_device *dev,
-					struct drm_agp_binding *request)
-{
-	return -ENODEV;
-}
-
-static inline int drm_legacy_agp_bind(struct drm_device *dev,
-				      struct drm_agp_binding *request)
-{
-	return -ENODEV;
-}
-#endif
-
-/* drm_memory.c */
-void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
-void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
-void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
-
-#endif /* __DRM_DRM_LEGACY_H__ */
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index c9df0407980c9413ce2e8365e5c7e36a3a57a404..c0aec0d4d664e7f216a334a0cd755f8b0e9ac740 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -168,6 +168,7 @@ struct mipi_dsi_device_info {
  * struct mipi_dsi_device - DSI peripheral device
  * @host: DSI host for this peripheral
  * @dev: driver model device node for this peripheral
+ * @attached: the DSI device has been successfully attached
  * @name: DSI peripheral chip type
  * @channel: virtual channel assigned to the peripheral
  * @format: pixel format for video mode
@@ -184,6 +185,7 @@ struct mipi_dsi_device_info {
 struct mipi_dsi_device {
 	struct mipi_dsi_host *host;
 	struct device dev;
+	bool attached;
 
 	char name[DSI_DEV_NAME_SIZE];
 	unsigned int channel;
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index e3c3ac615909474bce7163230ed53cb22e1e7919..a33cf74887375a8b127b90b2a6e98157cc4a874f 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -1154,6 +1154,11 @@ struct drm_connector_helper_funcs {
 	 * This operation is optional.
 	 *
 	 * This callback is used by the drm_kms_helper_poll_enable() helpers.
+	 *
+	 * This operation does not need to perform any HPD state tracking as
+	 * the DRM core handles that maintenance and ensures the calls to
+	 * enable and disable HPD are balanced.
 	 */
 	void (*enable_hpd)(struct drm_connector *connector);
 
@@ -1165,6 +1170,11 @@ struct drm_connector_helper_funcs {
 	 * This operation is optional.
 	 *
 	 * This callback is used by the drm_kms_helper_poll_disable() helpers.
+	 *
+	 * This operation does not need to perform any HPD state tracking as
+	 * the DRM core handles that maintenance and ensures the calls to
+	 * enable and disable HPD are balanced.
 	 */
 	void (*disable_hpd)(struct drm_connector *connector);
 };
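
Concretely, a driver whose HPD interrupt can simply be gated no longer needs its own enabled/disabled bookkeeping, since the core guarantees balanced calls. A hedged sketch; to_foo_connector() and the hpd_irq field are illustrative:

static void foo_connector_enable_hpd(struct drm_connector *connector)
{
	struct foo_connector *foo = to_foo_connector(connector);

	enable_irq(foo->hpd_irq);
}

static void foo_connector_disable_hpd(struct drm_connector *connector)
{
	struct foo_connector *foo = to_foo_connector(connector);

	disable_irq(foo->hpd_irq);
}

static const struct drm_connector_helper_funcs foo_connector_helper_funcs = {
	.enable_hpd = foo_connector_enable_hpd,
	.disable_hpd = foo_connector_disable_hpd,
};
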
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 79d62856defbf781c81c2c7f19365d3f2da56457..c6565a6f9324cfb0cd2f8ed990d1321c539bedd5 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -116,6 +116,10 @@ struct drm_plane_state {
 	/** @src_h: height of visible portion of plane (in 16.16) */
 	uint32_t src_h, src_w;
 
+	/** @hotspot_x: x offset to mouse cursor hotspot */
+	/** @hotspot_y: y offset to mouse cursor hotspot */
+	int32_t hotspot_x, hotspot_y;
+
 	/**
 	 * @alpha:
 	 * Opacity of the plane with 0 as completely transparent and 0xffff as
@@ -190,6 +194,16 @@ struct drm_plane_state {
 	 */
 	struct drm_property_blob *fb_damage_clips;
 
+	/**
+	 * @ignore_damage_clips:
+	 *
+	 * Set by drivers to indicate to the drm_atomic_helper_damage_iter_init()
+	 * helper that the @fb_damage_clips blob property should be ignored.
+	 *
+	 * See :ref:`damage_tracking_properties` for more information.
+	 */
+	bool ignore_damage_clips;
+
 	/**
 	 * @src:
 	 *
@@ -748,6 +762,16 @@ struct drm_plane {
 	 * scaling.
 	 */
 	struct drm_property *scaling_filter_property;
+
+	/**
+	 * @hotspot_x_property: property to set mouse hotspot x offset.
+	 */
+	struct drm_property *hotspot_x_property;
+
+	/**
+	 * @hotspot_y_property: property to set mouse hotspot y offset.
+	 */
+	struct drm_property *hotspot_y_property;
 };
 
 #define obj_to_plane(x) container_of(x, struct drm_plane, base)
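
A sketch of a virtualized driver consuming the new state fields from its cursor plane's .atomic_update hook; foo_hw_set_cursor_hotspot() stands in for whatever host call the driver actually makes:

static void foo_cursor_plane_atomic_update(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);

	/* Forward the hotspot so the host-rendered pointer lines up
	 * with the guest-reported cursor position.
	 */
	foo_hw_set_cursor_hotspot(plane->dev, new_state->hotspot_x,
				  new_state->hotspot_y);

	/* ... update the cursor image/position as before ... */
}
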
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 3a574e8cd22f439cec7a3a3f164df39d67d23c37..75f9c4830564a4d2b5e053e15bf43e800d9be67d 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -26,7 +26,6 @@
 
 #include <linux/types.h>
 
-struct drm_atomic_state;
 struct drm_crtc;
 struct drm_framebuffer;
 struct drm_modeset_acquire_ctx;
@@ -42,7 +41,6 @@ int drm_plane_helper_update_primary(struct drm_plane *plane, struct drm_crtc *cr
 int drm_plane_helper_disable_primary(struct drm_plane *plane,
 				     struct drm_modeset_acquire_ctx *ctx);
 void drm_plane_helper_destroy(struct drm_plane *plane);
-int drm_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state);
 
 /**
  * DRM_PLANE_NON_ATOMIC_FUNCS - Default plane functions for non-atomic drivers
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 9a50348bd5c04e9e1056fe62a28e2c145a18d320..5acc64954a8830117773442eb670068bf5f327c2 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -63,10 +63,10 @@ struct drm_file;
  * to an array, and as such should start at 0.
  */
 enum drm_sched_priority {
-	DRM_SCHED_PRIORITY_MIN,
-	DRM_SCHED_PRIORITY_NORMAL,
-	DRM_SCHED_PRIORITY_HIGH,
 	DRM_SCHED_PRIORITY_KERNEL,
+	DRM_SCHED_PRIORITY_HIGH,
+	DRM_SCHED_PRIORITY_NORMAL,
+	DRM_SCHED_PRIORITY_LOW,
 
 	DRM_SCHED_PRIORITY_COUNT
 };
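
The enum is deliberately inverted: DRM_SCHED_PRIORITY_KERNEL is now the lowest numeric value (highest priority) and the old MIN level becomes LOW, so the values can keep indexing run-queue arrays from 0. Drivers that only pass symbolic values are unaffected, as in this hedged entity-setup sketch (foo/ctx names hypothetical):

struct drm_gpu_scheduler *sched_list[] = { &foo->sched };
int ret;

/* User contexts keep requesting NORMAL; only the numeric value
 * behind the symbol and the relative ordering changed.
 */
ret = drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
			    sched_list, ARRAY_SIZE(sched_list), NULL);
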
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 3f31baa3293f9819e271911db351456ad2a14519..8ff4add71f8833dc318fd71aad84cf7d20b4570b 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -343,16 +343,19 @@ struct dma_buf {
 	/**
 	 * @exp_name:
 	 *
-	 * Name of the exporter; useful for debugging. See the
-	 * DMA_BUF_SET_NAME IOCTL.
+	 * Name of the exporter; useful for debugging. Must not be NULL.
 	 */
 	const char *exp_name;
 
 	/**
 	 * @name:
 	 *
-	 * Userspace-provided name; useful for accounting and debugging,
-	 * protected by dma_resv_lock() on @resv and @name_lock for read access.
+	 * Userspace-provided name. Default value is NULL. If not NULL,
+	 * the length cannot exceed DMA_BUF_NAME_LEN, including the
+	 * terminating NUL char. Useful for accounting and debugging.
+	 * Read/Write accesses are protected by @name_lock.
+	 *
+	 * See the DMA_BUF_SET_NAME and DMA_BUF_SET_NAME_A/B IOCTLs.
 	 */
 	const char *name;
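
For reference, the name is assigned from userspace with the DMA_BUF_SET_NAME ioctls mentioned above; a minimal sketch, with error handling left to the caller:

#include <sys/ioctl.h>
#include <linux/dma-buf.h>

static int dmabuf_set_debug_name(int dmabuf_fd, const char *name)
{
	/* The _B variant takes the string pointer as a 64-bit value;
	 * the kernel rejects names longer than DMA_BUF_NAME_LEN.
	 */
	return ioctl(dmabuf_fd, DMA_BUF_SET_NAME_B, name);
}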
 
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 94e2c44c6569988106aa01dbe522f02b854a92f7..05dc9624897df9fe01b06ab18bea1ea23b53ecda 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -536,6 +536,7 @@ extern ssize_t fb_io_read(struct fb_info *info, char __user *buf,
 			  size_t count, loff_t *ppos);
 extern ssize_t fb_io_write(struct fb_info *info, const char __user *buf,
 			   size_t count, loff_t *ppos);
+int fb_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
 
 #define __FB_DEFAULT_IOMEM_OPS_RDWR \
 	.fb_read	= fb_io_read, \
@@ -547,7 +548,7 @@ extern ssize_t fb_io_write(struct fb_info *info, const char __user *buf,
 	.fb_imageblit	= cfb_imageblit
 
 #define __FB_DEFAULT_IOMEM_OPS_MMAP \
-	.fb_mmap	= NULL /* default implementation */
+	.fb_mmap	= fb_io_mmap
 
 #define FB_DEFAULT_IOMEM_OPS \
 	__FB_DEFAULT_IOMEM_OPS_RDWR, \
@@ -848,7 +849,10 @@ static inline bool fb_modesetting_disabled(const char *drvname)
 }
 #endif
 
-/* Convenience logging macros */
+/*
+ * Convenience logging macros
+ */
+
 #define fb_err(fb_info, fmt, ...)					\
 	pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
 #define fb_notice(info, fmt, ...)					\
@@ -860,4 +864,12 @@ static inline bool fb_modesetting_disabled(const char *drvname)
 #define fb_dbg(fb_info, fmt, ...)					\
 	pr_debug("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
 
+#define fb_warn_once(fb_info, fmt, ...)					\
+	pr_warn_once("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+
+#define fb_WARN_ONCE(fb_info, condition, fmt, ...) \
+	WARN_ONCE(condition, "fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_WARN_ON_ONCE(fb_info, x) \
+	fb_WARN_ONCE(fb_info, (x), "%s", "fb_WARN_ON_ONCE(" __stringify(x) ")")
+
 #endif /* _LINUX_FB_H */
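
A usage sketch for the new one-shot helpers in a hypothetical fbdev driver:

static int foo_fb_set_par(struct fb_info *info)
{
	if (info->var.bits_per_pixel != 32) {
		fb_warn_once(info, "depth %u unsupported, forcing 32 bpp\n",
			     info->var.bits_per_pixel);
		info->var.bits_per_pixel = 32;
	}

	/* Complain loudly (but only once) about driver bugs, tagged
	 * with the fb device id.
	 */
	fb_WARN_ON_ONCE(info, info->fix.line_length == 0);

	return 0;
}
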
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 8662b5aeea0c8f665539ded39940ab789783fe54..16122819edfeff872b91d989d1f6267640ae1391 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -713,7 +713,8 @@ struct drm_gem_open {
 /**
  * DRM_CAP_ASYNC_PAGE_FLIP
  *
- * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC.
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy
+ * page-flips.
  */
 #define DRM_CAP_ASYNC_PAGE_FLIP		0x7
 /**
@@ -773,6 +774,13 @@ struct drm_gem_open {
  * :ref:`drm_sync_objects`.
  */
 #define DRM_CAP_SYNCOBJ_TIMELINE	0x14
+/**
+ * DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP
+ *
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic
+ * commits.
+ */
+#define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP	0x15
 
 /* DRM_IOCTL_GET_CAP ioctl argument type */
 struct drm_get_cap {
@@ -842,6 +850,31 @@ struct drm_get_cap {
  */
 #define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS	5
 
+/**
+ * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT
+ *
+ * Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and
+ * virtualbox) have additional restrictions for cursor planes (thus
+ * making cursor planes on those drivers not truly universal), e.g.
+ * they need cursor planes to act like one would expect from a mouse
+ * cursor and have correctly set hotspot properties.
+ * If this client cap is not set the DRM core will hide the cursor plane
+ * on those virtualized drivers because not setting it implies that the
+ * client is not capable of dealing with those extra restrictions.
+ * Clients which do set the cursor hotspot and treat the cursor plane
+ * like a mouse cursor should set this capability.
+ * The client must enable &DRM_CLIENT_CAP_ATOMIC first.
+ *
+ * Setting this capability on drivers which do not special case
+ * cursor planes (i.e. non-virtualized drivers) will return
+ * EOPNOTSUPP, which can be used by userspace to gauge
+ * requirements of the hardware/drivers they're running on.
+ *
+ * This capability is always supported for atomic-capable virtualized
+ * drivers starting from kernel version 6.6.
+ */
+#define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT	6
+
 /* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
 struct drm_set_client_cap {
 	__u64 capability;
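
A userspace sketch of the opt-in described above; DRM_CLIENT_CAP_ATOMIC must already be enabled, and the EOPNOTSUPP probe distinguishes non-virtualized drivers:

#include <errno.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int enable_cursor_hotspot_cap(int fd)
{
	struct drm_set_client_cap cap = {
		.capability = DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT,
		.value = 1,
	};

	if (ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap) == -1) {
		if (errno == EOPNOTSUPP)
			return 0; /* no extra cursor-plane restrictions */
		return -errno;
	}

	return 1; /* virtualized driver: hotspot rules now apply */
}
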
@@ -893,6 +926,7 @@ struct drm_syncobj_transfer {
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */
 struct drm_syncobj_wait {
 	__u64 handles;
 	/* absolute timeout */
@@ -901,6 +935,14 @@ struct drm_syncobj_wait {
 	__u32 flags;
 	__u32 first_signaled; /* only valid when not waiting all */
 	__u32 pad;
+	/**
+	 * @deadline_nsec: fence deadline hint
+	 *
+	 * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
+	 * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
+	 * set.
+	 */
+	__u64 deadline_nsec;
 };
 
 struct drm_syncobj_timeline_wait {
@@ -913,6 +955,14 @@ struct drm_syncobj_timeline_wait {
 	__u32 flags;
 	__u32 first_signaled; /* only valid when not waiting all */
 	__u32 pad;
+	/**
+	 * @deadline_nsec: fence deadline hint
+	 *
+	 * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
+	 * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
+	 * set.
+	 */
+	__u64 deadline_nsec;
 };
 
 /**
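
A userspace sketch of a wait that passes the new deadline hint; the budget value and the policy behind it are up to the caller:

#include <stdint.h>
#include <sys/ioctl.h>
#include <time.h>
#include <drm/drm.h>

static int syncobj_wait_with_deadline(int fd, uint32_t handle,
				      uint64_t budget_ns)
{
	struct timespec t;
	struct drm_syncobj_wait wait = {
		.handles = (uint64_t)(uintptr_t)&handle,
		.timeout_nsec = INT64_MAX,
		.count_handles = 1,
		.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE,
	};

	clock_gettime(CLOCK_MONOTONIC, &t);
	wait.deadline_nsec = (uint64_t)t.tv_sec * 1000000000ull +
			     (uint64_t)t.tv_nsec + budget_ns;

	return ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
}
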
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 09e7a471ee30b8801ddd7dd38b19fb887b562675..95630f1701102b79851b2634be327b6e56462504 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -957,6 +957,15 @@ struct hdr_output_metadata {
  * Request that the page-flip is performed as soon as possible, ie. with no
  * delay due to waiting for vblank. This may cause tearing to be visible on
  * the screen.
+ *
+ * When used with atomic uAPI, the driver will return an error if the hardware
+ * doesn't support performing an asynchronous page-flip for this update.
+ * User-space should handle this, e.g. by falling back to a regular page-flip.
+ *
+ * Note that some hardware might need to perform one last synchronous page-flip
+ * before being able to switch to asynchronous page-flips. As an exception,
+ * the driver will return success even though that first page-flip is not
+ * asynchronous.
  */
 #define DRM_MODE_PAGE_FLIP_ASYNC 0x02
 #define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4
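
The fallback the text asks user-space to implement might look like this hedged libdrm sketch (fd and req assumed set up elsewhere); the exact error returned for an unsupported async update is driver-specific, so the retry keys off any failure of the async attempt:

#include <stdint.h>
#include <xf86drmMode.h>

uint32_t flags = DRM_MODE_ATOMIC_NONBLOCK | DRM_MODE_PAGE_FLIP_EVENT;
int ret;

ret = drmModeAtomicCommit(fd, req, flags | DRM_MODE_PAGE_FLIP_ASYNC, NULL);
if (ret < 0) {
	/* Async flip rejected for this update: fall back to a
	 * regular vblank-synchronized page-flip.
	 */
	ret = drmModeAtomicCommit(fd, req, flags, NULL);
}
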
diff --git a/include/uapi/drm/pvr_drm.h b/include/uapi/drm/pvr_drm.h
index 1834375390c452de8c851c7f593d2082a962526e..ccf6c2112468f3f53b093bc298dac2bbbfd35e6e 100644
--- a/include/uapi/drm/pvr_drm.h
+++ b/include/uapi/drm/pvr_drm.h
@@ -741,20 +741,18 @@ enum drm_pvr_ctx_priority {
  */
 enum drm_pvr_ctx_type {
 	/**
-	 * @DRM_PVR_CTX_TYPE_RENDER: Render context. Use &struct
-	 * drm_pvr_ioctl_create_render_context_args for context creation arguments.
+	 * @DRM_PVR_CTX_TYPE_RENDER: Render context.
 	 */
 	DRM_PVR_CTX_TYPE_RENDER = 0,
 
 	/**
-	 * @DRM_PVR_CTX_TYPE_COMPUTE: Compute context. Use &struct
-	 * drm_pvr_ioctl_create_compute_context_args for context creation arguments.
+	 * @DRM_PVR_CTX_TYPE_COMPUTE: Compute context.
 	 */
 	DRM_PVR_CTX_TYPE_COMPUTE,
 
 	/**
-	 * @DRM_PVR_CTX_TYPE_TRANSFER_FRAG: Transfer context for fragment data masters. Use
-	 * &struct drm_pvr_ioctl_create_transfer_context_args for context creation arguments.
+	 * @DRM_PVR_CTX_TYPE_TRANSFER_FRAG: Transfer context for fragment data
+	 * master.
 	 */
 	DRM_PVR_CTX_TYPE_TRANSFER_FRAG,
 };
diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h
index 1a7d7a689de38e8813ec9e6c8e03d2a773f2be41..dce1835eced49c105d300628959493f2a5d148c9 100644
--- a/include/uapi/drm/v3d_drm.h
+++ b/include/uapi/drm/v3d_drm.h
@@ -41,6 +41,7 @@ extern "C" {
 #define DRM_V3D_PERFMON_CREATE                    0x08
 #define DRM_V3D_PERFMON_DESTROY                   0x09
 #define DRM_V3D_PERFMON_GET_VALUES                0x0a
+#define DRM_V3D_SUBMIT_CPU                        0x0b
 
 #define DRM_IOCTL_V3D_SUBMIT_CL           DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
 #define DRM_IOCTL_V3D_WAIT_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
@@ -56,6 +57,7 @@ extern "C" {
 						   struct drm_v3d_perfmon_destroy)
 #define DRM_IOCTL_V3D_PERFMON_GET_VALUES  DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_GET_VALUES, \
 						   struct drm_v3d_perfmon_get_values)
+#define DRM_IOCTL_V3D_SUBMIT_CPU          DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CPU, struct drm_v3d_submit_cpu)
 
 #define DRM_V3D_SUBMIT_CL_FLUSH_CACHE             0x01
 #define DRM_V3D_SUBMIT_EXTENSION		  0x02
@@ -69,7 +71,13 @@ extern "C" {
 struct drm_v3d_extension {
 	__u64 next;
 	__u32 id;
-#define DRM_V3D_EXT_ID_MULTI_SYNC		0x01
+#define DRM_V3D_EXT_ID_MULTI_SYNC			0x01
+#define DRM_V3D_EXT_ID_CPU_INDIRECT_CSD		0x02
+#define DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY		0x03
+#define DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY	0x04
+#define DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY	0x05
+#define DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY	0x06
+#define DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY	0x07
 	__u32 flags; /* mbz */
 };
 
@@ -93,6 +101,7 @@ enum v3d_queue {
 	V3D_TFU,
 	V3D_CSD,
 	V3D_CACHE_CLEAN,
+	V3D_CPU,
 };
 
 /**
@@ -276,6 +285,7 @@ enum drm_v3d_param {
 	DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH,
 	DRM_V3D_PARAM_SUPPORTS_PERFMON,
 	DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT,
+	DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE,
 };
 
 struct drm_v3d_get_param {
@@ -361,6 +371,234 @@ struct drm_v3d_submit_csd {
 	__u32 pad;
 };
 
+/**
+ * struct drm_v3d_indirect_csd - ioctl extension for the CPU job to create an
+ * indirect CSD
+ *
+ * When an extension with the DRM_V3D_EXT_ID_CPU_INDIRECT_CSD id is defined,
+ * it points to this extension to define an indirect CSD submission. It
+ * creates a CPU job linked to a CSD job. The CPU job waits for the indirect
+ * CSD dependencies and, once they are signaled, it updates the CSD job config
+ * before allowing the CSD job execution.
+ */
+struct drm_v3d_indirect_csd {
+	struct drm_v3d_extension base;
+
+	/* Indirect CSD */
+	struct drm_v3d_submit_csd submit;
+
+	/* Handle of the indirect BO, which should also be attached to the
+	 * indirect CSD.
+	 */
+	__u32 indirect;
+
+	/* Offset within the BO where the workgroup counts are stored */
+	__u32 offset;
+
+	/* Workgroup size */
+	__u32 wg_size;
+
+	/* Indices of the uniforms with the workgroup dispatch counts
+	 * in the uniform stream. If the uniform rewrite is not needed,
+	 * the offset must be 0xffffffff.
+	 */
+	__u32 wg_uniform_offsets[3];
+};
+
+/**
+ * struct drm_v3d_timestamp_query - ioctl extension for the CPU job to calculate
+ * a timestamp query
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY is defined, it points to
+ * this extension to define a timestamp query submission. This CPU job will
+ * calculate the timestamp query and update the query value within the
+ * timestamp BO. Moreover, it will signal the timestamp syncobj to indicate
+ * query availability.
+ */
+struct drm_v3d_timestamp_query {
+	struct drm_v3d_extension base;
+
+	/* Array of queries' offsets within the timestamp BO for their value */
+	__u64 offsets;
+
+	/* Array of timestamp syncobjs to indicate their availability */
+	__u64 syncs;
+
+	/* Number of queries */
+	__u32 count;
+
+	/* mbz */
+	__u32 pad;
+};
+
+/**
+ * struct drm_v3d_reset_timestamp_query - ioctl extension for the CPU job to
+ * reset timestamp queries
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY is defined, it
+ * points to this extension to define a reset timestamp submission. This CPU
+ * job will reset the timestamp queries based on value offset of the first
+ * query. Moreover, it will reset the timestamp syncobj to reset query
+ * availability.
+ */
+struct drm_v3d_reset_timestamp_query {
+	struct drm_v3d_extension base;
+
+	/* Array of timestamp syncobjs to indicate their availability */
+	__u64 syncs;
+
+	/* Offset of the first query within the timestamp BO for its value */
+	__u32 offset;
+
+	/* Number of queries */
+	__u32 count;
+};
+
+/**
+ * struct drm_v3d_copy_timestamp_query - ioctl extension for the CPU job to copy
+ * query results to a buffer
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY is defined, it
+ * points to this extension to define a copy timestamp query submission. This
+ * CPU job will copy the timestamp queries results to a BO with the offset
+ * and stride defined in the extension.
+ */
+struct drm_v3d_copy_timestamp_query {
+	struct drm_v3d_extension base;
+
+	/* Define if it should write to the buffer using 64 or 32 bits */
+	__u8 do_64bit;
+
+	/* Define if it can write to the buffer even if the query is not available */
+	__u8 do_partial;
+
+	/* Define if it should write the availability bit to the buffer */
+	__u8 availability_bit;
+
+	/* mbz */
+	__u8 pad;
+
+	/* Offset of the buffer in the BO */
+	__u32 offset;
+
+	/* Stride of the buffer in the BO */
+	__u32 stride;
+
+	/* Number of queries */
+	__u32 count;
+
+	/* Array of queries' offsets within the timestamp BO for their value */
+	__u64 offsets;
+
+	/* Array of timestamp syncobjs to indicate their availability */
+	__u64 syncs;
+};
+
+/**
+ * struct drm_v3d_reset_performance_query - ioctl extension for the CPU job to
+ * reset performance queries
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY is defined, it
+ * points to this extension to define a reset performance submission. This CPU
+ * job will reset the performance queries by resetting the values of the
+ * performance monitors. Moreover, it will reset the syncobj to reset query
+ * availability.
+ */
+struct drm_v3d_reset_performance_query {
+	struct drm_v3d_extension base;
+
+	/* Array of performance queries' syncobjs to indicate their availability */
+	__u64 syncs;
+
+	/* Number of queries */
+	__u32 count;
+
+	/* Number of performance monitors */
+	__u32 nperfmons;
+
+	/* Array of u64 user-pointers that point to an array of kperfmon_ids */
+	__u64 kperfmon_ids;
+};
+
+/**
+ * struct drm_v3d_copy_performance_query - ioctl extension for the CPU job to copy
+ * performance query results to a buffer
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY is defined, it
+ * points to this extension to define a copy performance query submission. This
+ * CPU job will copy the performance queries results to a BO with the offset
+ * and stride defined in the extension.
+ */
+struct drm_v3d_copy_performance_query {
+	struct drm_v3d_extension base;
+
+	/* Define if it should write to the buffer using 64 or 32 bits */
+	__u8 do_64bit;
+
+	/* Define if it can write to the buffer even if the query is not available */
+	__u8 do_partial;
+
+	/* Define if it should write the availability bit to the buffer */
+	__u8 availability_bit;
+
+	/* mbz */
+	__u8 pad;
+
+	/* Offset of the buffer in the BO */
+	__u32 offset;
+
+	/* Stride of the buffer in the BO */
+	__u32 stride;
+
+	/* Number of performance monitors */
+	__u32 nperfmons;
+
+	/* Number of performance counters related to this query pool */
+	__u32 ncounters;
+
+	/* Number of queries */
+	__u32 count;
+
+	/* Array of performance queries' syncobjs to indicate their availability */
+	__u64 syncs;
+
+	/* Array of u64 user-pointers that point to an array of kperfmon_ids */
+	__u64 kperfmon_ids;
+};
+
+struct drm_v3d_submit_cpu {
+	/* Pointer to a u32 array of the BOs that are referenced by the job.
+	 *
+	 * For DRM_V3D_EXT_ID_CPU_INDIRECT_CSD, it must contain only one BO,
+	 * that contains the workgroup counts.
+	 *
+	 * For DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY, it must contain only one BO,
+	 * that will contain the timestamp.
+	 *
+	 * For DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY, it must contain only
+	 * one BO, that contains the timestamp.
+	 *
+	 * For DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY, it must contain two
+	 * BOs. The first is the BO where the timestamp queries will be written
+	 * to. The second is the BO that contains the timestamp.
+	 *
+	 * For DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY, it must contain no
+	 * BOs.
+	 *
+	 * For DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY, it must contain one
+	 * BO, where the performance queries will be written.
+	 */
+	__u64 bo_handles;
+
+	/* Number of BO handles passed in (size is that times 4). */
+	__u32 bo_handle_count;
+
+	__u32 flags;
+
+	/* Pointer to an array of ioctl extensions */
+	__u64 extensions;
+};
+
 enum {
 	V3D_PERFCNT_FEP_VALID_PRIMTS_NO_PIXELS,
 	V3D_PERFCNT_FEP_VALID_PRIMS,
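
Putting the pieces together, a userspace sketch of one CPU job that records a single timestamp query; the BO handle, offset and syncobj values are placeholders:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/v3d_drm.h>

static int v3d_record_timestamp(int fd, uint32_t timestamp_bo,
				uint32_t syncobj)
{
	uint64_t offsets[] = { 0 };       /* value offset inside the BO */
	uint64_t syncs[] = { syncobj };   /* signaled when available */

	struct drm_v3d_timestamp_query query = {
		.base.id = DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY,
		.offsets = (uint64_t)(uintptr_t)offsets,
		.syncs = (uint64_t)(uintptr_t)syncs,
		.count = 1,
	};

	uint32_t bos[] = { timestamp_bo };

	struct drm_v3d_submit_cpu submit = {
		.bo_handles = (uint64_t)(uintptr_t)bos,
		.bo_handle_count = 1,
		.extensions = (uint64_t)(uintptr_t)&query,
	};

	return ioctl(fd, DRM_IOCTL_V3D_SUBMIT_CPU, &submit);
}
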
diff --git a/include/uapi/linux/sync_file.h b/include/uapi/linux/sync_file.h
index ff0a931833e25f32805ff734142a6cc83f5237c8..ff1f38889dcfb48ec1217e2e1f5c1a624d74dced 100644
--- a/include/uapi/linux/sync_file.h
+++ b/include/uapi/linux/sync_file.h
@@ -76,6 +76,27 @@ struct sync_file_info {
 	__u64	sync_fence_info;
 };
 
+/**
+ * struct sync_set_deadline - SYNC_IOC_SET_DEADLINE - set a deadline hint on a fence
+ * @deadline_ns: absolute time of the deadline
+ * @pad:	must be zero
+ *
+ * Allows userspace to set a deadline on a fence, see dma_fence_set_deadline().
+ *
+ * The timebase for the deadline is CLOCK_MONOTONIC (same as vblank).  For
+ * example
+ *
+ *     clock_gettime(CLOCK_MONOTONIC, &t);
+ *     deadline_ns = (t.tv_sec * 1000000000L) + t.tv_nsec + ns_until_deadline
+ */
+struct sync_set_deadline {
+	__u64	deadline_ns;
+	/* Not strictly needed for alignment but gives some possibility
+	 * for future extension:
+	 */
+	__u64	pad;
+};
+
 #define SYNC_IOC_MAGIC		'>'
 
 /*
@@ -87,5 +108,6 @@ struct sync_file_info {
 
 #define SYNC_IOC_MERGE		_IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data)
 #define SYNC_IOC_FILE_INFO	_IOWR(SYNC_IOC_MAGIC, 4, struct sync_file_info)
+#define SYNC_IOC_SET_DEADLINE	_IOW(SYNC_IOC_MAGIC, 5, struct sync_set_deadline)
 
 #endif /* _UAPI_LINUX_SYNC_H */
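
A userspace sketch of the new ioctl, following the CLOCK_MONOTONIC recipe from the comment above; the budget passed in is the caller's policy (e.g. 16666667 ns for one 60 Hz frame):

#include <sys/ioctl.h>
#include <time.h>
#include <linux/sync_file.h>

static int sync_file_set_deadline(int fence_fd, __u64 budget_ns)
{
	struct timespec t;
	struct sync_set_deadline args = {0};

	clock_gettime(CLOCK_MONOTONIC, &t);
	args.deadline_ns = (__u64)t.tv_sec * 1000000000ull +
			   (__u64)t.tv_nsec + budget_ns;

	return ioctl(fence_fd, SYNC_IOC_SET_DEADLINE, &args);
}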