author    André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-09-11 12:58:59 -0300
committer André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-09-11 12:58:59 -0300
commit    0520a938e11c34a5ffc422b9316b85e294b0fbb2
tree      9e44592eccb90ed2d2b3a893fb602e4ca894f695
parent    273d4428f8c4cc94c9598f8bcc006ec2e8c654ea

Linux-libre 4.7.3-gnu (tag: pck-4.7.3-gnu)
-rw-r--r--  Documentation/scheduler/sched-BFS.txt | 107
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arc/include/asm/entry.h | 4
-rw-r--r--  arch/arc/include/asm/irqflags-compact.h | 2
-rw-r--r--  arch/arc/include/asm/pgtable.h | 2
-rw-r--r--  arch/arc/mm/cache.c | 9
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3368.dtsi | 2
-rw-r--r--  arch/arm64/include/asm/elf.h | 1
-rw-r--r--  arch/arm64/include/uapi/asm/auxvec.h | 2
-rw-r--r--  arch/arm64/kernel/head.S | 3
-rw-r--r--  arch/arm64/kernel/sleep.S | 10
-rw-r--r--  arch/arm64/mm/proc.S | 9
-rw-r--r--  arch/parisc/include/uapi/asm/errno.h | 4
-rw-r--r--  arch/parisc/kernel/processor.c | 8
-rw-r--r--  arch/parisc/kernel/time.c | 12
-rw-r--r--  arch/um/include/asm/common.lds.S | 2
-rw-r--r--  arch/x86/include/asm/tlbflush.h | 7
-rw-r--r--  arch/x86/kernel/uprobes.c | 22
-rw-r--r--  arch/x86/platform/uv/bios_uv.c | 3
-rw-r--r--  drivers/acpi/cppc_acpi.c | 24
-rw-r--r--  drivers/acpi/nfit.c | 3
-rw-r--r--  drivers/acpi/scan.c | 6
-rw-r--r--  drivers/block/floppy.c | 21
-rw-r--r--  drivers/clk/renesas/r8a7795-cpg-mssr.c | 9
-rw-r--r--  drivers/crypto/caam/caamalg.c | 13
-rw-r--r--  drivers/crypto/caam/caamhash.c | 1
-rw-r--r--  drivers/crypto/nx/nx.c | 2
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 4
-rw-r--r--  drivers/dax/pmem.c | 3
-rw-r--r--  drivers/dma/sh/usb-dmac.c | 19
-rw-r--r--  drivers/edac/sb_edac.c | 15
-rw-r--r--  drivers/firmware/efi/capsule-loader.c | 8
-rw-r--r--  drivers/firmware/efi/capsule.c | 6
-rw-r--r--  drivers/gpio/Kconfig | 1
-rw-r--r--  drivers/gpio/gpio-max730x.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 28
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 91
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 117
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 9
-rw-r--r--  drivers/hwmon/iio_hwmon.c | 24
-rw-r--r--  drivers/hwmon/it87.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-cros-ec-tunnel.c | 2
-rw-r--r--  drivers/i2c/muxes/i2c-demux-pinctrl.c | 4
-rw-r--r--  drivers/iio/industrialio-buffer.c | 23
-rw-r--r--  drivers/input/keyboard/tegra-kbc.c | 2
-rw-r--r--  drivers/input/rmi4/rmi_driver.c | 3
-rw-r--r--  drivers/input/serio/i8042.c | 17
-rw-r--r--  drivers/input/serio/libps2.c | 10
-rw-r--r--  drivers/iommu/arm-smmu-v3.c | 7
-rw-r--r--  drivers/iommu/arm-smmu.c | 34
-rw-r--r--  drivers/iommu/dma-iommu.c | 3
-rw-r--r--  drivers/iommu/io-pgtable-arm-v7s.c | 4
-rw-r--r--  drivers/md/dm-round-robin.c | 7
-rw-r--r--  drivers/of/base.c | 11
-rw-r--r--  drivers/pci/msi.c | 2
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson.c | 8
-rw-r--r--  drivers/pinctrl/pinctrl-amd.c | 20
-rw-r--r--  drivers/platform/chrome/cros_ec_proto.c | 17
-rw-r--r--  drivers/s390/block/dasd.c | 10
-rw-r--r--  drivers/scsi/aacraid/commctrl.c | 13
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 6
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 22
-rw-r--r--  drivers/staging/comedi/drivers/comedi_test.c | 46
-rw-r--r--  drivers/staging/comedi/drivers/daqboard2000.c | 2
-rw-r--r--  drivers/staging/comedi/drivers/ni_mio_common.c | 12
-rw-r--r--  drivers/staging/lustre/lustre/llite/namei.c | 43
-rw-r--r--  drivers/usb/chipidea/udc.c | 7
-rw-r--r--  drivers/usb/class/cdc-acm.c | 5
-rw-r--r--  drivers/usb/class/cdc-acm.h | 1
-rw-r--r--  drivers/usb/core/config.c | 66
-rw-r--r--  drivers/usb/core/devio.c | 19
-rw-r--r--  drivers/usb/core/hub.c | 23
-rw-r--r--  drivers/usb/dwc3/dwc3-pci.c | 2
-rw-r--r--  drivers/usb/dwc3/gadget.c | 42
-rw-r--r--  drivers/usb/gadget/legacy/inode.c | 2
-rw-r--r--  drivers/usb/gadget/udc/fsl_qe_udc.c | 2
-rw-r--r--  drivers/usb/gadget/udc/udc-core.c | 2
-rw-r--r--  drivers/usb/host/ehci-hcd.c | 4
-rw-r--r--  drivers/usb/host/xhci-hub.c | 3
-rw-r--r--  drivers/usb/host/xhci-pci.c | 3
-rw-r--r--  drivers/usb/host/xhci-ring.c | 13
-rw-r--r--  drivers/usb/misc/usbtest.c | 9
-rw-r--r--  drivers/usb/renesas_usbhs/common.c | 3
-rw-r--r--  drivers/usb/renesas_usbhs/fifo.c | 4
-rw-r--r--  drivers/usb/renesas_usbhs/mod_gadget.c | 9
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 3
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 12
-rw-r--r--  drivers/usb/serial/mos7720.c | 2
-rw-r--r--  drivers/usb/serial/mos7840.c | 4
-rw-r--r--  drivers/usb/serial/option.c | 31
-rw-r--r--  drivers/usb/serial/usb-serial.c | 4
-rw-r--r--  drivers/vfio/pci/vfio_pci_intrs.c | 85
-rw-r--r--  drivers/virtio/virtio_ring.c | 2
-rw-r--r--  fs/aufs/branch.c | 15
-rw-r--r--  fs/aufs/dbgaufs.c | 3
-rw-r--r--  fs/aufs/dcsub.c | 3
-rw-r--r--  fs/aufs/dentry.c | 8
-rw-r--r--  fs/aufs/dentry.h | 2
-rw-r--r--  fs/aufs/dinfo.c | 5
-rw-r--r--  fs/aufs/file.c | 12
-rw-r--r--  fs/aufs/file.h | 2
-rw-r--r--  fs/aufs/finfo.c | 4
-rw-r--r--  fs/aufs/iinfo.c | 5
-rw-r--r--  fs/aufs/inode.c | 6
-rw-r--r--  fs/aufs/inode.h | 2
-rw-r--r--  fs/aufs/loop.c | 3
-rw-r--r--  fs/aufs/module.c | 60
-rw-r--r--  fs/aufs/module.h | 14
-rw-r--r--  fs/aufs/sbinfo.c | 5
-rw-r--r--  fs/aufs/super.h | 2
-rw-r--r--  fs/aufs/vdir.c | 11
-rw-r--r--  fs/btrfs/ctree.h | 1
-rw-r--r--  fs/btrfs/disk-io.c | 7
-rw-r--r--  fs/btrfs/disk-io.h | 2
-rw-r--r--  fs/btrfs/ioctl.c | 2
-rw-r--r--  fs/btrfs/qgroup.c | 21
-rw-r--r--  fs/btrfs/qgroup.h | 3
-rw-r--r--  fs/btrfs/root-tree.c | 27
-rw-r--r--  fs/seq_file.c | 4
-rw-r--r--  fs/sysfs/file.c | 8
-rw-r--r--  include/linux/acpi.h | 2
-rw-r--r--  include/linux/i8042.h | 6
-rw-r--r--  include/linux/init_task.h | 2
-rw-r--r--  include/linux/mfd/cros_ec.h | 15
-rw-r--r--  include/linux/msi.h | 8
-rw-r--r--  include/linux/sched.h | 3
-rw-r--r--  include/linux/serio.h | 24
-rw-r--r--  include/linux/skip_lists.h | 27
-rw-r--r--  include/trace/events/timer.h | 14
-rw-r--r--  init/Kconfig | 9
-rw-r--r--  kernel/Makefile | 2
-rw-r--r--  kernel/events/core.c | 60
-rw-r--r--  kernel/events/uprobes.c | 5
-rw-r--r--  kernel/irq/msi.c | 19
-rw-r--r--  kernel/sched/bfs.c | 704
-rw-r--r--  kernel/sched/bfs_sched.h | 25
-rw-r--r--  kernel/sched/cputime.c | 15
-rw-r--r--  kernel/skip_lists.c | 177
-rw-r--r--  mm/huge_memory.c | 7
-rw-r--r--  mm/page_alloc.c | 50
-rw-r--r--  mm/readahead.c | 9
-rw-r--r--  mm/slub.c | 6
-rw-r--r--  net/mac80211/cfg.c | 2
-rw-r--r--  net/mac80211/tx.c | 6
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 8
-rw-r--r--  net/sunrpc/xprtmultipath.c | 8
-rw-r--r--  net/sunrpc/xprtsock.c | 4
-rw-r--r--  sound/pci/hda/hda_intel.c | 32
-rw-r--r--  sound/usb/line6/pcm.c | 3
-rw-r--r--  sound/usb/line6/pod.c | 12
-rw-r--r--  sound/usb/quirks.c | 2
-rw-r--r--  tools/perf/arch/x86/util/intel-pt.c | 6
-rw-r--r--  tools/perf/builtin-mem.c | 3
-rw-r--r--  tools/perf/util/symbol-elf.c | 3
-rw-r--r--  tools/testing/nvdimm/test/nfit.c | 2
169 files changed, 1990 insertions(+), 918 deletions(-)
diff --git a/Documentation/scheduler/sched-BFS.txt b/Documentation/scheduler/sched-BFS.txt
index 77a1e37b7..aaaa3c256 100644
--- a/Documentation/scheduler/sched-BFS.txt
+++ b/Documentation/scheduler/sched-BFS.txt
@@ -13,14 +13,14 @@ one workload cause massive detriment to another.
Design summary.
-BFS is best described as a single runqueue, O(n) lookup, earliest effective
-virtual deadline first design, loosely based on EEVDF (earliest eligible virtual
-deadline first) and my previous Staircase Deadline scheduler. Each component
-shall be described in order to understand the significance of, and reasoning for
-it. The codebase when the first stable version was released was approximately
-9000 lines less code than the existing mainline linux kernel scheduler (in
-2.6.31). This does not even take into account the removal of documentation and
-the cgroups code that is not used.
+BFS is best described as a single runqueue, O(log n) insertion, O(1) lookup,
+earliest effective virtual deadline first design, loosely based on EEVDF
+(earliest eligible virtual deadline first) and my previous Staircase Deadline
+scheduler. Each component shall be described in order to understand the
+significance of, and reasoning for it. The codebase when the first stable
+version was released was approximately 9000 lines less code than the existing
+mainline linux kernel scheduler (in 2.6.31). This does not even take into
+account the removal of documentation and the cgroups code that is not used.
Design reasoning.
@@ -62,12 +62,13 @@ Design details.
Task insertion.
-BFS inserts tasks into each relevant queue as an O(1) insertion into a double
-linked list. On insertion, *every* running queue is checked to see if the newly
-queued task can run on any idle queue, or preempt the lowest running task on the
-system. This is how the cross-CPU scheduling of BFS achieves significantly lower
-latency per extra CPU the system has. In this case the lookup is, in the worst
-case scenario, O(n) where n is the number of CPUs on the system.
+BFS inserts tasks into each relevant queue as an O(log n) insertion into a
+customised skip list (as described by William Pugh). At the time of insertion,
+*every* running queue is checked to see if the newly queued task can run on any
+idle queue, or preempt the lowest running task on the system. This is how the
+cross-CPU scheduling of BFS achieves significantly lower latency per extra CPU
+the system has. In this case the lookup is, in the worst case scenario, O(k)
+where k is the number of online CPUs on the system.
Data protection.
@@ -117,7 +118,7 @@ higher priority than a currently running task on any cpu by virtue of the fact
that it has an earlier virtual deadline than the currently running task. The
earlier deadline is the key to which task is next chosen for the first and
second cases. Once a task is descheduled, it is put back on the queue, and an
-O(n) lookup of all queued-but-not-running tasks is done to determine which has
+O(1) lookup of all queued-but-not-running tasks is done to determine which has
the earliest deadline and that task is chosen to receive CPU next.
The CPU proportion of different nice tasks works out to be approximately the
@@ -134,26 +135,40 @@ Task lookup.
BFS has 103 priority queues. 100 of these are dedicated to the static priority
of realtime tasks, and the remaining 3 are, in order of best to worst priority,
-SCHED_ISO (isochronous), SCHED_NORMAL, and SCHED_IDLEPRIO (idle priority
-scheduling). When a task of these priorities is queued, a bitmap of running
-priorities is set showing which of these priorities has tasks waiting for CPU
-time. When a CPU is made to reschedule, the lookup for the next task to get
-CPU time is performed in the following way:
-
-First the bitmap is checked to see what static priority tasks are queued. If
-any realtime priorities are found, the corresponding queue is checked and the
-first task listed there is taken (provided CPU affinity is suitable) and lookup
-is complete. If the priority corresponds to a SCHED_ISO task, they are also
-taken in FIFO order (as they behave like SCHED_RR). If the priority corresponds
-to either SCHED_NORMAL or SCHED_IDLEPRIO, then the lookup becomes O(n). At this
-stage, every task in the runlist that corresponds to that priority is checked
-to see which has the earliest set deadline, and (provided it has suitable CPU
-affinity) it is taken off the runqueue and given the CPU. If a task has an
-expired deadline, it is taken and the rest of the lookup aborted (as they are
-chosen in FIFO order).
-
-Thus, the lookup is O(n) in the worst case only, where n is as described
-earlier, as tasks may be chosen before the whole task list is looked over.
+SCHED_ISO (isochronous), SCHED_NORMAL/SCHED_BATCH, and SCHED_IDLEPRIO (idle
+priority scheduling).
+
+When a task of these priorities is queued, it is added to the skiplist with a
+different sorting value according to the type of task. For realtime tasks and
+isochronous tasks, it is their static priority. For SCHED_NORMAL and
+SCHED_BATCH tasks it is their virtual deadline value. For SCHED_IDLEPRIO tasks
+it is their virtual deadline value offset by an impossibly large value to ensure
+they never go before normal tasks. When isochronous or idleprio tasks do not
+meet the conditions that allow them to run with their special scheduling, they
+are queued as per the remainder of the SCHED_NORMAL tasks.
+
+Lookup is performed by selecting the very first entry in the "level 0"
+skiplist, as sorting on insertion guarantees it has the lowest sort value
+(i.e. it is the task that should run next). This is usually an O(1) operation;
+however, if tasks with a limited affinity set are not able to run on the
+current CPU, the next entry in the list is checked, and so on.
+
+Thus, the lookup for the common case is O(1) and O(n) in the worst case when
+the system has nothing but selectively affined tasks that can never run on the
+current CPU.
+
+
+Task removal.
+
+Removal of tasks from the skip list is an O(k) operation where 0 <= k < 16,
+corresponding to the "levels" in the skip list. 16 was chosen as the upper
+limit because it guarantees O(log n) insertion for up to 64k currently active
+tasks, most systems do not usually allow more than 32k tasks, and 16 levels
+make the skiplist lookup components fit in 2 cachelines.
+The skiplist level chosen when inserting a task is pseudo-random, but a minor
+optimisation is used to limit the max level based on the absolute number of
+queued tasks, since high levels afford no advantage at low numbers of queued
+tasks yet increase overhead.
Scalability.
@@ -217,17 +232,14 @@ following preference (if idle):
This shows the SMT or "hyperthread" awareness in the design as well which will
choose a real idle core first before a logical SMT sibling which already has
-tasks on the physical CPU.
-
-Early benchmarking of BFS suggested scalability dropped off at the 16 CPU mark.
-However this benchmarking was performed on an earlier design that was far less
-scalable than the current one so it's hard to know how scalable it is in terms
-of both CPUs (due to the global runqueue) and heavily loaded machines (due to
-O(n) lookup) at this stage. Note that in terms of scalability, the number of
-_logical_ CPUs matters, not the number of _physical_ CPUs. Thus, a dual (2x)
-quad core (4X) hyperthreaded (2X) machine is effectively a 16X. Newer benchmark
-results are very promising indeed. Benchmark contributions are most welcome.
-
+tasks on the physical CPU. Early benchmarking of BFS suggested scalability
+dropped off at the 16 CPU mark. However, this benchmarking was performed on an
+earlier design that was far less scalable than the current one, so it's hard to
+know how scalable it is in terms of the number of CPUs (due to the global
+runqueue). Note that in terms of scalability, the number of _logical_ CPUs
+matters, not the number of _physical_ CPUs. Thus, a dual (2x) quad core (4X)
+hyperthreaded (2X) machine is effectively a 16X. Newer benchmark results are
+very promising indeed. Benchmark contributions are most welcome.
Features
@@ -240,6 +252,9 @@ and iso_cpu tunables, and the SCHED_ISO and SCHED_IDLEPRIO policies. In addition
to this, BFS also uses sub-tick accounting. What BFS does _not_ now feature is
support for CGROUPS. The average user should neither need to know what these
are, nor should they need to be using them to have good desktop behaviour.
+Rudimentary support for the CPU controller CGROUP is provided in the form of
+filesystem stubs for the expected CGROUP structure, so that applications which
+demand their presence still work, but the stubs have no functionality.
There are two "scheduler" tunables, the round robin interval and the
interactive flag. These can be accessed in
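
For readers following the documentation changes above, here is a minimal,
self-contained userspace sketch of the skip-list discipline sched-BFS.txt
describes (sorted insertion, pseudo-random levels capped by queue size, O(1)
front lookup). It is an illustration only, deliberately not the kernel's
kernel/skip_lists.c; all names in it are hypothetical.

/*
 * Illustrative userspace sketch of the skip-list runqueue discipline
 * described in sched-BFS.txt above; NOT the kernel's kernel/skip_lists.c.
 * The key stands in for the virtual deadline: lower key = runs sooner.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_LEVELS 16	/* 16 levels cover ~64k tasks, per the documentation */

struct sl_node {
	unsigned long key;			/* sort value (deadline/priority) */
	struct sl_node *next[MAX_LEVELS];
};

struct sl_list {
	struct sl_node head;	/* sentinel; head.next[0] is the O(1) lookup */
	int entries;
};

/* Pseudo-random level, capped by the number of queued entries (the
 * "minor optimisation" mentioned in the Task removal section). */
static int sl_random_level(const struct sl_list *sl)
{
	int level = 1, cap = 1, n = sl->entries;

	while (n >>= 1)				/* cap ~ floor(log2(entries)) + 1 */
		cap++;
	if (cap > MAX_LEVELS)
		cap = MAX_LEVELS;
	while (level < cap && (rand() & 1))	/* P(level >= l) = 2^-(l-1) */
		level++;
	return level;
}

/* O(log n) expected insertion; every level stays sorted by key. */
static void sl_insert(struct sl_list *sl, struct sl_node *node)
{
	struct sl_node *update[MAX_LEVELS], *p = &sl->head;
	int i, level = sl_random_level(sl);

	for (i = MAX_LEVELS - 1; i >= 0; i--) {
		while (p->next[i] && p->next[i]->key < node->key)
			p = p->next[i];
		update[i] = p;
	}
	for (i = 0; i < level; i++) {
		node->next[i] = update[i]->next[i];
		update[i]->next[i] = node;
	}
	for (; i < MAX_LEVELS; i++)
		node->next[i] = NULL;
	sl->entries++;
}

/* O(1) common-case lookup: the first level-0 entry has the earliest key. */
static struct sl_node *sl_first(struct sl_list *sl)
{
	return sl->head.next[0];
}

int main(void)
{
	struct sl_list rq = { .entries = 0 };
	struct sl_node t1 = { .key = 40 }, t2 = { .key = 10 }, t3 = { .key = 25 };

	sl_insert(&rq, &t1);
	sl_insert(&rq, &t2);
	sl_insert(&rq, &t3);
	printf("earliest deadline: %lu\n", sl_first(&rq)->key);	/* prints 10 */
	return 0;
}

Removal in the real implementation is O(k) because each node also records its
per-level predecessors; that detail is omitted from this sketch for brevity.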
diff --git a/Makefile b/Makefile
index c6f4d6528..b65e84d6b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 7
-SUBLEVEL = 2
+SUBLEVEL = 3
EXTRAVERSION = -gnu
NAME = Psychotic Stoned Sheep
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index ad7860c5c..51597f344 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -142,7 +142,7 @@
#ifdef CONFIG_ARC_CURR_IN_REG
; Retrieve orig r25 and save it with rest of callee_regs
- ld.as r12, [r12, PT_user_r25]
+ ld r12, [r12, PT_user_r25]
PUSH r12
#else
PUSH r25
@@ -198,7 +198,7 @@
; SP is back to start of pt_regs
#ifdef CONFIG_ARC_CURR_IN_REG
- st.as r12, [sp, PT_user_r25]
+ st r12, [sp, PT_user_r25]
#endif
.endm
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
index c1d36458b..4c6eed80c 100644
--- a/arch/arc/include/asm/irqflags-compact.h
+++ b/arch/arc/include/asm/irqflags-compact.h
@@ -188,10 +188,10 @@ static inline int arch_irqs_disabled(void)
.endm
.macro IRQ_ENABLE scratch
+ TRACE_ASM_IRQ_ENABLE
lr \scratch, [status32]
or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
flag \scratch
- TRACE_ASM_IRQ_ENABLE
.endm
#endif /* __ASSEMBLY__ */
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 0f92d9743..89eeb3720 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -280,7 +280,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
-#define pfn_pte(pfn, prot) (__pte(((pte_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 5a294b2c3..0b10efe3a 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -921,6 +921,15 @@ void arc_cache_init(void)
printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+ /*
+ * Only master CPU needs to execute rest of function:
+ * - Assume SMP so all cores will have same cache config so
+ * any geomtry checks will be same for all
+ * - IOC setup / dma callbacks only need to be setup once
+ */
+ if (cpu)
+ return;
+
if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index 080203e3a..dc7f3bcc9 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -270,6 +270,8 @@
#io-channel-cells = <1>;
clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
clock-names = "saradc", "apb_pclk";
+ resets = <&cru SRST_SARADC>;
+ reset-names = "saradc-apb";
status = "disabled";
};
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 579b6e654..a55384f4a 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -140,6 +140,7 @@ typedef struct user_fpsimd_state elf_fpregset_t;
#define SET_PERSONALITY(ex) clear_thread_flag(TIF_32BIT);
+/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
#define ARCH_DLINFO \
do { \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
diff --git a/arch/arm64/include/uapi/asm/auxvec.h b/arch/arm64/include/uapi/asm/auxvec.h
index 22d6d8885..4cf0c1778 100644
--- a/arch/arm64/include/uapi/asm/auxvec.h
+++ b/arch/arm64/include/uapi/asm/auxvec.h
@@ -19,4 +19,6 @@
/* vDSO location */
#define AT_SYSINFO_EHDR 33
+#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
+
#endif
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 2c6e598a9..aa68aadcd 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -757,6 +757,9 @@ ENTRY(__enable_mmu)
isb
bl __create_page_tables // recreate kernel mapping
+ tlbi vmalle1 // Remove any stale TLB entries
+ dsb nsh
+
msr sctlr_el1, x19 // re-enable the MMU
isb
ic iallu // flush instructions fetched
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 9a3aec97a..ccf79d849 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -101,12 +101,20 @@ ENTRY(cpu_resume)
bl el2_setup // if in EL2 drop to EL1 cleanly
/* enable the MMU early - so we can access sleep_save_stash by va */
adr_l lr, __enable_mmu /* __cpu_setup will return here */
- ldr x27, =_cpu_resume /* __enable_mmu will branch here */
+ adr_l x27, _resume_switched /* __enable_mmu will branch here */
adrp x25, idmap_pg_dir
adrp x26, swapper_pg_dir
b __cpu_setup
ENDPROC(cpu_resume)
+ .pushsection ".idmap.text", "ax"
+_resume_switched:
+ ldr x8, =_cpu_resume
+ br x8
+ENDPROC(_resume_switched)
+ .ltorg
+ .popsection
+
ENTRY(_cpu_resume)
mrs x1, mpidr_el1
adrp x8, mpidr_hash
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 5bb61de23..9d37e967f 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -100,7 +100,16 @@ ENTRY(cpu_do_resume)
msr tcr_el1, x8
msr vbar_el1, x9
+
+ /*
+ * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
+ * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
+ * exception. Mask them until local_dbg_restore() in cpu_suspend()
+ * resets them.
+ */
+ disable_dbg
msr mdscr_el1, x10
+
msr sctlr_el1, x12
/*
* Restore oslsr_el1 by writing oslar_el1
diff --git a/arch/parisc/include/uapi/asm/errno.h b/arch/parisc/include/uapi/asm/errno.h
index c0ae62520..274d5bc6e 100644
--- a/arch/parisc/include/uapi/asm/errno.h
+++ b/arch/parisc/include/uapi/asm/errno.h
@@ -97,10 +97,10 @@
#define ENOTCONN 235 /* Transport endpoint is not connected */
#define ESHUTDOWN 236 /* Cannot send after transport endpoint shutdown */
#define ETOOMANYREFS 237 /* Too many references: cannot splice */
-#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
#define ETIMEDOUT 238 /* Connection timed out */
#define ECONNREFUSED 239 /* Connection refused */
-#define EREMOTERELEASE 240 /* Remote peer released connection */
+#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
+#define EREMOTERELEASE 240 /* Remote peer released connection */
#define EHOSTDOWN 241 /* Host is down */
#define EHOSTUNREACH 242 /* No route to host */
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index 5adc339eb..0c2a94a0f 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -51,8 +51,6 @@ EXPORT_SYMBOL(_parisc_requires_coherency);
DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
-extern int update_cr16_clocksource(void); /* from time.c */
-
/*
** PARISC CPU driver - claim "device" and initialize CPU data structures.
**
@@ -228,12 +226,6 @@ static int processor_probe(struct parisc_device *dev)
}
#endif
- /* If we've registered more than one cpu,
- * we'll use the jiffies clocksource since cr16
- * is not synchronized between CPUs.
- */
- update_cr16_clocksource();
-
return 0;
}
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 31ec99a5f..5eea7dc01 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -220,18 +220,6 @@ static struct clocksource clocksource_cr16 = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-int update_cr16_clocksource(void)
-{
- /* since the cr16 cycle counters are not synchronized across CPUs,
- we'll check if we should switch to a safe clocksource: */
- if (clocksource_cr16.rating != 0 && num_online_cpus() > 1) {
- clocksource_change_rating(&clocksource_cr16, 0);
- return 1;
- }
-
- return 0;
-}
-
void __init start_cpu_itimer(void)
{
unsigned int cpu = smp_processor_id();
diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S
index 1dd5bd8a8..133055311 100644
--- a/arch/um/include/asm/common.lds.S
+++ b/arch/um/include/asm/common.lds.S
@@ -81,7 +81,7 @@
.altinstr_replacement : { *(.altinstr_replacement) }
/* .exit.text is discarded at runtime, not link time, to deal with references
from .altinstructions and .eh_frame */
- .exit.text : { *(.exit.text) }
+ .exit.text : { EXIT_TEXT }
.exit.data : { *(.exit.data) }
.preinit_array : {
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 4e5be94e0..6fa85944a 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -135,7 +135,14 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
static inline void __native_flush_tlb(void)
{
+ /*
+ * If current->mm == NULL then we borrow a mm which may change during a
+ * task switch and therefore we must not be preempted while we write CR3
+ * back:
+ */
+ preempt_disable();
native_write_cr3(native_read_cr3());
+ preempt_enable();
}
static inline void __native_flush_tlb_global_irq_disabled(void)
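
The new comment in the hunk above compresses a subtle race; restated as an
illustrative timeline (assumption-level sketch in comment form, not kernel
source):

/*
 * A kernel thread has current->mm == NULL and borrows the previous task's
 * active_mm. Without preemption protection:
 *
 *	cr3 = native_read_cr3();	// CR3 points at task A's page tables
 *	  ... preempted: scheduler runs task B, which loads B's CR3 ...
 *	native_write_cr3(cr3);		// reinstalls A's now-stale page tables
 *
 * Bracketing the read-modify-write with preempt_disable()/preempt_enable(),
 * as the hunk above does, pins the flush to a single task's CR3 value.
 */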
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 6c1ff31d9..495c776de 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -357,20 +357,22 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
*cursor &= 0xfe;
}
/*
- * Similar treatment for VEX3 prefix.
- * TODO: add XOP/EVEX treatment when insn decoder supports them
+ * Similar treatment for VEX3/EVEX prefix.
+ * TODO: add XOP treatment when insn decoder supports them
*/
- if (insn->vex_prefix.nbytes == 3) {
+ if (insn->vex_prefix.nbytes >= 3) {
/*
* vex2: c5 rvvvvLpp (has no b bit)
* vex3/xop: c4/8f rxbmmmmm wvvvvLpp
* evex: 62 rxbR00mm wvvvv1pp zllBVaaa
- * (evex will need setting of both b and x since
- * in non-sib encoding evex.x is 4th bit of MODRM.rm)
- * Setting VEX3.b (setting because it has inverted meaning):
+ * Setting VEX3.b (setting because it has inverted meaning).
+ * Setting EVEX.x since (in non-SIB encoding) EVEX.x
+ * is the 4th bit of MODRM.rm, and needs the same treatment.
+ * For VEX3-encoded insns, VEX3.x value has no effect in
+ * non-SIB encoding, the change is superfluous but harmless.
*/
cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1;
- *cursor |= 0x20;
+ *cursor |= 0x60;
}
/*
@@ -415,12 +417,10 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn)
reg = MODRM_REG(insn); /* Fetch modrm.reg */
reg2 = 0xff; /* Fetch vex.vvvv */
- if (insn->vex_prefix.nbytes == 2)
- reg2 = insn->vex_prefix.bytes[1];
- else if (insn->vex_prefix.nbytes == 3)
+ if (insn->vex_prefix.nbytes)
reg2 = insn->vex_prefix.bytes[2];
/*
- * TODO: add XOP, EXEV vvvv reading.
+ * TODO: add XOP vvvv reading.
*
* vex.vvvv field is in bits 6-3, bits are inverted.
* But in 32-bit mode, high-order bit may be ignored.
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
index 815fec6e0..17943a89d 100644
--- a/arch/x86/platform/uv/bios_uv.c
+++ b/arch/x86/platform/uv/bios_uv.c
@@ -188,7 +188,8 @@ EXPORT_SYMBOL_GPL(uv_bios_set_legacy_vga_target);
void uv_bios_init(void)
{
uv_systab = NULL;
- if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) || !efi.uv_systab) {
+ if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) ||
+ !efi.uv_systab || efi_runtime_disabled()) {
pr_crit("UV: UVsystab: missing\n");
return;
}
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 8adac69db..2e9817328 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -299,8 +299,10 @@ int acpi_get_psd_map(struct cpudata **all_cpu_data)
continue;
cpc_ptr = per_cpu(cpc_desc_ptr, i);
- if (!cpc_ptr)
- continue;
+ if (!cpc_ptr) {
+ retval = -EFAULT;
+ goto err_ret;
+ }
pdomain = &(cpc_ptr->domain_info);
cpumask_set_cpu(i, pr->shared_cpu_map);
@@ -322,8 +324,10 @@ int acpi_get_psd_map(struct cpudata **all_cpu_data)
continue;
match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
- if (!match_cpc_ptr)
- continue;
+ if (!match_cpc_ptr) {
+ retval = -EFAULT;
+ goto err_ret;
+ }
match_pdomain = &(match_cpc_ptr->domain_info);
if (match_pdomain->domain != pdomain->domain)
@@ -353,8 +357,10 @@ int acpi_get_psd_map(struct cpudata **all_cpu_data)
continue;
match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
- if (!match_cpc_ptr)
- continue;
+ if (!match_cpc_ptr) {
+ retval = -EFAULT;
+ goto err_ret;
+ }
match_pdomain = &(match_cpc_ptr->domain_info);
if (match_pdomain->domain != pdomain->domain)
@@ -595,9 +601,6 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
/* Store CPU Logical ID */
cpc_ptr->cpu_id = pr->id;
- /* Plug it into this CPUs CPC descriptor. */
- per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
-
/* Parse PSD data for this CPU */
ret = acpi_get_psd(cpc_ptr, handle);
if (ret)
@@ -610,6 +613,9 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
goto out_free;
}
+ /* Plug PSD data into this CPU's CPC descriptor. */
+ per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
+
/* Everything looks okay */
pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index 1f0e06065..375c10f38 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -1396,11 +1396,12 @@ static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
u64 offset = nfit_blk->stat_offset + mmio->size * bw;
+ const u32 STATUS_MASK = 0x80000037;
if (mmio->num_lines)
offset = to_interleave_offset(offset, mmio);
- return readl(mmio->addr.base + offset);
+ return readl(mmio->addr.base + offset) & STATUS_MASK;
}
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 5f28cf778..f3c022292 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1967,7 +1967,7 @@ int __init acpi_scan_init(void)
static struct acpi_probe_entry *ape;
static int acpi_probe_count;
-static DEFINE_SPINLOCK(acpi_probe_lock);
+static DEFINE_MUTEX(acpi_probe_mutex);
static int __init acpi_match_madt(struct acpi_subtable_header *header,
const unsigned long end)
@@ -1986,7 +1986,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
if (acpi_disabled)
return 0;
- spin_lock(&acpi_probe_lock);
+ mutex_lock(&acpi_probe_mutex);
for (ape = ap_head; nr; ape++, nr--) {
if (ACPI_COMPARE_NAME(ACPI_SIG_MADT, ape->id)) {
acpi_probe_count = 0;
@@ -1999,7 +1999,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
count++;
}
}
- spin_unlock(&acpi_probe_lock);
+ mutex_unlock(&acpi_probe_mutex);
return count;
}
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index a1dcf12d3..84708a5f8 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3663,6 +3663,11 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
opened_bdev[drive] = bdev;
+ if (!(mode & (FMODE_READ|FMODE_WRITE))) {
+ res = -EINVAL;
+ goto out;
+ }
+
res = -ENXIO;
if (!floppy_track_buffer) {
@@ -3706,15 +3711,13 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
if (UFDCS->rawcmd == 1)
UFDCS->rawcmd = 2;
- if (mode & (FMODE_READ|FMODE_WRITE)) {
- UDRS->last_checked = 0;
- clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
- check_disk_change(bdev);
- if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
- goto out;
- if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
- goto out;
- }
+ UDRS->last_checked = 0;
+ clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+ check_disk_change(bdev);
+ if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
+ goto out;
+ if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+ goto out;
res = -EROFS;
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index ca5519c58..e7b98c4d4 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -91,6 +91,7 @@ static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 3, 1),
DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
+ DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
/* Core Clock Outputs */
DEF_FIXED("ztr", R8A7795_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
@@ -109,10 +110,10 @@ static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
DEF_FIXED("s3d2", R8A7795_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A7795_CLK_S3D4, CLK_S3, 4, 1),
- DEF_GEN3_SD("sd0", R8A7795_CLK_SD0, CLK_PLL1_DIV2, 0x0074),
- DEF_GEN3_SD("sd1", R8A7795_CLK_SD1, CLK_PLL1_DIV2, 0x0078),
- DEF_GEN3_SD("sd2", R8A7795_CLK_SD2, CLK_PLL1_DIV2, 0x0268),
- DEF_GEN3_SD("sd3", R8A7795_CLK_SD3, CLK_PLL1_DIV2, 0x026c),
+ DEF_GEN3_SD("sd0", R8A7795_CLK_SD0, CLK_SDSRC, 0x0074),
+ DEF_GEN3_SD("sd1", R8A7795_CLK_SD1, CLK_SDSRC, 0x0078),
+ DEF_GEN3_SD("sd2", R8A7795_CLK_SD2, CLK_SDSRC, 0x0268),
+ DEF_GEN3_SD("sd3", R8A7795_CLK_SD3, CLK_SDSRC, 0x026c),
DEF_FIXED("cl", R8A7795_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cp", R8A7795_CLK_CP, CLK_EXTAL, 2, 1),
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index ea8189f4b..6dc597126 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -441,6 +441,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
+ if (!ctx->authsize)
+ return 0;
+
/* NULL encryption / decryption */
if (!ctx->enckeylen)
return aead_null_set_sh_desc(aead);
@@ -614,7 +617,7 @@ skip_enc:
keys_fit_inline = true;
/* aead_givencrypt shared descriptor */
- desc = ctx->sh_desc_givenc;
+ desc = ctx->sh_desc_enc;
/* Note: Context registers are saved. */
init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
@@ -645,13 +648,13 @@ copy_iv:
append_operation(desc, ctx->class2_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
- /* ivsize + cryptlen = seqoutlen - authsize */
- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
/* Read and write assoclen bytes */
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ /* ivsize + cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -697,7 +700,7 @@ copy_iv:
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 5845d4a08..e9703f9d1 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1897,6 +1897,7 @@ caam_hash_alloc(struct caam_hash_template *template,
template->name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->driver_name);
+ t_alg->ahash_alg.setkey = NULL;
}
alg->cra_module = THIS_MODULE;
alg->cra_init = caam_hash_cra_init;
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 0794f1cc0..42f0f229f 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -392,7 +392,7 @@ static void nx_of_update_msc(struct device *dev,
((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) &&
i < msc->triplets;
i++) {
- if (msc->fc > NX_MAX_FC || msc->mode > NX_MAX_MODE) {
+ if (msc->fc >= NX_MAX_FC || msc->mode >= NX_MAX_MODE) {
dev_err(dev, "unknown function code/mode "
"combo: %d/%d (ignored)\n", msc->fc,
msc->mode);
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 1e8852a8a..4c9deef6a 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -1260,8 +1260,8 @@ static struct crypto_alg qat_algs[] = { {
.setkey = qat_alg_ablkcipher_xts_setkey,
.decrypt = qat_alg_ablkcipher_decrypt,
.encrypt = qat_alg_ablkcipher_encrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
},
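
A note on the qat keysize fix above: XTS mode consumes two concatenated AES
keys (one for data encryption, one for the tweak), which is why the advertised
limits must be doubled. Illustrative arithmetic only, not driver code:

/*
 * XTS key layout: [ data key | tweak key ]
 *   AES-128-XTS: 2 * 16 bytes = 32-byte key (2 * AES_MIN_KEY_SIZE)
 *   AES-256-XTS: 2 * 32 bytes = 64-byte key (2 * AES_MAX_KEY_SIZE)
 */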
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 55d510e36..82e6743c4 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -118,6 +118,9 @@ static int dax_pmem_probe(struct device *dev)
return rc;
}
+ /* adjust the dax_region resource to the start of data */
+ res.start += le64_to_cpu(pfn_sb->dataoff);
+
nd_region = to_nd_region(dev->parent);
dax_region = alloc_dax_region(dev, nd_region->id, &res,
le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 749f1bd5d..06ecdc38c 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -600,27 +600,30 @@ static irqreturn_t usb_dmac_isr_channel(int irq, void *dev)
{
struct usb_dmac_chan *chan = dev;
irqreturn_t ret = IRQ_NONE;
- u32 mask = USB_DMACHCR_TE;
- u32 check_bits = USB_DMACHCR_TE | USB_DMACHCR_SP;
+ u32 mask = 0;
u32 chcr;
+ bool xfer_end = false;
spin_lock(&chan->vc.lock);
chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
- if (chcr & check_bits)
- mask |= USB_DMACHCR_DE | check_bits;
+ if (chcr & (USB_DMACHCR_TE | USB_DMACHCR_SP)) {
+ mask |= USB_DMACHCR_DE | USB_DMACHCR_TE | USB_DMACHCR_SP;
+ if (chcr & USB_DMACHCR_DE)
+ xfer_end = true;
+ ret |= IRQ_HANDLED;
+ }
if (chcr & USB_DMACHCR_NULL) {
/* An interruption of TE will happen after we set FTE */
mask |= USB_DMACHCR_NULL;
chcr |= USB_DMACHCR_FTE;
ret |= IRQ_HANDLED;
}
- usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
+ if (mask)
+ usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
- if (chcr & check_bits) {
+ if (xfer_end)
usb_dmac_isr_transfer_end(chan);
- ret |= IRQ_HANDLED;
- }
spin_unlock(&chan->vc.lock);
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 4fb2eb7c8..ce0067b7a 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -552,9 +552,9 @@ static const struct pci_id_table pci_dev_descr_haswell_table[] = {
/* Knight's Landing Support */
/*
* KNL's memory channels are swizzled between memory controllers.
- * MC0 is mapped to CH3,5,6 and MC1 is mapped to CH0,1,2
+ * MC0 is mapped to CH3,4,5 and MC1 is mapped to CH0,1,2
*/
-#define knl_channel_remap(channel) ((channel + 3) % 6)
+#define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3)
/* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_MC 0x7840
@@ -1286,7 +1286,7 @@ static u32 knl_get_mc_route(int entry, u32 reg)
mc = GET_BITFIELD(reg, entry*3, (entry*3)+2);
chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1);
- return knl_channel_remap(mc*3 + chan);
+ return knl_channel_remap(mc, chan);
}
/*
@@ -2997,8 +2997,15 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
} else {
char A = *("A");
- channel = knl_channel_remap(channel);
+ /*
+ * Reported channel is in range 0-2, so we can't map it
+ * back to mc. To figure out mc we check machine check
+ * bank register that reported this error.
+ * bank15 means mc0 and bank16 means mc1.
+ */
+ channel = knl_channel_remap(m->bank == 16, channel);
channel_mask = 1 << channel;
+
snprintf(msg, sizeof(msg),
"%s%s err_code:%04x:%04x channel:%d (DIMM_%c)",
overflow ? " OVERFLOW" : "",
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index c99c24bc7..9ae6c116c 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/efi.h>
+#include <linux/vmalloc.h>
#define NO_FURTHER_WRITE_ACTION -1
@@ -108,14 +109,15 @@ static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
int ret;
void *cap_hdr_temp;
- cap_hdr_temp = kmap(cap_info->pages[0]);
+ cap_hdr_temp = vmap(cap_info->pages, cap_info->index,
+ VM_MAP, PAGE_KERNEL);
if (!cap_hdr_temp) {
- pr_debug("%s: kmap() failed\n", __func__);
+ pr_debug("%s: vmap() failed\n", __func__);
return -EFAULT;
}
ret = efi_capsule_update(cap_hdr_temp, cap_info->pages);
- kunmap(cap_info->pages[0]);
+ vunmap(cap_hdr_temp);
if (ret) {
pr_err("%s: efi_capsule_update() failed\n", __func__);
return ret;
diff --git a/drivers/firmware/efi/capsule.c b/drivers/firmware/efi/capsule.c
index 53b9fd229..6eedff45e 100644
--- a/drivers/firmware/efi/capsule.c
+++ b/drivers/firmware/efi/capsule.c
@@ -190,9 +190,9 @@ efi_capsule_update_locked(efi_capsule_header_t *capsule,
* map the capsule described by @capsule with its data in @pages and
* send it to the firmware via the UpdateCapsule() runtime service.
*
- * @capsule must be a virtual mapping of the first page in @pages
- * (@pages[0]) in the kernel address space. That is, a
- * capsule_header_t that describes the entire contents of the capsule
+ * @capsule must be a virtual mapping of the complete capsule update in the
+ * kernel address space, as the capsule can be consumed immediately.
+ * A capsule_header_t that describes the entire contents of the capsule
* must be at the start of the first data page.
*
* Even though this function will validate that the firmware supports
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d7860614f..5d457ff61 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -50,6 +50,7 @@ config GPIO_DEVRES
config OF_GPIO
def_bool y
depends on OF
+ depends on HAS_IOMEM
config GPIO_ACPI
def_bool y
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c
index 08807368f..946d09195 100644
--- a/drivers/gpio/gpio-max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -192,6 +192,10 @@ int __max730x_probe(struct max7301 *ts)
ts->chip.parent = dev;
ts->chip.owner = THIS_MODULE;
+ ret = gpiochip_add_data(&ts->chip, ts);
+ if (ret)
+ goto exit_destroy;
+
/*
* initialize pullups according to platform data and cache the
* register values for later use.
@@ -213,10 +217,6 @@ int __max730x_probe(struct max7301 *ts)
}
}
- ret = gpiochip_add_data(&ts->chip, ts);
- if (ret)
- goto exit_destroy;
-
return ret;
exit_destroy:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index e055d5be1..56475b1f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -415,6 +415,8 @@ struct amdgpu_mman {
/* custom LRU management */
struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE];
+ /* guard for log2_size array, don't add anything in between */
+ struct amdgpu_mman_lru guard;
};
int amdgpu_copy_buffer(struct amdgpu_ring *ring,
@@ -637,9 +639,9 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_fini(struct amdgpu_device *adev);
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int pages);
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist,
dma_addr_t *dma_addr, uint32_t flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 983175363..fe872b82e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -321,6 +321,19 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
(le16_to_cpu(path->usConnObjectId) &
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
+ /* Skip TV/CV support */
+ if ((le16_to_cpu(path->usDeviceTag) ==
+ ATOM_DEVICE_TV1_SUPPORT) ||
+ (le16_to_cpu(path->usDeviceTag) ==
+ ATOM_DEVICE_CV_SUPPORT))
+ continue;
+
+ if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
+ DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n",
+ con_obj_id, le16_to_cpu(path->usDeviceTag));
+ continue;
+ }
+
connector_type =
object_connector_convert[con_obj_id];
connector_object_id = con_obj_id;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 921bce2df..0feea347f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -221,7 +221,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
* Unbinds the requested pages from the gart page table and
* replaces them with the dummy page (all asics).
*/
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int pages)
{
unsigned t;
@@ -268,7 +268,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
* (all asics).
* Returns 0 for success, -EINVAL for failure.
*/
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist, dma_addr_t *dma_addr,
uint32_t flags)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 34e35423b..194cfc1a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -288,7 +288,7 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
unsigned i;
- int r;
+ int r, ret = 0;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -309,10 +309,11 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
} else {
/* still not good, but we can live with it */
DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
+ ret = r;
}
}
}
- return 0;
+ return ret;
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 3b9053af4..46c5297f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -251,8 +251,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
adev = amdgpu_get_adev(bo->bdev);
ring = adev->mman.buffer_funcs_ring;
- old_start = old_mem->start << PAGE_SHIFT;
- new_start = new_mem->start << PAGE_SHIFT;
+ old_start = (u64)old_mem->start << PAGE_SHIFT;
+ new_start = (u64)new_mem->start << PAGE_SHIFT;
switch (old_mem->mem_type) {
case TTM_PL_VRAM:
@@ -943,6 +943,8 @@ static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo)
struct list_head *res = lru->lru[tbo->mem.mem_type];
lru->lru[tbo->mem.mem_type] = &tbo->lru;
+ while ((++lru)->lru[tbo->mem.mem_type] == res)
+ lru->lru[tbo->mem.mem_type] = &tbo->lru;
return res;
}
@@ -953,6 +955,8 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
struct list_head *res = lru->swap_lru;
lru->swap_lru = &tbo->swap;
+ while ((++lru)->swap_lru == res)
+ lru->swap_lru = &tbo->swap;
return res;
}
@@ -1004,6 +1008,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
}
+ for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
+ adev->mman.guard.lru[j] = NULL;
+ adev->mman.guard.swap_lru = NULL;
+
adev->mman.initialized = true;
r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
adev->mc.real_vram_size >> PAGE_SHIFT);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index c56485e4c..cb429bfdc 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -52,6 +52,7 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
+static int cik_sdma_soft_reset(void *handle);
/*(DEBLOBBED)*/
@@ -1042,6 +1043,8 @@ static int cik_sdma_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cik_sdma_soft_reset(handle);
+
return cik_sdma_hw_init(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 507160a4f..6d34dcd60 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2748,8 +2748,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
u64 wb_gpu_addr;
u32 *buf;
struct bonaire_mqd *mqd;
-
- gfx_v7_0_cp_compute_enable(adev, true);
+ struct amdgpu_ring *ring;
/* fix up chicken bits */
tmp = RREG32(mmCP_CPF_DEBUG);
@@ -2784,7 +2783,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
/* init the queues. Just two for now. */
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+ ring = &adev->gfx.compute_ring[i];
if (ring->mqd_obj == NULL) {
r = amdgpu_bo_create(adev,
@@ -2963,6 +2962,13 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
amdgpu_bo_unreserve(ring->mqd_obj);
ring->ready = true;
+ }
+
+ gfx_v7_0_cp_compute_enable(adev, true);
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+
r = amdgpu_ring_test_ring(ring);
if (r)
ring->ready = false;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 85c4debf4..fd3553beb 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1578,6 +1578,9 @@ static int intel_runtime_suspend(struct device *device)
assert_forcewakes_inactive(dev_priv);
+ if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv))
+ intel_hpd_poll_init(dev_priv);
+
DRM_DEBUG_KMS("Device suspended\n");
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3378d42ce..74cb400c8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -281,6 +281,9 @@ struct i915_hotplug {
u32 short_port_mask;
struct work_struct dig_port_work;
+ struct work_struct poll_init_work;
+ bool poll_enabled;
+
/*
* if we get a HPD irq from DP and a HPD irq from non-DP
* the non-DP HPD could block the workqueue on a mode config
@@ -2793,6 +2796,8 @@ void intel_hpd_init(struct drm_i915_private *dev_priv);
void intel_hpd_init_work(struct drm_i915_private *dev_priv);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
+bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
+void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 92acdff9d..e856f7906 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2826,6 +2826,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev)
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
ppgtt->base.cleanup(&ppgtt->base);
+ kfree(ppgtt);
}
i915_gem_cleanup_stolen(dev);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5012c5be2..3c123d430 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1522,6 +1522,7 @@ enum skl_disp_power_wells {
#define BALANCE_LEG_MASK(port) (7<<(8+3*(port)))
/* Balance leg disable bits */
#define BALANCE_LEG_DISABLE_SHIFT 23
+#define BALANCE_LEG_DISABLE(port) (1 << (23 + (port)))
/*
* Fence registers
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 02a7527ce..74eca43b2 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -600,6 +600,8 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
return;
+ i915_audio_component_get_power(dev);
+
/*
* Enable/disable generating the codec wake signal, overriding the
* internal logic to generate the codec wake to controller.
@@ -615,6 +617,8 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
usleep_range(1000, 1500);
}
+
+ i915_audio_component_put_power(dev);
}
/* Get CDCLK in kHz */
@@ -654,6 +658,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
!IS_HASWELL(dev_priv))
return 0;
+ i915_audio_component_get_power(dev);
mutex_lock(&dev_priv->av_mutex);
/* 1. get the pipe */
intel_encoder = dev_priv->dig_port_map[port];
@@ -704,6 +709,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
unlock:
mutex_unlock(&dev_priv->av_mutex);
+ i915_audio_component_put_power(dev);
return err;
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 3fbb6fc66..a3f87d668 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -327,10 +327,25 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct intel_crt *crt = intel_attached_crt(connector);
struct drm_i915_private *dev_priv = dev->dev_private;
+ bool reenable_hpd;
u32 adpa;
bool ret;
u32 save_adpa;
+ /*
+ * Doing a force trigger causes a hpd interrupt to get sent, which can
+ * get us stuck in a loop if we're polling:
+ * - We enable power wells and reset the ADPA
+ * - output_poll_exec does force probe on VGA, triggering a hpd
+ * - HPD handler waits for poll to unlock dev->mode_config.mutex
+ * - output_poll_exec shuts off the ADPA, unlocks
+ * dev->mode_config.mutex
+ * - HPD handler runs, resets ADPA and brings us back to the start
+ *
+ * Just disable HPD interrupts here to prevent this
+ */
+ reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin);
+
save_adpa = adpa = I915_READ(crt->adpa_reg);
DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
@@ -353,6 +368,9 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
+ if (reenable_hpd)
+ intel_hpd_enable(dev_priv, crt->base.hpd_pin);
+
return ret;
}
@@ -713,11 +731,11 @@ static int intel_crt_set_property(struct drm_connector *connector,
return 0;
}
-static void intel_crt_reset(struct drm_connector *connector)
+void intel_crt_reset(struct drm_encoder *encoder)
{
- struct drm_device *dev = connector->dev;
+ struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crt *crt = intel_attached_crt(connector);
+ struct intel_crt *crt = intel_encoder_to_crt(to_intel_encoder(encoder));
if (INTEL_INFO(dev)->gen >= 5) {
u32 adpa;
@@ -739,7 +757,6 @@ static void intel_crt_reset(struct drm_connector *connector)
*/
static const struct drm_connector_funcs intel_crt_connector_funcs = {
- .reset = intel_crt_reset,
.dpms = drm_atomic_helper_connector_dpms,
.detect = intel_crt_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -757,6 +774,7 @@ static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs
};
static const struct drm_encoder_funcs intel_crt_enc_funcs = {
+ .reset = intel_crt_reset,
.destroy = intel_encoder_destroy,
};
@@ -902,5 +920,5 @@ void intel_crt_init(struct drm_device *dev)
dev_priv->fdi_rx_config = I915_READ(FDI_RX_CTL(PIPE_A)) & fdi_config;
}
- intel_crt_reset(connector);
+ intel_crt_reset(&crt->base.base);
}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 01e523df3..12c4f4356 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -145,7 +145,7 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
{ 0x0000201B, 0x000000A2, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000CD, 0x0 },
+ { 0x80007011, 0x000000CD, 0x1 },
{ 0x80009010, 0x000000C0, 0x1 },
{ 0x0000201B, 0x0000009D, 0x0 },
{ 0x80005012, 0x000000C0, 0x1 },
@@ -158,7 +158,7 @@ static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
{ 0x00000018, 0x000000A2, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
- { 0x80007011, 0x000000CD, 0x0 },
+ { 0x80007011, 0x000000CD, 0x3 },
{ 0x80009010, 0x000000C0, 0x3 },
{ 0x00000018, 0x0000009D, 0x0 },
{ 0x80005012, 0x000000C0, 0x3 },
@@ -388,6 +388,40 @@ skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
}
}
+static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
+{
+ int n_hdmi_entries;
+ int hdmi_level;
+ int hdmi_default_entry;
+
+ hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+
+ if (IS_BROXTON(dev_priv))
+ return hdmi_level;
+
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
+ hdmi_default_entry = 8;
+ } else if (IS_BROADWELL(dev_priv)) {
+ n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+ hdmi_default_entry = 7;
+ } else if (IS_HASWELL(dev_priv)) {
+ n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
+ hdmi_default_entry = 6;
+ } else {
+ WARN(1, "ddi translation table missing\n");
+ n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+ hdmi_default_entry = 7;
+ }
+
+ /* Choose a good default if VBT is badly populated */
+ if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
+ hdmi_level >= n_hdmi_entries)
+ hdmi_level = hdmi_default_entry;
+
+ return hdmi_level;
+}
+
/*
* Starting with Haswell, DDI port buffers must be programmed with correct
* values in advance. The buffer values are different for FDI and DP modes,
@@ -399,7 +433,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
- int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
+ int i, n_hdmi_entries, n_dp_entries, n_edp_entries,
size;
int hdmi_level;
enum port port;
@@ -410,7 +444,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
const struct ddi_buf_trans *ddi_translations;
port = intel_ddi_get_encoder_port(encoder);
- hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+ hdmi_level = intel_ddi_hdmi_level(dev_priv, port);
if (IS_BROXTON(dev_priv)) {
if (encoder->type != INTEL_OUTPUT_HDMI)
@@ -430,7 +464,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
ddi_translations_hdmi =
skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
- hdmi_default_entry = 8;
/* If we're boosting the current, set bit 31 of trans1 */
if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
dev_priv->vbt.ddi_port_info[port].dp_boost_level)
@@ -456,7 +489,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- hdmi_default_entry = 7;
} else if (IS_HASWELL(dev_priv)) {
ddi_translations_fdi = hsw_ddi_translations_fdi;
ddi_translations_dp = hsw_ddi_translations_dp;
@@ -464,7 +496,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
ddi_translations_hdmi = hsw_ddi_translations_hdmi;
n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
- hdmi_default_entry = 6;
} else {
WARN(1, "ddi translation table missing\n");
ddi_translations_edp = bdw_ddi_translations_dp;
@@ -474,7 +505,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- hdmi_default_entry = 7;
}
switch (encoder->type) {
@@ -505,11 +535,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
if (encoder->type != INTEL_OUTPUT_HDMI)
return;
- /* Choose a good default if VBT is badly populated */
- if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
- hdmi_level >= n_hdmi_entries)
- hdmi_level = hdmi_default_entry;
-
/* Entry 9 is for HDMI: */
I915_WRITE(DDI_BUF_TRANS_LO(port, i),
ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
@@ -1371,14 +1396,30 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
TRANS_CLK_SEL_DISABLED);
}
-static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
- u32 level, enum port port, int type)
+static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
+ enum port port, uint8_t iboost)
{
+ u32 tmp;
+
+ tmp = I915_READ(DISPIO_CR_TX_BMU_CR0);
+ tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port));
+ if (iboost)
+ tmp |= iboost << BALANCE_LEG_SHIFT(port);
+ else
+ tmp |= BALANCE_LEG_DISABLE(port);
+ I915_WRITE(DISPIO_CR_TX_BMU_CR0, tmp);
+}
+
+static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
+{
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ enum port port = intel_dig_port->port;
+ int type = encoder->type;
const struct ddi_buf_trans *ddi_translations;
uint8_t iboost;
uint8_t dp_iboost, hdmi_iboost;
int n_entries;
- u32 reg;
/* VBT may override standard boost values */
dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
@@ -1420,16 +1461,10 @@ static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
return;
}
- reg = I915_READ(DISPIO_CR_TX_BMU_CR0);
- reg &= ~BALANCE_LEG_MASK(port);
- reg &= ~(1 << (BALANCE_LEG_DISABLE_SHIFT + port));
-
- if (iboost)
- reg |= iboost << BALANCE_LEG_SHIFT(port);
- else
- reg |= 1 << (BALANCE_LEG_DISABLE_SHIFT + port);
+ _skl_ddi_set_iboost(dev_priv, port, iboost);
- I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
+ if (port == PORT_A && intel_dig_port->max_lanes == 4)
+ _skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
}
static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
@@ -1560,7 +1595,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
level = translate_signal_level(signal_levels);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
- skl_ddi_set_iboost(dev_priv, level, port, encoder->type);
+ skl_ddi_set_iboost(encoder, level);
else if (IS_BROXTON(dev_priv))
bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
@@ -1629,6 +1664,10 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
intel_dp_stop_link_train(intel_dp);
} else if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ int level = intel_ddi_hdmi_level(dev_priv, port);
+
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ skl_ddi_set_iboost(intel_encoder, level);
intel_hdmi->set_infoframes(encoder,
crtc->config->has_hdmi_sink,
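
The intel_ddi_hdmi_level() helper extracted above boils down to clamping an untrusted VBT index into a translation table. A minimal sketch of that logic; table size and default are per-platform (e.g. on the Skylake branch the size comes from skl_get_buf_trans_hdmi() and the default is 8):

static int clamp_vbt_level(int vbt_level, int n_entries, int default_entry)
{
	/* Reject "unknown" markers and out-of-range indices alike */
	if (vbt_level == HDMI_LEVEL_SHIFT_UNKNOWN || vbt_level >= n_entries)
		return default_entry;
	return vbt_level;
}
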
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f7f0f0181..94144a70b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1052,7 +1052,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
/* intel_crt.c */
void intel_crt_init(struct drm_device *dev);
-
+void intel_crt_reset(struct drm_encoder *encoder);
/* intel_ddi.c */
void intel_ddi_clk_select(struct intel_encoder *encoder,
@@ -1346,6 +1346,8 @@ void intel_dsi_init(struct drm_device *dev);
/* intel_dvo.c */
void intel_dvo_init(struct drm_device *dev);
+/* intel_hotplug.c */
+void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
/* legacy fbdev emulation in intel_fbdev.c */
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index bee673005..2c49458a9 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -453,20 +453,47 @@ void intel_hpd_irq_handler(struct drm_device *dev,
*
* This is a separate step from interrupt enabling to simplify the locking rules
* in the driver load and resume code.
+ *
+ * Also see: intel_hpd_poll_init(), which enables connector polling
*/
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
int i;
for_each_hpd_pin(i) {
dev_priv->hotplug.stats[i].count = 0;
dev_priv->hotplug.stats[i].state = HPD_ENABLED;
}
+
+ WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
+ schedule_work(&dev_priv->hotplug.poll_init_work);
+
+ /*
+ * Interrupt setup is already guaranteed to be single-threaded, this is
+ * just to make the assert_spin_locked checks happy.
+ */
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev_priv->dev);
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void i915_hpd_poll_init_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private,
+ hotplug.poll_init_work);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+ bool enabled;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);
+
list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_connector *intel_connector =
+ to_intel_connector(connector);
connector->polled = intel_connector->polled;
/* MST has a dynamic intel_connector->encoder and it's reprobing
@@ -475,24 +502,62 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
continue;
if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
- intel_connector->encoder->hpd_pin > HPD_NONE)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
+ intel_connector->encoder->hpd_pin > HPD_NONE) {
+ connector->polled = enabled ?
+ DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT :
+ DRM_CONNECTOR_POLL_HPD;
+ }
}
+ if (enabled)
+ drm_kms_helper_poll_enable_locked(dev);
+
+ mutex_unlock(&dev->mode_config.mutex);
+
/*
- * Interrupt setup is already guaranteed to be single-threaded, this is
- * just to make the assert_spin_locked checks happy.
+	 * We might have missed hotplug events that happened while we
+	 * were in the middle of disabling polling
*/
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev);
- spin_unlock_irq(&dev_priv->irq_lock);
+ if (!enabled)
+ drm_helper_hpd_irq_event(dev);
+}
+
+/**
+ * intel_hpd_poll_init - enables polling for connectors with hpd
+ * @dev_priv: i915 device instance
+ *
+ * This function enables polling for all connectors, regardless of whether or
+ * not they support hotplug detection. Under certain conditions HPD may not be
+ * functional. On most Intel GPUs, this happens when we enter runtime suspend.
+ * On Valleyview and Cherryview systems, this also happens when we shut off all
+ * of the powerwells.
+ *
+ * Since this function can get called in contexts where we're already holding
+ * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
+ * worker.
+ *
+ * Also see: intel_hpd_init(), which restores hpd handling.
+ */
+void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
+{
+ WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);
+
+ /*
+ * We might already be holding dev->mode_config.mutex, so do this in a
+	 * separate worker.
+	 * Also, there's no issue if we race here, since we always reschedule
+	 * this worker anyway
+ */
+ schedule_work(&dev_priv->hotplug.poll_init_work);
}
void intel_hpd_init_work(struct drm_i915_private *dev_priv)
{
INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
+ INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
intel_hpd_irq_storm_reenable_work);
}
@@ -509,5 +574,33 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
cancel_work_sync(&dev_priv->hotplug.dig_port_work);
cancel_work_sync(&dev_priv->hotplug.hotplug_work);
+ cancel_work_sync(&dev_priv->hotplug.poll_init_work);
cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
}
+
+bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+{
+ bool ret = false;
+
+ if (pin == HPD_NONE)
+ return false;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
+ dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
+ ret = true;
+ }
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ return ret;
+}
+
+void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+{
+ if (pin == HPD_NONE)
+ return;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
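
The poll_enabled/poll_init_work pairing above follows a common flag-plus-worker idiom: publish the requested state with WRITE_ONCE() and let a single work item apply it under dev->mode_config.mutex; races are benign because the worker rereads the flag every time it runs. A minimal sketch, assuming the hotplug fields this patch adds:

static void request_polling(struct drm_i915_private *dev_priv, bool enable)
{
	/* Publish the desired state; the worker rereads it with READ_ONCE() */
	WRITE_ONCE(dev_priv->hotplug.poll_enabled, enable);
	schedule_work(&dev_priv->hotplug.poll_init_work);
}
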
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 7fb1da4e7..2592b39ff 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -952,6 +952,7 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
+ struct intel_encoder *encoder;
enum pipe pipe;
/*
@@ -987,6 +988,12 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
intel_hpd_init(dev_priv);
+ /* Re-enable the ADPA, if we have one */
+ for_each_intel_encoder(dev_priv->dev, encoder) {
+ if (encoder->type == INTEL_OUTPUT_ANALOG)
+ intel_crt_reset(&encoder->base);
+ }
+
i915_redisable_vga_power_on(dev_priv->dev);
}
@@ -1000,6 +1007,8 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
synchronize_irq(dev_priv->dev->irq);
vlv_power_sequencer_reset(dev_priv);
+
+ intel_hpd_poll_init(dev_priv);
}
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index b550ba5fa..89449871b 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -110,24 +110,24 @@ static int iio_hwmon_probe(struct platform_device *pdev)
switch (type) {
case IIO_VOLTAGE:
- a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
- "in%d_input",
- in_i++);
+ a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "in%d_input",
+ in_i++);
break;
case IIO_TEMP:
- a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
- "temp%d_input",
- temp_i++);
+ a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "temp%d_input",
+ temp_i++);
break;
case IIO_CURRENT:
- a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
- "curr%d_input",
- curr_i++);
+ a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "curr%d_input",
+ curr_i++);
break;
case IIO_HUMIDITYRELATIVE:
- a->dev_attr.attr.name = kasprintf(GFP_KERNEL,
- "humidity%d_input",
- humidity_i++);
+ a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "humidity%d_input",
+ humidity_i++);
break;
default:
ret = -EINVAL;
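
The iio_hwmon hunks swap kasprintf() for devm_kasprintf() so the attribute names share the device's lifetime and need no explicit kfree() on any error path or at removal. A minimal sketch of the idiom; name_channel() and the "foo%d_input" format are illustrative:

static int name_channel(struct device *dev,
			struct sensor_device_attribute *a, int index)
{
	/* Freed automatically when dev is released */
	a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
					       "foo%d_input", index);
	return a->dev_attr.attr.name ? 0 : -ENOMEM;
}
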
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 730d84028..55bf47934 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -2015,6 +2015,7 @@ static struct attribute *it87_attributes_in[] = {
&sensor_dev_attr_in10_input.dev_attr.attr, /* 41 */
&sensor_dev_attr_in11_input.dev_attr.attr, /* 41 */
&sensor_dev_attr_in12_input.dev_attr.attr, /* 41 */
+ NULL
};
static const struct attribute_group it87_group_in = {
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
index a0d95ff68..2d5ff8639 100644
--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
@@ -215,7 +215,7 @@ static int ec_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg i2c_msgs[],
msg->outsize = request_len;
msg->insize = response_len;
- result = cros_ec_cmd_xfer(bus->ec, msg);
+ result = cros_ec_cmd_xfer_status(bus->ec, msg);
if (result < 0) {
dev_err(dev, "Error transferring EC i2c message %d\n", result);
goto exit;
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index 8de073aed..215ac87f6 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -68,7 +68,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np);
if (!adap) {
ret = -ENODEV;
- goto err;
+ goto err_with_revert;
}
p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name);
@@ -103,6 +103,8 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
err_with_put:
i2c_put_adapter(adap);
+ err_with_revert:
+ of_changeset_revert(&priv->chan[new_chan].chgset);
err:
dev_err(priv->dev, "failed to setup demux-adapter %d (%d)\n", new_chan, ret);
return ret;
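
The i2c-demux fix restores the usual goto-ladder invariant: each error label undoes exactly the steps that succeeded before the failure, in reverse order, and a failing step jumps to the label just below its own cleanup. A schematic sketch with hypothetical helpers:

static int activate(void)
{
	int ret;

	ret = apply_changeset();	/* step 1 */
	if (ret)
		goto err;
	ret = get_adapter();		/* step 2 */
	if (ret)
		goto err_with_revert;	/* undo step 1 only */
	ret = select_pinctrl();		/* step 3 */
	if (ret)
		goto err_with_put;	/* undo steps 2 and 1 */
	return 0;

 err_with_put:
	put_adapter();
 err_with_revert:
	revert_changeset();
 err:
	return ret;
}
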
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 90462fcf5..49bf9c59f 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -107,6 +107,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
{
struct iio_dev *indio_dev = filp->private_data;
struct iio_buffer *rb = indio_dev->buffer;
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
size_t datum_size;
size_t to_wait;
int ret;
@@ -131,19 +132,29 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
else
to_wait = min_t(size_t, n / datum_size, rb->watermark);
+ add_wait_queue(&rb->pollq, &wait);
do {
- ret = wait_event_interruptible(rb->pollq,
- iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size));
- if (ret)
- return ret;
+ if (!indio_dev->info) {
+ ret = -ENODEV;
+ break;
+ }
- if (!indio_dev->info)
- return -ENODEV;
+ if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ wait_woken(&wait, TASK_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ continue;
+ }
ret = rb->access->read_first_n(rb, n, buf);
if (ret == 0 && (filp->f_flags & O_NONBLOCK))
ret = -EAGAIN;
} while (ret == 0);
+ remove_wait_queue(&rb->pollq, &wait);
return ret;
}
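
The rewritten read loop above uses the wait_woken() pattern instead of wait_event_interruptible() so extra conditions (device removal, pending signals) can be tested on every pass without racing against a wake-up that arrives between the test and the sleep. A minimal sketch of the same shape, with data_ready() as a hypothetical condition:

static int wait_for_data(wait_queue_head_t *q)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;

	add_wait_queue(q, &wait);
	while (!data_ready()) {			/* hypothetical condition */
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Sleeps, unless a wake-up already arrived in the meantime */
		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(q, &wait);
	return ret;
}
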
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index acc5394af..29485bc42 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -376,7 +376,7 @@ static int tegra_kbc_start(struct tegra_kbc *kbc)
/* Reset the KBC controller to clear all previous status.*/
reset_control_assert(kbc->rst);
udelay(100);
- reset_control_assert(kbc->rst);
+ reset_control_deassert(kbc->rst);
udelay(100);
tegra_kbc_config_pins(kbc);
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index faa295ec4..c83bce890 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -553,7 +553,6 @@ int rmi_read_register_desc(struct rmi_device *d, u16 addr,
goto free_struct_buff;
reg = find_first_bit(rdesc->presense_map, RMI_REG_DESC_PRESENSE_BITS);
- map_offset = 0;
for (i = 0; i < rdesc->num_registers; i++) {
struct rmi_register_desc_item *item = &rdesc->registers[i];
int reg_size = struct_buf[offset];
@@ -576,6 +575,8 @@ int rmi_read_register_desc(struct rmi_device *d, u16 addr,
item->reg = reg;
item->reg_size = reg_size;
+ map_offset = 0;
+
do {
for (b = 0; b < 7; b++) {
if (struct_buf[offset] & (0x1 << b))
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 454195709..405252a88 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1277,6 +1277,7 @@ static int __init i8042_create_kbd_port(void)
serio->start = i8042_start;
serio->stop = i8042_stop;
serio->close = i8042_port_close;
+ serio->ps2_cmd_mutex = &i8042_mutex;
serio->port_data = port;
serio->dev.parent = &i8042_platform_device->dev;
strlcpy(serio->name, "i8042 KBD port", sizeof(serio->name));
@@ -1304,6 +1305,7 @@ static int __init i8042_create_aux_port(int idx)
serio->write = i8042_aux_write;
serio->start = i8042_start;
serio->stop = i8042_stop;
+ serio->ps2_cmd_mutex = &i8042_mutex;
serio->port_data = port;
serio->dev.parent = &i8042_platform_device->dev;
if (idx < 0) {
@@ -1373,21 +1375,6 @@ static void i8042_unregister_ports(void)
}
}
-/*
- * Checks whether port belongs to i8042 controller.
- */
-bool i8042_check_port_owner(const struct serio *port)
-{
- int i;
-
- for (i = 0; i < I8042_NUM_PORTS; i++)
- if (i8042_ports[i].serio == port)
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(i8042_check_port_owner);
-
static void i8042_free_irqs(void)
{
if (i8042_aux_irq_registered)
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index 316f2c897..83e9c663a 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -56,19 +56,17 @@ EXPORT_SYMBOL(ps2_sendbyte);
void ps2_begin_command(struct ps2dev *ps2dev)
{
- mutex_lock(&ps2dev->cmd_mutex);
+ struct mutex *m = ps2dev->serio->ps2_cmd_mutex ?: &ps2dev->cmd_mutex;
- if (i8042_check_port_owner(ps2dev->serio))
- i8042_lock_chip();
+ mutex_lock(m);
}
EXPORT_SYMBOL(ps2_begin_command);
void ps2_end_command(struct ps2dev *ps2dev)
{
- if (i8042_check_port_owner(ps2dev->serio))
- i8042_unlock_chip();
+ struct mutex *m = ps2dev->serio->ps2_cmd_mutex ?: &ps2dev->cmd_mutex;
- mutex_unlock(&ps2dev->cmd_mutex);
+ mutex_unlock(m);
}
EXPORT_SYMBOL(ps2_end_command);
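
The "?:" expression above is GCC's short-hand for "a ? a : b": if the serio port publishes a shared command mutex (ps2_cmd_mutex, which the i8042 hunks set to &i8042_mutex for both the KBD and AUX ports), commands on sibling ports serialize on it; otherwise the per-device mutex is used. As a sketch:

static struct mutex *ps2_cmd_lock(struct ps2dev *ps2dev)
{
	/* Prefer the controller-wide mutex when the port provides one */
	return ps2dev->serio->ps2_cmd_mutex ?: &ps2dev->cmd_mutex;
}
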
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 5f6b3bcab..46ba2b64b 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -879,7 +879,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
* We may have concurrent producers, so we need to be careful
* not to touch any of the shadow cmdq state.
*/
- queue_read(cmd, Q_ENT(q, idx), q->ent_dwords);
+ queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
dev_err(smmu->dev, "skipping command in error state:\n");
for (i = 0; i < ARRAY_SIZE(cmd); ++i)
dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);
@@ -890,7 +890,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
return;
}
- queue_write(cmd, Q_ENT(q, idx), q->ent_dwords);
+ queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}
static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
@@ -1034,6 +1034,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
case STRTAB_STE_0_CFG_S2_TRANS:
ste_live = true;
break;
+ case STRTAB_STE_0_CFG_ABORT:
+ if (disable_bypass)
+ break;
default:
BUG(); /* STE corruption */
}
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 9345a3fcb..31422d440 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -686,8 +686,7 @@ static struct iommu_gather_ops arm_smmu_gather_ops = {
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
- int flags, ret;
- u32 fsr, fsynr, resume;
+ u32 fsr, fsynr;
unsigned long iova;
struct iommu_domain *domain = dev;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -701,34 +700,15 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
if (!(fsr & FSR_FAULT))
return IRQ_NONE;
- if (fsr & FSR_IGN)
- dev_err_ratelimited(smmu->dev,
- "Unexpected context fault (fsr 0x%x)\n",
- fsr);
-
fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
- flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
-
iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
- if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
- ret = IRQ_HANDLED;
- resume = RESUME_RETRY;
- } else {
- dev_err_ratelimited(smmu->dev,
- "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
- iova, fsynr, cfg->cbndx);
- ret = IRQ_NONE;
- resume = RESUME_TERMINATE;
- }
-
- /* Clear the faulting FSR */
- writel(fsr, cb_base + ARM_SMMU_CB_FSR);
- /* Retry or terminate any stalled transactions */
- if (fsr & FSR_SS)
- writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
+ dev_err_ratelimited(smmu->dev,
+ "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
+ fsr, iova, fsynr, cfg->cbndx);
- return ret;
+ writel(fsr, cb_base + ARM_SMMU_CB_FSR);
+ return IRQ_HANDLED;
}
static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
@@ -837,7 +817,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
}
/* SCTLR */
- reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
+ reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
if (stage1)
reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index ea5a9ebf0..97a23082e 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -68,7 +68,8 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
if (!iovad)
return;
- put_iova_domain(iovad);
+ if (iovad->granule)
+ put_iova_domain(iovad);
kfree(iovad);
domain->iova_cookie = NULL;
}
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 8c6139986..def8ca1c9 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -286,12 +286,14 @@ static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
int prot = IOMMU_READ;
arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl);
- if (attr & ARM_V7S_PTE_AP_RDONLY)
+ if (!(attr & ARM_V7S_PTE_AP_RDONLY))
prot |= IOMMU_WRITE;
if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
prot |= IOMMU_MMIO;
else if (pte & ARM_V7S_ATTR_C)
prot |= IOMMU_CACHE;
+ if (pte & ARM_V7S_ATTR_XN(lvl))
+ prot |= IOMMU_NOEXEC;
return prot;
}
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index 4ace1da17..6c25213ab 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -210,14 +210,17 @@ static struct dm_path *rr_select_path(struct path_selector *ps, size_t nr_bytes)
struct path_info *pi = NULL;
struct dm_path *current_path = NULL;
+ local_irq_save(flags);
current_path = *this_cpu_ptr(s->current_path);
if (current_path) {
percpu_counter_dec(&s->repeat_count);
- if (percpu_counter_read_positive(&s->repeat_count) > 0)
+ if (percpu_counter_read_positive(&s->repeat_count) > 0) {
+ local_irq_restore(flags);
return current_path;
+ }
}
- spin_lock_irqsave(&s->lock, flags);
+ spin_lock(&s->lock);
if (!list_empty(&s->valid_paths)) {
pi = list_entry(s->valid_paths.next, struct path_info, list);
list_move_tail(&pi->list, &s->valid_paths);
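
The dm-round-robin hunk splits spin_lock_irqsave() so that interrupts are already off when the per-CPU pointer is dereferenced; the fast path then never touches the spinlock at all, and the slow path can take it with plain spin_lock(). A schematic sketch, with a hypothetical selector type and pick_new_path() standing in for the list walk:

struct selector {
	spinlock_t lock;
	struct dm_path * __percpu *current_path;
};

static struct dm_path *select_path(struct selector *s)
{
	struct dm_path *path;
	unsigned long flags;

	local_irq_save(flags);			/* covers the percpu read */
	path = *this_cpu_ptr(s->current_path);
	if (path) {
		local_irq_restore(flags);
		return path;
	}

	spin_lock(&s->lock);			/* IRQs already disabled */
	path = pick_new_path(s);		/* hypothetical slow path */
	spin_unlock_irqrestore(&s->lock, flags);
	return path;
}
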
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 8bb3d1adf..c6a8f47f5 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -2318,20 +2318,13 @@ struct device_node *of_graph_get_endpoint_by_regs(
const struct device_node *parent, int port_reg, int reg)
{
struct of_endpoint endpoint;
- struct device_node *node, *prev_node = NULL;
-
- while (1) {
- node = of_graph_get_next_endpoint(parent, prev_node);
- of_node_put(prev_node);
- if (!node)
- break;
+ struct device_node *node = NULL;
+ for_each_endpoint_of_node(parent, node) {
of_graph_parse_endpoint(node, &endpoint);
if (((port_reg == -1) || (endpoint.port == port_reg)) &&
((reg == -1) || (endpoint.id == reg)))
return node;
-
- prev_node = node;
}
return NULL;
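
for_each_endpoint_of_node() drops the reference on the previous endpoint as it advances, so the rewritten loop above no longer needs the manual prev_node/of_node_put() bookkeeping; only the node returned to the caller still carries a reference. A minimal sketch:

struct device_node *find_endpoint(const struct device_node *parent, int port)
{
	struct of_endpoint ep;
	struct device_node *node = NULL;

	for_each_endpoint_of_node(parent, node) {
		of_graph_parse_endpoint(node, &ep);
		if (ep.port == port)
			return node;	/* caller must of_node_put() this */
	}
	return NULL;
}
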
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index a080f4496..565e2a4e6 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1277,6 +1277,8 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
pci_msi_domain_update_chip_ops(info);
+ info->flags |= MSI_FLAG_ACTIVATE_EARLY;
+
domain = msi_create_irq_domain(fwnode, info, parent);
if (!domain)
return NULL;
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 11623c6b0..44e69c963 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -727,13 +727,7 @@ static int meson_pinctrl_probe(struct platform_device *pdev)
return PTR_ERR(pc->pcdev);
}
- ret = meson_gpiolib_register(pc);
- if (ret) {
- pinctrl_unregister(pc->pcdev);
- return ret;
- }
-
- return 0;
+ return meson_gpiolib_register(pc);
}
static struct platform_driver meson_pinctrl_driver = {
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 634b4d30e..b3e772390 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -43,17 +43,6 @@ static int amd_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + offset * 4);
- /*
- * Suppose BIOS or Bootloader sets specific debounce for the
- * GPIO. if not, set debounce to be 2.75ms and remove glitch.
- */
- if ((pin_reg & DB_TMR_OUT_MASK) == 0) {
- pin_reg |= 0xf;
- pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
- pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
- pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
- }
-
pin_reg &= ~BIT(OUTPUT_ENABLE_OFF);
writel(pin_reg, gpio_dev->base + offset * 4);
spin_unlock_irqrestore(&gpio_dev->lock, flags);
@@ -326,15 +315,6 @@ static void amd_gpio_irq_enable(struct irq_data *d)
spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
- /*
- Suppose BIOS or Bootloader sets specific debounce for the
- GPIO. if not, set debounce to be 2.75ms.
- */
- if ((pin_reg & DB_TMR_OUT_MASK) == 0) {
- pin_reg |= 0xf;
- pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
- pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
- }
pin_reg |= BIT(INTERRUPT_ENABLE_OFF);
pin_reg |= BIT(INTERRUPT_MASK_OFF);
writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index b6e161f71..6c084b266 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -380,3 +380,20 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
return ret;
}
EXPORT_SYMBOL(cros_ec_cmd_xfer);
+
+int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg)
+{
+ int ret;
+
+ ret = cros_ec_cmd_xfer(ec_dev, msg);
+ if (ret < 0) {
+ dev_err(ec_dev->dev, "Command xfer error (err:%d)\n", ret);
+ } else if (msg->result != EC_RES_SUCCESS) {
+ dev_dbg(ec_dev->dev, "Command result (err: %d)\n", msg->result);
+ return -EPROTO;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(cros_ec_cmd_xfer_status);
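
With the new wrapper, callers such as the i2c tunnel hunk earlier can treat any negative return as failure instead of separately inspecting msg->result. A hedged usage sketch; ec_command() is illustrative:

static int ec_command(struct cros_ec_device *ec_dev,
		      struct cros_ec_command *msg)
{
	int ret;

	ret = cros_ec_cmd_xfer_status(ec_dev, msg);
	if (ret < 0)		/* transport error, or -EPROTO from the EC */
		return ret;

	/* ret >= 0: number of response bytes, msg->data is valid */
	return 0;
}
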
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 8973d34ce..fb1b56a71 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1643,9 +1643,18 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
u8 *sense = NULL;
int expires;
+ cqr = (struct dasd_ccw_req *) intparm;
if (IS_ERR(irb)) {
switch (PTR_ERR(irb)) {
case -EIO:
+ if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
+ device = (struct dasd_device *) cqr->startdev;
+ cqr->status = DASD_CQR_CLEARED;
+ dasd_device_clear_timer(device);
+ wake_up(&dasd_flush_wq);
+ dasd_schedule_device_bh(device);
+ return;
+ }
break;
case -ETIMEDOUT:
DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
@@ -1661,7 +1670,6 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
}
now = get_tod_clock();
- cqr = (struct dasd_ccw_req *) intparm;
/* check for conditions that should be handled immediately */
if (!cqr ||
!(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 4b3bb52b5..c424e8bc2 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -63,7 +63,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
struct fib *fibptr;
struct hw_fib * hw_fib = (struct hw_fib *)0;
dma_addr_t hw_fib_pa = (dma_addr_t)0LL;
- unsigned size;
+ unsigned int size, osize;
int retval;
if (dev->in_reset) {
@@ -87,7 +87,8 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
* will not overrun the buffer when we copy the memory. Return
* an error if we would.
*/
- size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr);
+ osize = size = le16_to_cpu(kfib->header.Size) +
+ sizeof(struct aac_fibhdr);
if (size < le16_to_cpu(kfib->header.SenderSize))
size = le16_to_cpu(kfib->header.SenderSize);
if (size > dev->max_fib_size) {
@@ -118,6 +119,14 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
goto cleanup;
}
+ /* Sanity check the second copy */
+ if ((osize != le16_to_cpu(kfib->header.Size) +
+ sizeof(struct aac_fibhdr))
+ || (size < le16_to_cpu(kfib->header.SenderSize))) {
+ retval = -EINVAL;
+ goto cleanup;
+ }
+
if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
aac_adapter_interrupt(dev);
/*
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 2dab3dc2a..c1ed25adb 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -5037,7 +5037,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
/* Find first memory bar */
bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
- if (pci_request_selected_regions(instance->pdev, instance->bar,
+ if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
"megasas: LSI")) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
return -EBUSY;
@@ -5339,7 +5339,7 @@ fail_ready_state:
iounmap(instance->reg_set);
fail_ioremap:
- pci_release_selected_regions(instance->pdev, instance->bar);
+ pci_release_selected_regions(instance->pdev, 1<<instance->bar);
return -EINVAL;
}
@@ -5360,7 +5360,7 @@ static void megasas_release_mfi(struct megasas_instance *instance)
iounmap(instance->reg_set);
- pci_release_selected_regions(instance->pdev, instance->bar);
+ pci_release_selected_regions(instance->pdev, 1<<instance->bar);
}
/**
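
The megaraid hunks fix an index/mask mix-up: pci_select_bars() returns, and pci_request/release_selected_regions() expect, a BAR *bitmask*, while find_first_bit() yields a BAR *index*, so the index must be converted back with 1 << bar before being passed on. A minimal sketch:

static int request_first_mem_bar(struct pci_dev *pdev)
{
	unsigned long bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
	int bar = find_first_bit(&bar_mask, BITS_PER_LONG);	/* an index */

	/* The request/release APIs take a mask, not an index */
	return pci_request_selected_regions(pdev, 1 << bar, "megasas: LSI");
}
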
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index ec837544f..52d8bbf7f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -2603,7 +2603,7 @@ megasas_release_fusion(struct megasas_instance *instance)
iounmap(instance->reg_set);
- pci_release_selected_regions(instance->pdev, instance->bar);
+ pci_release_selected_regions(instance->pdev, 1<<instance->bar);
}
/**
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 751f13ede..750f82c33 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2188,6 +2188,17 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
} else
ioc->msix96_vector = 0;
+ if (ioc->is_warpdrive) {
+ ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
+ &ioc->chip->ReplyPostHostIndex;
+
+ for (i = 1; i < ioc->cpu_msix_table_sz; i++)
+ ioc->reply_post_host_index[i] =
+ (resource_size_t __iomem *)
+ ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
+ * 4)));
+ }
+
list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
@@ -5280,17 +5291,6 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
if (r)
goto out_free_resources;
- if (ioc->is_warpdrive) {
- ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
- &ioc->chip->ReplyPostHostIndex;
-
- for (i = 1; i < ioc->cpu_msix_table_sz; i++)
- ioc->reply_post_host_index[i] =
- (resource_size_t __iomem *)
- ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
- * 4)));
- }
-
pci_set_drvdata(ioc->pdev, ioc->shost);
r = _base_get_ioc_facts(ioc, CAN_SLEEP);
if (r)
diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
index 4ab186669..ec5b9a234 100644
--- a/drivers/staging/comedi/drivers/comedi_test.c
+++ b/drivers/staging/comedi/drivers/comedi_test.c
@@ -56,11 +56,6 @@
#define N_CHANS 8
-enum waveform_state_bits {
- WAVEFORM_AI_RUNNING,
- WAVEFORM_AO_RUNNING
-};
-
/* Data unique to this driver */
struct waveform_private {
struct timer_list ai_timer; /* timer for AI commands */
@@ -68,7 +63,6 @@ struct waveform_private {
unsigned int wf_amplitude; /* waveform amplitude in microvolts */
unsigned int wf_period; /* waveform period in microseconds */
unsigned int wf_current; /* current time in waveform period */
- unsigned long state_bits;
unsigned int ai_scan_period; /* AI scan period in usec */
unsigned int ai_convert_period; /* AI conversion period in usec */
struct timer_list ao_timer; /* timer for AO commands */
@@ -191,10 +185,6 @@ static void waveform_ai_timer(unsigned long arg)
unsigned int nsamples;
unsigned int time_increment;
- /* check command is still active */
- if (!test_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits))
- return;
-
now = ktime_to_us(ktime_get());
nsamples = comedi_nsamples_left(s, UINT_MAX);
@@ -386,11 +376,6 @@ static int waveform_ai_cmd(struct comedi_device *dev,
*/
devpriv->ai_timer.expires =
jiffies + usecs_to_jiffies(devpriv->ai_convert_period) + 1;
-
- /* mark command as active */
- smp_mb__before_atomic();
- set_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits);
- smp_mb__after_atomic();
add_timer(&devpriv->ai_timer);
return 0;
}
@@ -400,11 +385,12 @@ static int waveform_ai_cancel(struct comedi_device *dev,
{
struct waveform_private *devpriv = dev->private;
- /* mark command as no longer active */
- clear_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits);
- smp_mb__after_atomic();
- /* cannot call del_timer_sync() as may be called from timer routine */
- del_timer(&devpriv->ai_timer);
+ if (in_softirq()) {
+ /* Assume we were called from the timer routine itself. */
+ del_timer(&devpriv->ai_timer);
+ } else {
+ del_timer_sync(&devpriv->ai_timer);
+ }
return 0;
}
@@ -436,10 +422,6 @@ static void waveform_ao_timer(unsigned long arg)
u64 scans_since;
unsigned int scans_avail = 0;
- /* check command is still active */
- if (!test_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits))
- return;
-
/* determine number of scan periods since last time */
now = ktime_to_us(ktime_get());
scans_since = now - devpriv->ao_last_scan_time;
@@ -518,11 +500,6 @@ static int waveform_ao_inttrig_start(struct comedi_device *dev,
devpriv->ao_last_scan_time = ktime_to_us(ktime_get());
devpriv->ao_timer.expires =
jiffies + usecs_to_jiffies(devpriv->ao_scan_period);
-
- /* mark command as active */
- smp_mb__before_atomic();
- set_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits);
- smp_mb__after_atomic();
add_timer(&devpriv->ao_timer);
return 1;
@@ -608,11 +585,12 @@ static int waveform_ao_cancel(struct comedi_device *dev,
struct waveform_private *devpriv = dev->private;
s->async->inttrig = NULL;
- /* mark command as no longer active */
- clear_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits);
- smp_mb__after_atomic();
- /* cannot call del_timer_sync() as may be called from timer routine */
- del_timer(&devpriv->ao_timer);
+ if (in_softirq()) {
+ /* Assume we were called from the timer routine itself. */
+ del_timer(&devpriv->ao_timer);
+ } else {
+ del_timer_sync(&devpriv->ao_timer);
+ }
return 0;
}
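
The comedi_test hunks encode the standard timer-cancellation rule: a handler must not del_timer_sync() its own timer (it would wait forever for itself to finish), so the cancel path chooses the variant by context. As a sketch:

static void cancel_waveform_timer(struct timer_list *timer)
{
	if (in_softirq())
		del_timer(timer);	/* we may *be* the handler */
	else
		del_timer_sync(timer);	/* wait for a running handler */
}
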
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index 2cd5aa687..73a46e3a6 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -636,7 +636,7 @@ static const void *daqboard2000_find_boardinfo(struct comedi_device *dev,
const struct daq200_boardtype *board;
int i;
- if (pcidev->subsystem_device != PCI_VENDOR_ID_IOTECH)
+ if (pcidev->subsystem_vendor != PCI_VENDOR_ID_IOTECH)
return NULL;
for (i = 0; i < ARRAY_SIZE(boardtypes); i++) {
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 8dabb1951..0f97d7b61 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -2772,7 +2772,15 @@ static int ni_ao_inttrig(struct comedi_device *dev,
int i;
static const int timeout = 1000;
- if (trig_num != cmd->start_arg)
+ /*
+ * Require trig_num == cmd->start_arg when cmd->start_src == TRIG_INT.
+ * For backwards compatibility, also allow trig_num == 0 when
+ * cmd->start_src != TRIG_INT (i.e. when cmd->start_src == TRIG_EXT);
+ * in that case, the internal trigger is being used as a pre-trigger
+ * before the external trigger.
+ */
+ if (!(trig_num == cmd->start_arg ||
+ (trig_num == 0 && cmd->start_src != TRIG_INT)))
return -EINVAL;
/*
@@ -5480,7 +5488,7 @@ static int ni_E_init(struct comedi_device *dev,
s->maxdata = (devpriv->is_m_series) ? 0xffffffff
: 0x00ffffff;
s->insn_read = ni_tio_insn_read;
- s->insn_write = ni_tio_insn_read;
+ s->insn_write = ni_tio_insn_write;
s->insn_config = ni_tio_insn_config;
#ifdef PCIDMA
if (dev->irq && devpriv->mite) {
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 5eba0ebae..86e40ce39 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -391,6 +391,7 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request,
struct inode *inode = NULL;
__u64 bits = 0;
int rc = 0;
+ struct dentry *alias;
/* NB 1 request reference will be taken away by ll_intent_lock()
* when I return
@@ -415,26 +416,12 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request,
*/
}
- /* Only hash *de if it is unhashed (new dentry).
- * Atoimc_open may passing hashed dentries for open.
- */
- if (d_unhashed(*de)) {
- struct dentry *alias;
-
- alias = ll_splice_alias(inode, *de);
- if (IS_ERR(alias)) {
- rc = PTR_ERR(alias);
- goto out;
- }
- *de = alias;
- } else if (!it_disposition(it, DISP_LOOKUP_NEG) &&
- !it_disposition(it, DISP_OPEN_CREATE)) {
- /* With DISP_OPEN_CREATE dentry will be
- * instantiated in ll_create_it.
- */
- LASSERT(!d_inode(*de));
- d_instantiate(*de, inode);
+ alias = ll_splice_alias(inode, *de);
+ if (IS_ERR(alias)) {
+ rc = PTR_ERR(alias);
+ goto out;
}
+ *de = alias;
if (!it_disposition(it, DISP_LOOKUP_NEG)) {
/* we have lookup look - unhide dentry */
@@ -590,6 +577,24 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
dentry, PFID(ll_inode2fid(dir)), dir, file, open_flags, mode,
*opened);
+ /* Only negative dentries enter here */
+ LASSERT(!d_inode(dentry));
+
+ if (!d_in_lookup(dentry)) {
+		/* For a valid negative dentry that just passed revalidation,
+		 * there's little point in trying to open it server-side,
+		 * even though there's a minuscule chance it might succeed.
+ * Either way it's a valid race to just return -ENOENT here.
+ */
+ if (!(open_flags & O_CREAT))
+ return -ENOENT;
+
+ /* Otherwise we just unhash it to be rehashed afresh via
+ * lookup if necessary
+ */
+ d_drop(dentry);
+ }
+
it = kzalloc(sizeof(*it), GFP_NOFS);
if (!it)
return -ENOMEM;
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 065f5d97a..dfec5a176 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1596,8 +1596,11 @@ static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
{
struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
- /* Data+ pullup controlled by OTG state machine in OTG fsm mode */
- if (ci_otg_is_fsm_mode(ci))
+ /*
+ * Data+ pullup controlled by OTG state machine in OTG fsm mode;
+ * and don't touch Data+ in host mode for dual role config.
+ */
+ if (ci_otg_is_fsm_mode(ci) || ci->role == CI_ROLE_HOST)
return 0;
pm_runtime_get_sync(&ci->gadget.dev);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 94a14f5dc..0a4d54a87 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1405,7 +1405,6 @@ made_compressed_probe:
spin_lock_init(&acm->write_lock);
spin_lock_init(&acm->read_lock);
mutex_init(&acm->mutex);
- acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
acm->is_int_ep = usb_endpoint_xfer_int(epread);
if (acm->is_int_ep)
acm->bInterval = epread->bInterval;
@@ -1445,14 +1444,14 @@ made_compressed_probe:
urb->transfer_dma = rb->dma;
if (acm->is_int_ep) {
usb_fill_int_urb(urb, acm->dev,
- acm->rx_endpoint,
+ usb_rcvintpipe(usb_dev, epread->bEndpointAddress),
rb->base,
acm->readsize,
acm_read_bulk_callback, rb,
acm->bInterval);
} else {
usb_fill_bulk_urb(urb, acm->dev,
- acm->rx_endpoint,
+ usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress),
rb->base,
acm->readsize,
acm_read_bulk_callback, rb);
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 05ce308d5..1f1eabfd8 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -96,7 +96,6 @@ struct acm {
struct acm_rb read_buffers[ACM_NR];
struct acm_wb *putbuffer; /* for acm_tty_put_char() */
int rx_buflimit;
- int rx_endpoint;
spinlock_t read_lock;
int write_used; /* number of non-empty write buffers */
int transmitting;
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 31ccdccd7..15ce4ab11 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -171,6 +171,31 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
ep, buffer, size);
}
+static const unsigned short low_speed_maxpacket_maxes[4] = {
+ [USB_ENDPOINT_XFER_CONTROL] = 8,
+ [USB_ENDPOINT_XFER_ISOC] = 0,
+ [USB_ENDPOINT_XFER_BULK] = 0,
+ [USB_ENDPOINT_XFER_INT] = 8,
+};
+static const unsigned short full_speed_maxpacket_maxes[4] = {
+ [USB_ENDPOINT_XFER_CONTROL] = 64,
+ [USB_ENDPOINT_XFER_ISOC] = 1023,
+ [USB_ENDPOINT_XFER_BULK] = 64,
+ [USB_ENDPOINT_XFER_INT] = 64,
+};
+static const unsigned short high_speed_maxpacket_maxes[4] = {
+ [USB_ENDPOINT_XFER_CONTROL] = 64,
+ [USB_ENDPOINT_XFER_ISOC] = 1024,
+ [USB_ENDPOINT_XFER_BULK] = 512,
+ [USB_ENDPOINT_XFER_INT] = 1024,
+};
+static const unsigned short super_speed_maxpacket_maxes[4] = {
+ [USB_ENDPOINT_XFER_CONTROL] = 512,
+ [USB_ENDPOINT_XFER_ISOC] = 1024,
+ [USB_ENDPOINT_XFER_BULK] = 1024,
+ [USB_ENDPOINT_XFER_INT] = 1024,
+};
+
static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
int asnum, struct usb_host_interface *ifp, int num_ep,
unsigned char *buffer, int size)
@@ -179,6 +204,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
struct usb_endpoint_descriptor *d;
struct usb_host_endpoint *endpoint;
int n, i, j, retval;
+ unsigned int maxp;
+ const unsigned short *maxpacket_maxes;
d = (struct usb_endpoint_descriptor *) buffer;
buffer += d->bLength;
@@ -286,6 +313,42 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
endpoint->desc.wMaxPacketSize = cpu_to_le16(8);
}
+ /* Validate the wMaxPacketSize field */
+ maxp = usb_endpoint_maxp(&endpoint->desc);
+
+ /* Find the highest legal maxpacket size for this endpoint */
+ i = 0; /* additional transactions per microframe */
+ switch (to_usb_device(ddev)->speed) {
+ case USB_SPEED_LOW:
+ maxpacket_maxes = low_speed_maxpacket_maxes;
+ break;
+ case USB_SPEED_FULL:
+ maxpacket_maxes = full_speed_maxpacket_maxes;
+ break;
+ case USB_SPEED_HIGH:
+ /* Bits 12..11 are allowed only for HS periodic endpoints */
+ if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) {
+ i = maxp & (BIT(12) | BIT(11));
+ maxp &= ~i;
+ }
+ /* fallthrough */
+ default:
+ maxpacket_maxes = high_speed_maxpacket_maxes;
+ break;
+ case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
+ maxpacket_maxes = super_speed_maxpacket_maxes;
+ break;
+ }
+ j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)];
+
+ if (maxp > j) {
+ dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n",
+ cfgno, inum, asnum, d->bEndpointAddress, maxp, j);
+ maxp = j;
+ endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp);
+ }
+
/*
* Some buggy high speed devices have bulk endpoints using
* maxpacket sizes other than 512. High speed HCDs may not
@@ -293,9 +356,6 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
*/
if (to_usb_device(ddev)->speed == USB_SPEED_HIGH
&& usb_endpoint_xfer_bulk(d)) {
- unsigned maxp;
-
- maxp = usb_endpoint_maxp(&endpoint->desc) & 0x07ff;
if (maxp != 512)
dev_warn(ddev, "config %d interface %d altsetting %d "
"bulk endpoint 0x%X has invalid maxpacket %d\n",
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index e9f5043a2..50b6baa50 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -241,7 +241,8 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
goto error_decrease_mem;
}
- mem = usb_alloc_coherent(ps->dev, size, GFP_USER, &dma_handle);
+ mem = usb_alloc_coherent(ps->dev, size, GFP_USER | __GFP_NOWARN,
+ &dma_handle);
if (!mem) {
ret = -ENOMEM;
goto error_free_usbm;
@@ -1708,11 +1709,17 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
as->urb->start_frame = uurb->start_frame;
as->urb->number_of_packets = number_of_packets;
as->urb->stream_id = stream_id;
- if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
- ps->dev->speed == USB_SPEED_HIGH)
- as->urb->interval = 1 << min(15, ep->desc.bInterval - 1);
- else
- as->urb->interval = ep->desc.bInterval;
+
+ if (ep->desc.bInterval) {
+ if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
+ ps->dev->speed == USB_SPEED_HIGH ||
+ ps->dev->speed >= USB_SPEED_SUPER)
+ as->urb->interval = 1 <<
+ min(15, ep->desc.bInterval - 1);
+ else
+ as->urb->interval = ep->desc.bInterval;
+ }
+
as->urb->context = as;
as->urb->complete = async_completed;
for (totlen = u = 0; u < number_of_packets; u++) {
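
The devio.c hunk guards the bInterval math: for isochronous URBs and for high-/super-speed devices, bInterval is an exponent (the interval is 2^(bInterval-1) (micro)frames, capped at 2^15), elsewhere it is a frame count used directly, and bInterval == 0 must never reach the shift at all. A minimal sketch of the translation:

static int urb_interval(unsigned int bInterval, bool exponential)
{
	if (!bInterval)
		return 0;	/* leave the urb's interval untouched */
	if (exponential)
		return 1 << min(15u, bInterval - 1);
	return bInterval;
}
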
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index bee135176..1d5fc32d0 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1052,14 +1052,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
/* Continue a partial initialization */
if (type == HUB_INIT2 || type == HUB_INIT3) {
- device_lock(hub->intfdev);
+ device_lock(&hdev->dev);
/* Was the hub disconnected while we were waiting? */
- if (hub->disconnected) {
- device_unlock(hub->intfdev);
- kref_put(&hub->kref, hub_release);
- return;
- }
+ if (hub->disconnected)
+ goto disconnected;
if (type == HUB_INIT2)
goto init2;
goto init3;
@@ -1262,7 +1259,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
queue_delayed_work(system_power_efficient_wq,
&hub->init_work,
msecs_to_jiffies(delay));
- device_unlock(hub->intfdev);
+ device_unlock(&hdev->dev);
return; /* Continues at init3: below */
} else {
msleep(delay);
@@ -1281,12 +1278,12 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
/* Scan all ports that need attention */
kick_hub_wq(hub);
- /* Allow autosuspend if it was suppressed */
- if (type <= HUB_INIT3)
+ if (type == HUB_INIT2 || type == HUB_INIT3) {
+ /* Allow autosuspend if it was suppressed */
+ disconnected:
usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
-
- if (type == HUB_INIT2 || type == HUB_INIT3)
- device_unlock(hub->intfdev);
+ device_unlock(&hdev->dev);
+ }
kref_put(&hub->kref, hub_release);
}
@@ -1315,8 +1312,6 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
struct usb_device *hdev = hub->hdev;
int i;
- cancel_delayed_work_sync(&hub->init_work);
-
/* hub_wq and related activity won't re-trigger */
hub->quiescing = 1;
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 14196cd41..2fd50578b 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -37,6 +37,7 @@
#define PCI_DEVICE_ID_INTEL_BXT 0x0aaa
#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa
#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
+#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
static const struct acpi_gpio_params cs_gpios = { 1, 0, false };
@@ -214,6 +215,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
{ } /* Terminating Entry */
};
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 716f4f051..05a5300aa 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1897,7 +1897,8 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
struct dwc3_request *req, struct dwc3_trb *trb,
- const struct dwc3_event_depevt *event, int status)
+ const struct dwc3_event_depevt *event, int status,
+ int chain)
{
unsigned int count;
unsigned int s_pkt = 0;
@@ -1905,6 +1906,19 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
trace_dwc3_complete_trb(dep, trb);
+ /*
+	 * If we're in the middle of a series of chained TRBs and we
+	 * receive a short transfer along the way, DWC3 will skip
+	 * through all TRBs including the last TRB in the chain (the
+	 * one where the CHN bit is zero). DWC3 will also avoid
+	 * clearing the HWO bit, and SW has to do it manually.
+ *
+ * We're going to do that here to avoid problems of HW trying
+ * to use bogus TRBs for transfers.
+ */
+ if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+
if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
/*
* We continue despite the error. There is not much we
@@ -1916,6 +1930,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
*/
dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
dep->name, trb);
+
count = trb->size & DWC3_TRB_SIZE_MASK;
if (dep->direction) {
@@ -1954,15 +1969,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
s_pkt = 1;
}
- /*
- * We assume here we will always receive the entire data block
- * which we should receive. Meaning, if we program RX to
- * receive 4K but we receive only 2K, we assume that's all we
- * should receive and we simply bounce the request back to the
- * gadget driver for further processing.
- */
- req->request.actual += req->request.length - count;
- if (s_pkt)
+ if (s_pkt && !chain)
return 1;
if ((event->status & DEPEVT_STATUS_LST) &&
(trb->ctrl & (DWC3_TRB_CTRL_LST |
@@ -1981,13 +1988,17 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
struct dwc3_trb *trb;
unsigned int slot;
unsigned int i;
+ int count = 0;
int ret;
do {
+ int chain;
+
req = next_request(&dep->started_list);
if (WARN_ON_ONCE(!req))
return 1;
+ chain = req->request.num_mapped_sgs > 0;
i = 0;
do {
slot = req->first_trb_index + i;
@@ -1995,13 +2006,22 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
slot++;
slot %= DWC3_TRB_NUM;
trb = &dep->trb_pool[slot];
+ count += trb->size & DWC3_TRB_SIZE_MASK;
ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
- event, status);
+ event, status, chain);
if (ret)
break;
} while (++i < req->request.num_mapped_sgs);
+ /*
+ * We assume here we will always receive the entire data block
+ * which we should receive. Meaning, if we program RX to
+ * receive 4K but we receive only 2K, we assume that's all we
+ * should receive and we simply bounce the request back to the
+ * gadget driver for further processing.
+ */
+ req->request.actual += req->request.length - count;
dwc3_gadget_giveback(dep, req, status);
if (ret)
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index aa3707bde..be6479830 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -542,7 +542,7 @@ static ssize_t ep_aio(struct kiocb *iocb,
*/
spin_lock_irq(&epdata->dev->lock);
value = -ENODEV;
- if (unlikely(epdata->ep))
+ if (unlikely(epdata->ep == NULL))
goto fail;
req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index 93d28cb00..cf8819a5c 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -2053,7 +2053,7 @@ static void setup_received_handle(struct qe_udc *udc,
struct qe_ep *ep;
if (wValue != 0 || wLength != 0
- || pipe > USB_MAX_ENDPOINTS)
+ || pipe >= USB_MAX_ENDPOINTS)
break;
ep = &udc->eps[pipe];
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index e1b2dcebd..bd4695075 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -106,7 +106,7 @@ void usb_gadget_unmap_request_by_dev(struct device *dev,
return;
if (req->num_mapped_sgs) {
- dma_unmap_sg(dev, req->sg, req->num_mapped_sgs,
+ dma_unmap_sg(dev, req->sg, req->num_sgs,
is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
req->num_mapped_sgs = 0;
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index a962b89b6..1e5f529d5 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -332,11 +332,11 @@ static void ehci_turn_off_all_ports(struct ehci_hcd *ehci)
int port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
- ehci_writel(ehci, PORT_RWC_BITS,
- &ehci->regs->port_status[port]);
spin_unlock_irq(&ehci->lock);
ehci_port_power(ehci, port, false);
spin_lock_irq(&ehci->lock);
+ ehci_writel(ehci, PORT_RWC_BITS,
+ &ehci->regs->port_status[port]);
}
}
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index d61fcc480..730b9fd26 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -386,6 +386,9 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
ret = 0;
virt_dev = xhci->devs[slot_id];
+ if (!virt_dev)
+ return -ENODEV;
+
cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
if (!cmd) {
xhci_dbg(xhci, "Couldn't allocate command structure.\n");
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index c10972fcc..69f7fabdb 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -314,11 +314,12 @@ static void xhci_pci_remove(struct pci_dev *dev)
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
}
- usb_hcd_pci_remove(dev);
/* Workaround for spurious wakeups at shutdown with HSW */
if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
pci_set_power_state(dev, PCI_D3hot);
+
+ usb_hcd_pci_remove(dev);
}
#ifdef CONFIG_PM
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index d7d502578..bc17bcf57 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1325,12 +1325,6 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
- if (cmd->command_trb != xhci->cmd_ring->dequeue) {
- xhci_err(xhci,
- "Command completion event does not match command\n");
- return;
- }
-
del_timer(&xhci->cmd_timer);
trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
@@ -1342,6 +1336,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
xhci_handle_stopped_cmd_ring(xhci, cmd);
return;
}
+
+ if (cmd->command_trb != xhci->cmd_ring->dequeue) {
+ xhci_err(xhci,
+ "Command completion event does not match command\n");
+ return;
+ }
+
/*
* Host aborted the command ring, check if the current command was
* supposed to be aborted, otherwise continue normally.
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 6b978f04b..5c8210dc6 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -585,7 +585,6 @@ static void sg_timeout(unsigned long _req)
{
struct usb_sg_request *req = (struct usb_sg_request *) _req;
- req->status = -ETIMEDOUT;
usb_sg_cancel(req);
}
@@ -616,8 +615,10 @@ static int perform_sglist(
mod_timer(&sg_timer, jiffies +
msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
usb_sg_wait(req);
- del_timer_sync(&sg_timer);
- retval = req->status;
+ if (!del_timer_sync(&sg_timer))
+ retval = -ETIMEDOUT;
+ else
+ retval = req->status;
/* FIXME check resulting data pattern */
@@ -2602,7 +2603,7 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
ktime_get_ts64(&start);
retval = usbtest_do_ioctl(intf, param_32);
- if (retval)
+ if (retval < 0)
goto free_mutex;
ktime_get_ts64(&end);
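
The usbtest change above stops the timeout handler from writing req->status and instead derives the timeout from del_timer_sync()'s return value, which is nonzero only when the pending timer was deactivated before it fired. A minimal userspace sketch of the same race-free idiom, with all names hypothetical:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int timer_armed = 1;      /* 1 while the watchdog is pending */

static void *watchdog(void *arg)
{
    usleep(100 * 1000);                 /* stands in for SIMPLE_IO_TIMEOUT */
    /* Fire only if nobody disarmed us first, like a timer expiring. */
    if (atomic_exchange(&timer_armed, 0))
        puts("watchdog: cancelling request");
    return NULL;
}

int main(void)
{
    pthread_t t;
    int status;

    pthread_create(&t, NULL, watchdog, NULL);
    usleep(50 * 1000);                  /* the "transfer" completes here */

    /* Like del_timer_sync(): nonzero iff we beat the timer. */
    if (atomic_exchange(&timer_armed, 0))
        status = 0;                     /* completed: keep the real status */
    else
        status = -1;                    /* the -ETIMEDOUT case */

    pthread_join(t, NULL);
    printf("status = %d\n", status);
    return 0;
}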
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index baeb7d23b..8c81aac09 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -514,7 +514,8 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev)
if (gpio > 0)
dparam->enable_gpio = gpio;
- if (dparam->type == USBHS_TYPE_RCAR_GEN2)
+ if (dparam->type == USBHS_TYPE_RCAR_GEN2 ||
+ dparam->type == USBHS_TYPE_RCAR_GEN3)
dparam->has_usb_dmac = 1;
return info;
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 280ed5ff0..857e78337 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -871,7 +871,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
/* use PIO if packet is less than pio_dma_border or pipe is DCP */
if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
- usbhs_pipe_is_dcp(pipe))
+ usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
goto usbhsf_pio_prepare_push;
/* check data length if this driver doesn't use USB-DMAC */
@@ -976,7 +976,7 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
/* use PIO if packet is less than pio_dma_border or pipe is DCP */
if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) ||
- usbhs_pipe_is_dcp(pipe))
+ usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
goto usbhsf_pio_prepare_pop;
fifo = usbhsf_get_dma_fifo(priv, pkt);
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 50f3363cc..c4c64740a 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -617,10 +617,13 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
* use dmaengine if possible.
* It will use pio handler if impossible.
*/
- if (usb_endpoint_dir_in(desc))
+ if (usb_endpoint_dir_in(desc)) {
pipe->handler = &usbhs_fifo_dma_push_handler;
- else
+ } else {
pipe->handler = &usbhs_fifo_dma_pop_handler;
+ usbhs_xxxsts_clear(priv, BRDYSTS,
+ usbhs_pipe_number(pipe));
+ }
ret = 0;
}
@@ -1073,7 +1076,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
dev_info(dev, "%stransceiver found\n",
- gpriv->transceiver ? "" : "no ");
+ !IS_ERR(gpriv->transceiver) ? "" : "no ");
/*
* CAUTION
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 008208091..b2d767e74 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -648,6 +648,8 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_PALMSENS_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_IVIUM_XSTAT_PID) },
{ USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
{ USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
{ USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
@@ -1008,6 +1010,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
+ { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index c5d6c1e73..f87a938cf 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -406,6 +406,12 @@
#define FTDI_4N_GALAXY_DE_3_PID 0xF3C2
/*
+ * Ivium Technologies product IDs
+ */
+#define FTDI_PALMSENS_PID 0xf440
+#define FTDI_IVIUM_XSTAT_PID 0xf441
+
+/*
* Linx Technologies product ids
*/
#define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */
@@ -673,6 +679,12 @@
#define INTREPID_NEOVI_PID 0x0701
/*
+ * WICED USB UART
+ */
+#define WICED_VID 0x0A5C
+#define WICED_USB20706V2_PID 0x6422
+
+/*
* Definitions for ID TECH (www.idt-net.com) devices
*/
#define IDTECH_VID 0x0ACD /* ID TECH Vendor ID */
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 5608af4a3..de9992b49 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1252,7 +1252,7 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port,
if (urb->transfer_buffer == NULL) {
urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!urb->transfer_buffer)
goto exit;
}
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index ed378fb23..57426d703 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1340,8 +1340,8 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
}
if (urb->transfer_buffer == NULL) {
- urb->transfer_buffer =
- kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL);
+ urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
+ GFP_ATOMIC);
if (!urb->transfer_buffer)
goto exit;
}
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 8e07536c2..9894e341c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -274,6 +274,12 @@ static void option_instat_callback(struct urb *urb);
#define TELIT_PRODUCT_LE920 0x1200
#define TELIT_PRODUCT_LE910 0x1201
#define TELIT_PRODUCT_LE910_USBCFG4 0x1206
+#define TELIT_PRODUCT_LE920A4_1207 0x1207
+#define TELIT_PRODUCT_LE920A4_1208 0x1208
+#define TELIT_PRODUCT_LE920A4_1211 0x1211
+#define TELIT_PRODUCT_LE920A4_1212 0x1212
+#define TELIT_PRODUCT_LE920A4_1213 0x1213
+#define TELIT_PRODUCT_LE920A4_1214 0x1214
/* ZTE PRODUCTS */
#define ZTE_VENDOR_ID 0x19d2
@@ -519,6 +525,12 @@ static void option_instat_callback(struct urb *urb);
#define VIATELECOM_VENDOR_ID 0x15eb
#define VIATELECOM_PRODUCT_CDS7 0x0001
+/* WeTelecom products */
+#define WETELECOM_VENDOR_ID 0x22de
+#define WETELECOM_PRODUCT_WMD200 0x6801
+#define WETELECOM_PRODUCT_6802 0x6802
+#define WETELECOM_PRODUCT_WMD300 0x6803
+
struct option_blacklist_info {
/* bitmask of interface numbers blacklisted for send_setup */
const unsigned long sendsetup;
@@ -628,6 +640,11 @@ static const struct option_blacklist_info telit_le920_blacklist = {
.reserved = BIT(1) | BIT(5),
};
+static const struct option_blacklist_info telit_le920a4_blacklist_1 = {
+ .sendsetup = BIT(0),
+ .reserved = BIT(1),
+};
+
static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
.sendsetup = BIT(2),
.reserved = BIT(0) | BIT(1) | BIT(3),
@@ -1203,6 +1220,16 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
.driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208),
+ .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212),
+ .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
@@ -1966,9 +1993,13 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
+ { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index b1b9bac44..d213cf44a 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1433,7 +1433,7 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[]
rc = usb_register(udriver);
if (rc)
- return rc;
+ goto failed_usb_register;
for (sd = serial_drivers; *sd; ++sd) {
(*sd)->usb_driver = udriver;
@@ -1451,6 +1451,8 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[]
while (sd-- > serial_drivers)
usb_serial_deregister(*sd);
usb_deregister(udriver);
+failed_usb_register:
+ kfree(udriver);
return rc;
}
EXPORT_SYMBOL_GPL(usb_serial_register_drivers);
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 15ecfc9c5..152b43822 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -564,67 +564,80 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
}
static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
- uint32_t flags, void *data)
+ unsigned int count, uint32_t flags,
+ void *data)
{
- int32_t fd = *(int32_t *)data;
-
- if (!(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
- return -EINVAL;
-
/* DATA_NONE/DATA_BOOL enables loopback testing */
if (flags & VFIO_IRQ_SET_DATA_NONE) {
- if (*ctx)
- eventfd_signal(*ctx, 1);
- return 0;
+ if (*ctx) {
+ if (count) {
+ eventfd_signal(*ctx, 1);
+ } else {
+ eventfd_ctx_put(*ctx);
+ *ctx = NULL;
+ }
+ return 0;
+ }
} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
- uint8_t trigger = *(uint8_t *)data;
+ uint8_t trigger;
+
+ if (!count)
+ return -EINVAL;
+
+ trigger = *(uint8_t *)data;
if (trigger && *ctx)
eventfd_signal(*ctx, 1);
- return 0;
- }
- /* Handle SET_DATA_EVENTFD */
- if (fd == -1) {
- if (*ctx)
- eventfd_ctx_put(*ctx);
- *ctx = NULL;
return 0;
- } else if (fd >= 0) {
- struct eventfd_ctx *efdctx;
- efdctx = eventfd_ctx_fdget(fd);
- if (IS_ERR(efdctx))
- return PTR_ERR(efdctx);
- if (*ctx)
- eventfd_ctx_put(*ctx);
- *ctx = efdctx;
+ } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int32_t fd;
+
+ if (!count)
+ return -EINVAL;
+
+ fd = *(int32_t *)data;
+ if (fd == -1) {
+ if (*ctx)
+ eventfd_ctx_put(*ctx);
+ *ctx = NULL;
+ } else if (fd >= 0) {
+ struct eventfd_ctx *efdctx;
+
+ efdctx = eventfd_ctx_fdget(fd);
+ if (IS_ERR(efdctx))
+ return PTR_ERR(efdctx);
+
+ if (*ctx)
+ eventfd_ctx_put(*ctx);
+
+ *ctx = efdctx;
+ }
return 0;
- } else
- return -EINVAL;
+ }
+
+ return -EINVAL;
}
static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags, void *data)
{
- if (index != VFIO_PCI_ERR_IRQ_INDEX)
+ if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
return -EINVAL;
- /*
- * We should sanitize start & count, but that wasn't caught
- * originally, so this IRQ index must forever ignore them :-(
- */
-
- return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, flags, data);
+ return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
+ count, flags, data);
}
static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
unsigned index, unsigned start,
unsigned count, uint32_t flags, void *data)
{
- if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count != 1)
+ if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
return -EINVAL;
- return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, flags, data);
+ return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
+ count, flags, data);
}
int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
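
The reworked helper above now distinguishes DATA_NONE, DATA_BOOL and DATA_EVENTFD explicitly and treats count == 0 as de-configuration. For reference, a hedged userspace sketch of how a trigger eventfd is typically registered through VFIO_DEVICE_SET_IRQS; acquiring device_fd is elided:

#include <linux/vfio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

static int set_req_trigger(int device_fd, int32_t efd)
{
    struct vfio_irq_set *set;
    size_t sz = sizeof(*set) + sizeof(int32_t);
    int ret;

    set = calloc(1, sz);
    if (!set)
        return -1;
    set->argsz = sz;
    set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    set->index = VFIO_PCI_REQ_IRQ_INDEX;
    set->start = 0;
    set->count = 1;                 /* count == 0 now means "tear down" */
    /* efd == -1 de-configures the trigger, matching the hunk above */
    memcpy(set->data, &efd, sizeof(efd));

    ret = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
    free(set);
    return ret;
}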
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index ca6bfddaa..2ebf30e57 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -316,6 +316,8 @@ static inline int virtqueue_add(struct virtqueue *_vq,
* host should service the ring ASAP. */
if (out_sgs)
vq->notify(&vq->vq);
+ if (indirect)
+ kfree(desc);
END_USE(vq);
return -ENOSPC;
}
diff --git a/fs/aufs/branch.c b/fs/aufs/branch.c
index 80b018f57..3bfbe5b51 100644
--- a/fs/aufs/branch.c
+++ b/fs/aufs/branch.c
@@ -141,12 +141,12 @@ static struct au_branch *au_br_alloc(struct super_block *sb, int new_nbranch,
goto out_wbr;
}
- err = au_sbr_realloc(au_sbi(sb), new_nbranch);
+ err = au_sbr_realloc(au_sbi(sb), new_nbranch, /*may_shrink*/0);
if (!err)
- err = au_di_realloc(au_di(root), new_nbranch);
+ err = au_di_realloc(au_di(root), new_nbranch, /*may_shrink*/0);
if (!err) {
inode = d_inode(root);
- err = au_hinode_realloc(au_ii(inode), new_nbranch);
+ err = au_hinode_realloc(au_ii(inode), new_nbranch, /*may_shrink*/0);
}
if (!err)
return add_branch; /* success */
@@ -890,7 +890,8 @@ static void au_br_do_del_brp(struct au_sbinfo *sbinfo,
sbinfo->si_branch[0 + bbot] = NULL;
sbinfo->si_bbot--;
- p = krealloc(sbinfo->si_branch, sizeof(*p) * bbot, AuGFP_SBILIST);
+ p = au_krealloc(sbinfo->si_branch, sizeof(*p) * bbot, AuGFP_SBILIST,
+ /*may_shrink*/1);
if (p)
sbinfo->si_branch = p;
/* harmless error */
@@ -909,7 +910,8 @@ static void au_br_do_del_hdp(struct au_dinfo *dinfo, const aufs_bindex_t bindex,
/* au_h_dentry_init(au_hdentry(dinfo, bbot); */
dinfo->di_bbot--;
- p = krealloc(dinfo->di_hdentry, sizeof(*p) * bbot, AuGFP_SBILIST);
+ p = au_krealloc(dinfo->di_hdentry, sizeof(*p) * bbot, AuGFP_SBILIST,
+ /*may_shrink*/1);
if (p)
dinfo->di_hdentry = p;
/* harmless error */
@@ -928,7 +930,8 @@ static void au_br_do_del_hip(struct au_iinfo *iinfo, const aufs_bindex_t bindex,
/* au_hinode_init(au_hinode(iinfo, bbot)); */
iinfo->ii_bbot--;
- p = krealloc(iinfo->ii_hinode, sizeof(*p) * bbot, AuGFP_SBILIST);
+ p = au_krealloc(iinfo->ii_hinode, sizeof(*p) * bbot, AuGFP_SBILIST,
+ /*may_shrink*/1);
if (p)
iinfo->ii_hinode = p;
/* harmless error */
diff --git a/fs/aufs/dbgaufs.c b/fs/aufs/dbgaufs.c
index f85a813a4..4db01e853 100644
--- a/fs/aufs/dbgaufs.c
+++ b/fs/aufs/dbgaufs.c
@@ -256,7 +256,10 @@ void dbgaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex)
for (; bindex <= bbot; bindex++) {
br = au_sbr(sb, bindex);
xi = &br->br_xino;
+ /* debugfs acquires the parent i_mutex */
+ lockdep_off();
debugfs_remove(xi->xi_dbgaufs);
+ lockdep_on();
xi->xi_dbgaufs = NULL;
}
}
diff --git a/fs/aufs/dcsub.c b/fs/aufs/dcsub.c
index 09bc36af7..5d4639838 100644
--- a/fs/aufs/dcsub.c
+++ b/fs/aufs/dcsub.c
@@ -69,7 +69,8 @@ static int au_dpages_append(struct au_dcsub_pages *dpages,
err = -ENOMEM;
sz = dpages->ndpage * sizeof(*dpages->dpages);
p = au_kzrealloc(dpages->dpages, sz,
- sz + sizeof(*dpages->dpages), gfp);
+ sz + sizeof(*dpages->dpages), gfp,
+ /*may_shrink*/0);
if (unlikely(!p))
goto out;
diff --git a/fs/aufs/dentry.c b/fs/aufs/dentry.c
index 51af2cc72..5eb4f3620 100644
--- a/fs/aufs/dentry.c
+++ b/fs/aufs/dentry.c
@@ -36,7 +36,7 @@ au_do_lookup(struct dentry *h_parent, struct dentry *dentry,
br = au_sbr(dentry->d_sb, bindex);
wh_able = !!au_br_whable(br->br_perm);
if (wh_able)
- wh_found = au_wh_test(h_parent, wh_name, /*try_sio*/0);
+ wh_found = au_wh_test(h_parent, wh_name, ignore_perm);
h_dentry = ERR_PTR(wh_found);
if (!wh_found)
goto real_lookup;
@@ -701,7 +701,7 @@ void au_refresh_dop(struct dentry *dentry, int force_reval)
int au_refresh_dentry(struct dentry *dentry, struct dentry *parent)
{
- int err, ebrange;
+ int err, ebrange, nbr;
unsigned int sigen;
struct au_dinfo *dinfo, *tmp;
struct super_block *sb;
@@ -717,8 +717,9 @@ int au_refresh_dentry(struct dentry *dentry, struct dentry *parent)
if (unlikely(err))
goto out;
+ nbr = au_sbbot(sb) + 1;
dinfo = au_di(dentry);
- err = au_di_realloc(dinfo, au_sbbot(sb) + 1);
+ err = au_di_realloc(dinfo, nbr, /*may_shrink*/0);
if (unlikely(err))
goto out;
ebrange = au_dbrange_test(dentry);
@@ -761,6 +762,7 @@ int au_refresh_dentry(struct dentry *dentry, struct dentry *parent)
au_dbg_verify_dinode(dentry);
AuTraceErr(err);
}
+ au_di_realloc(dinfo, nbr, /*may_shrink*/1); /* harmless if err */
au_rw_write_unlock(&tmp->di_rwsem);
au_di_free(tmp);
if (unlikely(err))
diff --git a/fs/aufs/dentry.h b/fs/aufs/dentry.h
index dc027191c..01211d220 100644
--- a/fs/aufs/dentry.h
+++ b/fs/aufs/dentry.h
@@ -66,7 +66,7 @@ void au_di_swap(struct au_dinfo *a, struct au_dinfo *b);
void au_di_cp(struct au_dinfo *dst, struct au_dinfo *src);
int au_di_init(struct dentry *dentry);
void au_di_fin(struct dentry *dentry);
-int au_di_realloc(struct au_dinfo *dinfo, int nbr);
+int au_di_realloc(struct au_dinfo *dinfo, int nbr, int may_shrink);
void di_read_lock(struct dentry *d, int flags, unsigned int lsc);
void di_read_unlock(struct dentry *d, int flags);
diff --git a/fs/aufs/dinfo.c b/fs/aufs/dinfo.c
index 361e86142..2626c0cca 100644
--- a/fs/aufs/dinfo.c
+++ b/fs/aufs/dinfo.c
@@ -129,7 +129,7 @@ void au_di_fin(struct dentry *dentry)
au_di_free(dinfo);
}
-int au_di_realloc(struct au_dinfo *dinfo, int nbr)
+int au_di_realloc(struct au_dinfo *dinfo, int nbr, int may_shrink)
{
int err, sz;
struct au_hdentry *hdp;
@@ -140,7 +140,8 @@ int au_di_realloc(struct au_dinfo *dinfo, int nbr)
sz = sizeof(*hdp) * (dinfo->di_bbot + 1);
if (!sz)
sz = sizeof(*hdp);
- hdp = au_kzrealloc(dinfo->di_hdentry, sz, sizeof(*hdp) * nbr, GFP_NOFS);
+ hdp = au_kzrealloc(dinfo->di_hdentry, sz, sizeof(*hdp) * nbr, GFP_NOFS,
+ may_shrink);
if (hdp) {
dinfo->di_hdentry = hdp;
err = 0;
diff --git a/fs/aufs/file.c b/fs/aufs/file.c
index 429b56840..8d5ca6f7f 100644
--- a/fs/aufs/file.c
+++ b/fs/aufs/file.c
@@ -655,23 +655,26 @@ static void au_do_refresh_dir(struct file *file)
*/
static int refresh_file(struct file *file, int (*reopen)(struct file *file))
{
- int err, need_reopen;
+ int err, need_reopen, nbr;
aufs_bindex_t bbot, bindex;
struct dentry *dentry;
+ struct super_block *sb;
struct au_finfo *finfo;
struct au_hfile *hfile;
dentry = file->f_path.dentry;
+ sb = dentry->d_sb;
+ nbr = au_sbbot(sb) + 1;
finfo = au_fi(file);
if (!finfo->fi_hdir) {
hfile = &finfo->fi_htop;
AuDebugOn(!hfile->hf_file);
- bindex = au_br_index(dentry->d_sb, hfile->hf_br->br_id);
+ bindex = au_br_index(sb, hfile->hf_br->br_id);
AuDebugOn(bindex < 0);
if (bindex != finfo->fi_btop)
au_set_fbtop(file, bindex);
} else {
- err = au_fidir_realloc(finfo, au_sbbot(dentry->d_sb) + 1);
+ err = au_fidir_realloc(finfo, nbr, /*may_shrink*/0);
if (unlikely(err))
goto out;
au_do_refresh_dir(file);
@@ -681,6 +684,9 @@ static int refresh_file(struct file *file, int (*reopen)(struct file *file))
need_reopen = 1;
if (!au_test_mmapped(file))
err = au_file_refresh_by_inode(file, &need_reopen);
+ if (finfo->fi_hdir)
+ /* harmless if err */
+ au_fidir_realloc(finfo, nbr, /*may_shrink*/1);
if (!err && need_reopen && !d_unlinked(dentry))
err = reopen(file);
if (!err) {
diff --git a/fs/aufs/file.h b/fs/aufs/file.h
index 6ecd0df33..f53b381a7 100644
--- a/fs/aufs/file.h
+++ b/fs/aufs/file.h
@@ -110,7 +110,7 @@ void au_set_h_fptr(struct file *file, aufs_bindex_t bindex,
void au_update_figen(struct file *file);
struct au_fidir *au_fidir_alloc(struct super_block *sb);
-int au_fidir_realloc(struct au_finfo *finfo, int nbr);
+int au_fidir_realloc(struct au_finfo *finfo, int nbr, int may_shrink);
void au_fi_init_once(void *_fi);
void au_finfo_fin(struct file *file, int atonce);
diff --git a/fs/aufs/finfo.c b/fs/aufs/finfo.c
index e0c16b2f1..0f016ed54 100644
--- a/fs/aufs/finfo.c
+++ b/fs/aufs/finfo.c
@@ -66,7 +66,7 @@ struct au_fidir *au_fidir_alloc(struct super_block *sb)
return fidir;
}
-int au_fidir_realloc(struct au_finfo *finfo, int nbr)
+int au_fidir_realloc(struct au_finfo *finfo, int nbr, int may_shrink)
{
int err;
struct au_fidir *fidir, *p;
@@ -77,7 +77,7 @@ int au_fidir_realloc(struct au_finfo *finfo, int nbr)
err = -ENOMEM;
p = au_kzrealloc(fidir, au_fidir_sz(fidir->fd_nent), au_fidir_sz(nbr),
- GFP_NOFS);
+ GFP_NOFS, may_shrink);
if (p) {
p->fd_nent = nbr;
finfo->fi_hdir = p;
diff --git a/fs/aufs/iinfo.c b/fs/aufs/iinfo.c
index c83db695d..975cff389 100644
--- a/fs/aufs/iinfo.c
+++ b/fs/aufs/iinfo.c
@@ -205,7 +205,7 @@ int au_iinfo_init(struct inode *inode)
return -ENOMEM;
}
-int au_hinode_realloc(struct au_iinfo *iinfo, int nbr)
+int au_hinode_realloc(struct au_iinfo *iinfo, int nbr, int may_shrink)
{
int err, i;
struct au_hinode *hip;
@@ -213,7 +213,8 @@ int au_hinode_realloc(struct au_iinfo *iinfo, int nbr)
AuRwMustWriteLock(&iinfo->ii_rwsem);
err = -ENOMEM;
- hip = krealloc(iinfo->ii_hinode, sizeof(*hip) * nbr, GFP_NOFS);
+ hip = au_krealloc(iinfo->ii_hinode, sizeof(*hip) * nbr, GFP_NOFS,
+ may_shrink);
if (hip) {
iinfo->ii_hinode = hip;
i = iinfo->ii_bbot + 1;
diff --git a/fs/aufs/inode.c b/fs/aufs/inode.c
index a4b994e8c..8f7446df6 100644
--- a/fs/aufs/inode.c
+++ b/fs/aufs/inode.c
@@ -27,7 +27,7 @@ static void au_refresh_hinode_attr(struct inode *inode, int do_version)
static int au_ii_refresh(struct inode *inode, int *update)
{
- int err, e;
+ int err, e, nbr;
umode_t type;
aufs_bindex_t bindex, new_bindex;
struct super_block *sb;
@@ -39,9 +39,10 @@ static int au_ii_refresh(struct inode *inode, int *update)
*update = 0;
sb = inode->i_sb;
+ nbr = au_sbbot(sb) + 1;
type = inode->i_mode & S_IFMT;
iinfo = au_ii(inode);
- err = au_hinode_realloc(iinfo, au_sbbot(sb) + 1);
+ err = au_hinode_realloc(iinfo, nbr, /*may_shrink*/0);
if (unlikely(err))
goto out;
@@ -79,6 +80,7 @@ static int au_ii_refresh(struct inode *inode, int *update)
}
}
au_update_ibrange(inode, /*do_put_zero*/0);
+ au_hinode_realloc(iinfo, nbr, /*may_shrink*/1); /* harmless if err */
e = au_dy_irefresh(inode);
if (unlikely(e && !err))
err = e;
diff --git a/fs/aufs/inode.h b/fs/aufs/inode.h
index 60995c054..33d9f5e8f 100644
--- a/fs/aufs/inode.h
+++ b/fs/aufs/inode.h
@@ -258,7 +258,7 @@ void au_icntnr_init_once(void *_c);
void au_hinode_init(struct au_hinode *hinode);
int au_iinfo_init(struct inode *inode);
void au_iinfo_fin(struct inode *inode);
-int au_hinode_realloc(struct au_iinfo *iinfo, int nbr);
+int au_hinode_realloc(struct au_iinfo *iinfo, int nbr, int may_shrink);
#ifdef CONFIG_PROC_FS
/* plink.c */
diff --git a/fs/aufs/loop.c b/fs/aufs/loop.c
index e92a34599..c3ca50f16 100644
--- a/fs/aufs/loop.c
+++ b/fs/aufs/loop.c
@@ -91,7 +91,8 @@ void au_warn_loopback(struct super_block *h_sb)
new_nelem = au_warn_loopback_nelem + au_warn_loopback_step;
a = au_kzrealloc(au_warn_loopback_array,
au_warn_loopback_nelem * sizeof(unsigned long),
- new_nelem * sizeof(unsigned long), GFP_ATOMIC);
+ new_nelem * sizeof(unsigned long), GFP_ATOMIC,
+ /*may_shrink*/0);
if (a) {
au_warn_loopback_nelem = new_nelem;
au_warn_loopback_array = a;
diff --git a/fs/aufs/module.c b/fs/aufs/module.c
index ede9a232a..5092b4bd6 100644
--- a/fs/aufs/module.c
+++ b/fs/aufs/module.c
@@ -10,13 +10,57 @@
#include <linux/seq_file.h>
#include "aufs.h"
-void *au_kzrealloc(void *p, unsigned int nused, unsigned int new_sz, gfp_t gfp)
+/* shrinkable realloc */
+void *au_krealloc(void *p, unsigned int new_sz, gfp_t gfp, int may_shrink)
{
- if (new_sz <= nused)
- return p;
+ size_t sz;
+ int diff;
+
+ sz = 0;
+ diff = -1;
+ if (p) {
+#if 0 /* unused */
+ if (!new_sz) {
+ au_delayed_kfree(p);
+ p = NULL;
+ goto out;
+ }
+#else
+ AuDebugOn(!new_sz);
+#endif
+ sz = ksize(p);
+ diff = au_kmidx_sub(sz, new_sz);
+ }
+ if (sz && !diff)
+ goto out;
+
+ if (sz < new_sz)
+ /* expand or SLOB */
+ p = krealloc(p, new_sz, gfp);
+ else if (new_sz < sz && may_shrink) {
+ /* shrink */
+ void *q;
+
+ q = kmalloc(new_sz, gfp);
+ if (q) {
+ if (p) {
+ memcpy(q, p, new_sz);
+ au_delayed_kfree(p);
+ }
+ p = q;
+ } else
+ p = NULL;
+ }
- p = krealloc(p, new_sz, gfp);
- if (p)
+out:
+ return p;
+}
+
+void *au_kzrealloc(void *p, unsigned int nused, unsigned int new_sz, gfp_t gfp,
+ int may_shrink)
+{
+ p = au_krealloc(p, new_sz, gfp, may_shrink);
+ if (p && new_sz > nused)
memset(p + nused, 0, new_sz - nused);
return p;
}
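
au_krealloc() above works around the fact that krealloc() never moves an object into a smaller slab bucket: when may_shrink is set and the new size falls into a smaller bucket, it allocates, copies and frees by hand. A userspace sketch of the same idea, using malloc_usable_size() as a stand-in for ksize() and a crude halving heuristic in place of the kmalloc_index() comparison; unlike the kernel helper, this sketch keeps the old block when the shrink allocation fails:

#include <malloc.h>
#include <stdlib.h>
#include <string.h>

void *shrinkable_realloc(void *p, size_t new_sz, int may_shrink)
{
    size_t cur = p ? malloc_usable_size(p) : 0;

    if (cur >= new_sz) {
        void *q;

        /* crude "same bucket" test; the kernel compares kmalloc indices */
        if (!may_shrink || cur < 2 * new_sz)
            return p;
        q = malloc(new_sz);
        if (!q)
            return p;               /* shrink failure: keep the old block */
        memcpy(q, p, new_sz);
        free(p);
        return q;
    }
    return realloc(p, new_sz);      /* growing: plain realloc */
}

int main(void)
{
    char *p = shrinkable_realloc(NULL, 1024, 0);

    if (!p)
        return 1;
    memset(p, 0, 1024);
    p = shrinkable_realloc(p, 16, 1);   /* likely drops to a smaller bucket */
    free(p);
    return 0;
}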
@@ -38,9 +82,9 @@ static void au_do_dfree(struct work_struct *work __maybe_unused)
head = &au_dfree.cache[AuCache_##idx].llist; \
node = llist_del_all(head); \
for (; node; node = next) { \
- struct au_##name *p = \
- p = llist_entry(node, struct au_##name, \
- lnode); \
+ struct au_##name *p \
+ = llist_entry(node, struct au_##name, \
+ lnode); \
next = llist_next(node); \
au_cache_free_##name(p); \
} \
diff --git a/fs/aufs/module.h b/fs/aufs/module.h
index 681dc75f5..6f968ba41 100644
--- a/fs/aufs/module.h
+++ b/fs/aufs/module.h
@@ -25,7 +25,19 @@ extern bool au_userns;
extern int au_dir_roflags;
-void *au_kzrealloc(void *p, unsigned int nused, unsigned int new_sz, gfp_t gfp);
+void *au_krealloc(void *p, unsigned int new_sz, gfp_t gfp, int may_shrink);
+void *au_kzrealloc(void *p, unsigned int nused, unsigned int new_sz, gfp_t gfp,
+ int may_shrink);
+
+static inline int au_kmidx_sub(size_t sz, size_t new_sz)
+{
+#ifndef CONFIG_SLOB
+ return kmalloc_index(sz) - kmalloc_index(new_sz);
+#else
+ return -1; /* SLOB is untested */
+#endif
+}
+
int au_seq_path(struct seq_file *seq, struct path *path);
#ifdef CONFIG_PROC_FS
diff --git a/fs/aufs/sbinfo.c b/fs/aufs/sbinfo.c
index 25f60549c..e113b5119 100644
--- a/fs/aufs/sbinfo.c
+++ b/fs/aufs/sbinfo.c
@@ -118,7 +118,7 @@ out:
return err;
}
-int au_sbr_realloc(struct au_sbinfo *sbinfo, int nbr)
+int au_sbr_realloc(struct au_sbinfo *sbinfo, int nbr, int may_shrink)
{
int err, sz;
struct au_branch **brp;
@@ -129,7 +129,8 @@ int au_sbr_realloc(struct au_sbinfo *sbinfo, int nbr)
sz = sizeof(*brp) * (sbinfo->si_bbot + 1);
if (unlikely(!sz))
sz = sizeof(*brp);
- brp = au_kzrealloc(sbinfo->si_branch, sz, sizeof(*brp) * nbr, GFP_NOFS);
+ brp = au_kzrealloc(sbinfo->si_branch, sz, sizeof(*brp) * nbr, GFP_NOFS,
+ may_shrink);
if (brp) {
sbinfo->si_branch = brp;
err = 0;
diff --git a/fs/aufs/super.h b/fs/aufs/super.h
index b8ca14994..e4ac866e7 100644
--- a/fs/aufs/super.h
+++ b/fs/aufs/super.h
@@ -266,7 +266,7 @@ void au_iarray_free(struct inode **a, unsigned long long max);
/* sbinfo.c */
void au_si_free(struct kobject *kobj);
int au_si_alloc(struct super_block *sb);
-int au_sbr_realloc(struct au_sbinfo *sbinfo, int nbr);
+int au_sbr_realloc(struct au_sbinfo *sbinfo, int nbr, int may_shrink);
unsigned int au_sigen_inc(struct super_block *sb);
aufs_bindex_t au_new_br_id(struct super_block *sb);
diff --git a/fs/aufs/vdir.c b/fs/aufs/vdir.c
index e6e3a1eaa..b2eb4c058 100644
--- a/fs/aufs/vdir.c
+++ b/fs/aufs/vdir.c
@@ -266,8 +266,8 @@ static int append_deblk(struct au_vdir *vdir)
unsigned char **o;
err = -ENOMEM;
- o = krealloc(vdir->vd_deblk, sizeof(*o) * (vdir->vd_nblk + 1),
- GFP_NOFS);
+ o = au_krealloc(vdir->vd_deblk, sizeof(*o) * (vdir->vd_nblk + 1),
+ GFP_NOFS, /*may_shrink*/0);
if (unlikely(!o))
goto out;
@@ -690,8 +690,8 @@ static int copy_vdir(struct au_vdir *tgt, struct au_vdir *src)
if (tgt->vd_nblk < src->vd_nblk) {
unsigned char **p;
- p = krealloc(tgt->vd_deblk, sizeof(*p) * src->vd_nblk,
- GFP_NOFS);
+ p = au_krealloc(tgt->vd_deblk, sizeof(*p) * src->vd_nblk,
+ GFP_NOFS, /*may_shrink*/0);
if (unlikely(!p))
goto out;
tgt->vd_deblk = p;
@@ -701,7 +701,8 @@ static int copy_vdir(struct au_vdir *tgt, struct au_vdir *src)
unsigned char *p;
tgt->vd_deblk_sz = deblk_sz;
- p = krealloc(tgt->vd_deblk[0], deblk_sz, GFP_NOFS);
+ p = au_krealloc(tgt->vd_deblk[0], deblk_sz, GFP_NOFS,
+ /*may_shrink*/1);
if (unlikely(!p))
goto out;
tgt->vd_deblk[0] = p;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 4274a7bfd..72f50480e 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1040,6 +1040,7 @@ struct btrfs_fs_info {
struct btrfs_workqueue *qgroup_rescan_workers;
struct completion qgroup_rescan_completion;
struct btrfs_work qgroup_rescan_work;
+ bool qgroup_rescan_running; /* protected by qgroup_rescan_lock */
/* filesystem state */
unsigned long fs_state;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 60ce11903..864cf3be0 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1626,8 +1626,8 @@ fail:
return ret;
}
-static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
- u64 root_id)
+struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
+ u64 root_id)
{
struct btrfs_root *root;
@@ -2306,6 +2306,7 @@ static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
fs_info->quota_enabled = 0;
fs_info->pending_quota_state = 0;
fs_info->qgroup_ulist = NULL;
+ fs_info->qgroup_rescan_running = false;
mutex_init(&fs_info->qgroup_rescan_lock);
}
@@ -3849,7 +3850,7 @@ void close_ctree(struct btrfs_root *root)
smp_mb();
/* wait for the qgroup rescan worker to stop */
- btrfs_qgroup_wait_for_completion(fs_info);
+ btrfs_qgroup_wait_for_completion(fs_info, false);
/* wait for the uuid_scan task to finish */
down(&fs_info->uuid_tree_rescan_sem);
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index acba82149..355e31f90 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -68,6 +68,8 @@ struct extent_buffer *btrfs_find_tree_block(struct btrfs_fs_info *fs_info,
struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
struct btrfs_key *location);
int btrfs_init_fs_root(struct btrfs_root *root);
+struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
+ u64 root_id);
int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
struct btrfs_root *root);
void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 05173563e..3722a1f65 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -5088,7 +5088,7 @@ static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- return btrfs_qgroup_wait_for_completion(root->fs_info);
+ return btrfs_qgroup_wait_for_completion(root->fs_info, true);
}
static long _btrfs_ioctl_set_received_subvol(struct file *file,
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 9d4c05b14..4904ebee4 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -995,7 +995,7 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
goto out;
fs_info->quota_enabled = 0;
fs_info->pending_quota_state = 0;
- btrfs_qgroup_wait_for_completion(fs_info);
+ btrfs_qgroup_wait_for_completion(fs_info, false);
spin_lock(&fs_info->qgroup_lock);
quota_root = fs_info->quota_root;
fs_info->quota_root = NULL;
@@ -2302,6 +2302,10 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
int err = -ENOMEM;
int ret = 0;
+ mutex_lock(&fs_info->qgroup_rescan_lock);
+ fs_info->qgroup_rescan_running = true;
+ mutex_unlock(&fs_info->qgroup_rescan_lock);
+
path = btrfs_alloc_path();
if (!path)
goto out;
@@ -2368,6 +2372,9 @@ out:
}
done:
+ mutex_lock(&fs_info->qgroup_rescan_lock);
+ fs_info->qgroup_rescan_running = false;
+ mutex_unlock(&fs_info->qgroup_rescan_lock);
complete_all(&fs_info->qgroup_rescan_completion);
}
@@ -2486,20 +2493,26 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
return 0;
}
-int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info)
+int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
+ bool interruptible)
{
int running;
int ret = 0;
mutex_lock(&fs_info->qgroup_rescan_lock);
spin_lock(&fs_info->qgroup_lock);
- running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN;
+ running = fs_info->qgroup_rescan_running;
spin_unlock(&fs_info->qgroup_lock);
mutex_unlock(&fs_info->qgroup_rescan_lock);
- if (running)
+ if (!running)
+ return 0;
+
+ if (interruptible)
ret = wait_for_completion_interruptible(
&fs_info->qgroup_rescan_completion);
+ else
+ wait_for_completion(&fs_info->qgroup_rescan_completion);
return ret;
}
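
The qgroup hunks above replace the RESCAN status-flag test with a dedicated qgroup_rescan_running flag that the worker sets and clears under qgroup_rescan_lock, so waiters never block on a completion that will not be signalled. A pthread sketch of that handshake, with hypothetical names:

#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

static pthread_mutex_t rescan_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t rescan_done = PTHREAD_COND_INITIALIZER;
static bool rescan_running;

static void *rescan_worker(void *arg)
{
    pthread_mutex_lock(&rescan_lock);
    rescan_running = true;
    pthread_mutex_unlock(&rescan_lock);

    usleep(10 * 1000);                    /* the actual rescan work */

    pthread_mutex_lock(&rescan_lock);
    rescan_running = false;
    pthread_cond_broadcast(&rescan_done); /* complete_all() analog */
    pthread_mutex_unlock(&rescan_lock);
    return NULL;
}

static void wait_for_rescan(void)
{
    pthread_mutex_lock(&rescan_lock);
    while (rescan_running)                /* returns at once if not running */
        pthread_cond_wait(&rescan_done, &rescan_lock);
    pthread_mutex_unlock(&rescan_lock);
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, rescan_worker, NULL);
    usleep(1000);                         /* let the worker mark itself */
    wait_for_rescan();
    pthread_join(t, NULL);
    return 0;
}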
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index ecb2c143e..3d73e4c9c 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -46,7 +46,8 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info);
void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info);
-int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info);
+int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
+ bool interruptible);
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 src, u64 dst);
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index f1c30861d..3454aa4fa 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -272,6 +272,23 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
root_key.objectid = key.offset;
key.offset++;
+ /*
+ * The root might have been inserted already, as before we look
+ * for orphan roots, log replay might have happened, which
+ * triggers a transaction commit and qgroup accounting, which
+ * in turn reads and inserts fs roots while doing backref
+ * walking.
+ */
+ root = btrfs_lookup_fs_root(tree_root->fs_info,
+ root_key.objectid);
+ if (root) {
+ WARN_ON(!test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
+ &root->state));
+ if (btrfs_root_refs(&root->root_item) == 0)
+ btrfs_add_dead_root(root);
+ continue;
+ }
+
root = btrfs_read_fs_root(tree_root, &root_key);
err = PTR_ERR_OR_ZERO(root);
if (err && err != -ENOENT) {
@@ -310,16 +327,8 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
err = btrfs_insert_fs_root(root->fs_info, root);
- /*
- * The root might have been inserted already, as before we look
- * for orphan roots, log replay might have happened, which
- * triggers a transaction commit and qgroup accounting, which
- * in turn reads and inserts fs roots while doing backref
- * walking.
- */
- if (err == -EEXIST)
- err = 0;
if (err) {
+ BUG_ON(err == -EEXIST);
btrfs_free_fs_root(root);
break;
}
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 19f532e7d..6dc4296ee 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -223,8 +223,10 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
size -= n;
buf += n;
copied += n;
- if (!m->count)
+ if (!m->count) {
+ m->from = 0;
m->index++;
+ }
if (!size)
goto Done;
}
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index f35523d4f..b803213d1 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -114,9 +114,15 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
* If buf != of->prealloc_buf, we don't know how
* large it is, so cannot safely pass it to ->show
*/
- if (pos || WARN_ON_ONCE(buf != of->prealloc_buf))
+ if (WARN_ON_ONCE(buf != of->prealloc_buf))
return 0;
len = ops->show(kobj, of->kn->priv, buf);
+ if (pos) {
+ if (len <= pos)
+ return 0;
+ len -= pos;
+ memmove(buf, buf + pos, len);
+ }
return min(count, len);
}
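
The sysfs change above re-renders the whole prealloc buffer on every read and then slides the tail down for reads at a nonzero offset, instead of failing them. A userspace sketch of that offset handling, with hypothetical names:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static ssize_t show(char *buf)              /* renders the full contents */
{
    return sprintf(buf, "temperature: 42\n");
}

static ssize_t read_at(char *buf, size_t count, long pos)
{
    ssize_t len = show(buf);

    if (pos) {
        if (len <= pos)
            return 0;                       /* reading past the end */
        len -= pos;
        memmove(buf, buf + pos, len);       /* keep only the tail */
    }
    return count < (size_t)len ? count : len;   /* min(count, len) */
}

int main(void)
{
    char buf[64];
    ssize_t n = read_at(buf, sizeof(buf), 13);

    printf("%.*s", (int)n, buf);            /* prints "42\n" */
    return 0;
}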
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 288fac529..47f950856 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -985,7 +985,7 @@ static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
return NULL;
}
-#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, validate, data, fn) \
+#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \
static const void * __acpi_table_##name[] \
__attribute__((unused)) \
= { (void *) table_id, \
diff --git a/include/linux/i8042.h b/include/linux/i8042.h
index 0f9bafa17..d98780ca9 100644
--- a/include/linux/i8042.h
+++ b/include/linux/i8042.h
@@ -62,7 +62,6 @@ struct serio;
void i8042_lock_chip(void);
void i8042_unlock_chip(void);
int i8042_command(unsigned char *param, int command);
-bool i8042_check_port_owner(const struct serio *);
int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
struct serio *serio));
int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
@@ -83,11 +82,6 @@ static inline int i8042_command(unsigned char *param, int command)
return -ENODEV;
}
-static inline bool i8042_check_port_owner(const struct serio *serio)
-{
- return false;
-}
-
static inline int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
struct serio *serio))
{
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 4d7599869..abf31358f 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -204,7 +204,7 @@ extern struct task_group root_task_group;
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
- .run_list = LIST_HEAD_INIT(tsk.run_list), \
+ .node = NULL, \
.time_slice = HZ, \
.tasks = LIST_HEAD_INIT(tsk.tasks), \
INIT_PUSHABLE_TASKS(tsk) \
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 64184d27e..d641a18ab 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -226,6 +226,21 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
/**
+ * cros_ec_cmd_xfer_status - Send a command to the ChromeOS EC
+ *
+ * This function is identical to cros_ec_cmd_xfer, except it returns success
+ * status only if both the command was transmitted successfully and the EC
+ * replied with success status. It's not necessary to check msg->result when
+ * using this function.
+ *
+ * @ec_dev: EC device
+ * @msg: Message to write
+ * @return: Num. of bytes transferred on success, <0 on failure
+ */
+int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg);
+
+/**
* cros_ec_remove - Remove a ChromeOS EC
*
* Call this to deregister a ChromeOS EC, then clean up any private data.
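
A hedged sketch of how a wrapper with the semantics documented above might look: transfer the message, then map an EC-level failure to a host error code so callers need not inspect msg->result. The -EPROTO mapping below is an assumption for illustration, not a statement of the mainline body:

#include <linux/device.h>
#include <linux/mfd/cros_ec.h>

int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
                            struct cros_ec_command *msg)
{
    int ret;

    ret = cros_ec_cmd_xfer(ec_dev, msg);
    if (ret < 0)
        dev_err(ec_dev->dev, "Command xfer error (err:%d)\n", ret);
    else if (msg->result != EC_RES_SUCCESS)
        return -EPROTO;         /* EC replied, but with an error status */

    return ret;                 /* number of bytes transferred */
}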
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8b425c663..ec39a086f 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -264,12 +264,12 @@ enum {
* callbacks.
*/
MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1),
- /* Build identity map between hwirq and irq */
- MSI_FLAG_IDENTITY_MAP = (1 << 2),
/* Support multiple PCI MSI interrupts */
- MSI_FLAG_MULTI_PCI_MSI = (1 << 3),
+ MSI_FLAG_MULTI_PCI_MSI = (1 << 2),
/* Support PCI MSIX interrupts */
- MSI_FLAG_PCI_MSIX = (1 << 4),
+ MSI_FLAG_PCI_MSIX = (1 << 3),
+ /* Needs early activate, required for PCI */
+ MSI_FLAG_ACTIVATE_EARLY = (1 << 4),
};
int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index bdbd260d2..77cd44e0f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -59,6 +59,7 @@ struct sched_param {
#include <linux/gfp.h>
#include <linux/magic.h>
#include <linux/cgroup-defs.h>
+#include <linux/skip_lists.h>
#include <asm/processor.h>
@@ -1477,7 +1478,7 @@ struct task_struct {
#ifdef CONFIG_SCHED_BFS
int time_slice;
u64 deadline;
- struct list_head run_list;
+ skiplist_node *node; /* Skip list node id */
u64 last_ran;
u64 sched_time; /* sched_clock time spent running */
#ifdef CONFIG_SMT_NICE
diff --git a/include/linux/serio.h b/include/linux/serio.h
index df4ab5de1..c733cff44 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -31,7 +31,8 @@ struct serio {
struct serio_device_id id;
- spinlock_t lock; /* protects critical sections from port's interrupt handler */
+ /* Protects critical sections from port's interrupt handler */
+ spinlock_t lock;
int (*write)(struct serio *, unsigned char);
int (*open)(struct serio *);
@@ -40,16 +41,29 @@ struct serio {
void (*stop)(struct serio *);
struct serio *parent;
- struct list_head child_node; /* Entry in parent->children list */
+ /* Entry in parent->children list */
+ struct list_head child_node;
struct list_head children;
- unsigned int depth; /* level of nesting in serio hierarchy */
+ /* Level of nesting in serio hierarchy */
+ unsigned int depth;
- struct serio_driver *drv; /* accessed from interrupt, must be protected by serio->lock and serio->sem */
- struct mutex drv_mutex; /* protects serio->drv so attributes can pin driver */
+ /*
+ * serio->drv is accessed from interrupt handlers; when modifying
+ * caller should acquire serio->drv_mutex and serio->lock.
+ */
+ struct serio_driver *drv;
+ /* Protects serio->drv so attributes can pin current driver */
+ struct mutex drv_mutex;
struct device dev;
struct list_head node;
+
+ /*
+ * For use by PS/2 layer when several ports share hardware and
+ * may get indigestion when exposed to concurrent access (i8042).
+ */
+ struct mutex *ps2_cmd_mutex;
};
#define to_serio_port(d) container_of(d, struct serio, dev)
diff --git a/include/linux/skip_lists.h b/include/linux/skip_lists.h
new file mode 100644
index 000000000..c19a6ea62
--- /dev/null
+++ b/include/linux/skip_lists.h
@@ -0,0 +1,27 @@
+#ifndef _LINUX_SKIP_LISTS_H
+#define _LINUX_SKIP_LISTS_H
+typedef u64 keyType;
+typedef void *valueType;
+
+typedef struct nodeStructure skiplist_node;
+
+struct nodeStructure {
+ int level; /* Levels in this structure */
+ keyType key;
+ valueType value;
+ skiplist_node *next[16];
+ skiplist_node *prev[16];
+};
+
+typedef struct listStructure {
+ int entries;
+ int level; /* Maximum level of the list
+ (1 more than the number of levels in the list) */
+ skiplist_node *header; /* pointer to header */
+} skiplist;
+
+skiplist_node *skiplist_init(void);
+skiplist *new_skiplist(skiplist_node *slnode);
+skiplist_node *skiplist_insert(skiplist_node *slnode, skiplist *l, keyType key, valueType value, unsigned int randseed);
+void skiplist_delnode(skiplist_node *slnode, skiplist *l, skiplist_node *node);
+#endif /* _LINUX_SKIP_LISTS_H */
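
A hedged usage sketch for this new API, kernel context assumed. Whether skiplist_init() and new_skiplist() report failure by returning NULL is an assumption here; the O() notes match the bfs.c comments further down:

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/skip_lists.h>

static skiplist_node *sl_head;  /* sentinel node from skiplist_init() */
static skiplist *sl;

static int example_setup(void)
{
    /* NULL-on-failure semantics are assumed for both constructors */
    sl_head = skiplist_init();
    if (!sl_head)
        return -ENOMEM;
    sl = new_skiplist(sl_head);
    return sl ? 0 : -ENOMEM;
}

static skiplist_node *example_queue(struct task_struct *p, u64 key,
                                    unsigned int randseed)
{
    /* O(log n) insert; equal keys stay in FIFO order of insertion */
    return skiplist_insert(sl_head, sl, key, p, randseed);
}

static void example_dequeue(skiplist_node *node)
{
    /* O(k), k = node level; the stored node makes lookup unnecessary */
    skiplist_delnode(sl_head, sl, node);
}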
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 51440131d..28c5da6fd 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -330,24 +330,32 @@ TRACE_EVENT(itimer_expire,
#ifdef CONFIG_NO_HZ_COMMON
#define TICK_DEP_NAMES \
- tick_dep_name(NONE) \
+ tick_dep_mask_name(NONE) \
tick_dep_name(POSIX_TIMER) \
tick_dep_name(PERF_EVENTS) \
tick_dep_name(SCHED) \
tick_dep_name_end(CLOCK_UNSTABLE)
#undef tick_dep_name
+#undef tick_dep_mask_name
#undef tick_dep_name_end
-#define tick_dep_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
-#define tick_dep_name_end(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
+/* The MASK definitions convert to their bits, and those need processing too */
+#define tick_dep_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_BIT_##sdep); \
+ TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
+#define tick_dep_name_end(sdep) TRACE_DEFINE_ENUM(TICK_DEP_BIT_##sdep); \
+ TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
+/* NONE only has a mask defined for it */
+#define tick_dep_mask_name(sdep) TRACE_DEFINE_ENUM(TICK_DEP_MASK_##sdep);
TICK_DEP_NAMES
#undef tick_dep_name
+#undef tick_dep_mask_name
#undef tick_dep_name_end
#define tick_dep_name(sdep) { TICK_DEP_MASK_##sdep, #sdep },
+#define tick_dep_mask_name(sdep) { TICK_DEP_MASK_##sdep, #sdep },
#define tick_dep_name_end(sdep) { TICK_DEP_MASK_##sdep, #sdep }
#define show_tick_dep_name(val) \
diff --git a/init/Kconfig b/init/Kconfig
index a51000610..0a06fa1ef 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1084,14 +1084,17 @@ config CGROUP_WRITEBACK
menuconfig CGROUP_SCHED
bool "CPU controller"
- depends on !SCHED_BFS
default n
help
This feature lets CPU scheduler recognize task groups and control CPU
bandwidth allocation to such task groups. It uses cgroups to group
- tasks.
+ tasks. In combination with BFS this is purely a STUB to create the
+ files associated with the CPU controller cgroup, but most of the
+ controls do nothing. This is useful for working in environments
+ and with applications that will only work if this control group
+ is present.
-if CGROUP_SCHED
+if CGROUP_SCHED && !SCHED_BFS
config FAIR_GROUP_SCHED
bool "Group scheduling for SCHED_OTHER"
depends on CGROUP_SCHED
diff --git a/kernel/Makefile b/kernel/Makefile
index e2ec54e2b..dd84575cb 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -9,7 +9,7 @@ obj-y = fork.o exec_domain.o panic.o \
extable.o params.o \
kthread.o sys_ni.o nsproxy.o \
notifier.o ksysfs.o cred.o reboot.o \
- async.o range.o smpboot.o
+ async.o range.o smpboot.o skip_lists.o
obj-$(CONFIG_MULTIUSER) += groups.o
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 43d43a2d5..e68c0a735 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -242,18 +242,6 @@ unlock:
return ret;
}
-static void event_function_local(struct perf_event *event, event_f func, void *data)
-{
- struct event_function_struct efs = {
- .event = event,
- .func = func,
- .data = data,
- };
-
- int ret = event_function(&efs);
- WARN_ON_ONCE(ret);
-}
-
static void event_function_call(struct perf_event *event, event_f func, void *data)
{
struct perf_event_context *ctx = event->ctx;
@@ -303,6 +291,54 @@ again:
raw_spin_unlock_irq(&ctx->lock);
}
+/*
+ * Similar to event_function_call() + event_function(), but hard assumes IRQs
+ * are already disabled and we're on the right CPU.
+ */
+static void event_function_local(struct perf_event *event, event_f func, void *data)
+{
+ struct perf_event_context *ctx = event->ctx;
+ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+ struct task_struct *task = READ_ONCE(ctx->task);
+ struct perf_event_context *task_ctx = NULL;
+
+ WARN_ON_ONCE(!irqs_disabled());
+
+ if (task) {
+ if (task == TASK_TOMBSTONE)
+ return;
+
+ task_ctx = ctx;
+ }
+
+ perf_ctx_lock(cpuctx, task_ctx);
+
+ task = ctx->task;
+ if (task == TASK_TOMBSTONE)
+ goto unlock;
+
+ if (task) {
+ /*
+ * We must be either inactive or active and the right task,
+ * otherwise we're screwed, since we cannot IPI to somewhere
+ * else.
+ */
+ if (ctx->is_active) {
+ if (WARN_ON_ONCE(task != current))
+ goto unlock;
+
+ if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
+ goto unlock;
+ }
+ } else {
+ WARN_ON_ONCE(&cpuctx->ctx != ctx);
+ }
+
+ func(event, cpuctx, ctx, data);
+unlock:
+ perf_ctx_unlock(cpuctx, task_ctx);
+}
+
#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
PERF_FLAG_FD_OUTPUT |\
PERF_FLAG_PID_CGROUP |\
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index b7a525ab2..8c50276b6 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -172,8 +172,10 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
err = -EAGAIN;
ptep = page_check_address(page, mm, addr, &ptl, 0);
- if (!ptep)
+ if (!ptep) {
+ mem_cgroup_cancel_charge(kpage, memcg, false);
goto unlock;
+ }
get_page(kpage);
page_add_new_anon_rmap(kpage, vma, addr, false);
@@ -200,7 +202,6 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
err = 0;
unlock:
- mem_cgroup_cancel_charge(kpage, memcg, false);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
unlock_page(page);
return err;
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 38e89ce7b..0afe671f1 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -324,7 +324,7 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
struct msi_domain_ops *ops = info->ops;
msi_alloc_info_t arg;
struct msi_desc *desc;
- int i, ret, virq = -1;
+ int i, ret, virq;
ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
if (ret)
@@ -332,12 +332,8 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
for_each_msi_entry(desc, dev) {
ops->set_desc(&arg, desc);
- if (info->flags & MSI_FLAG_IDENTITY_MAP)
- virq = (int)ops->get_hwirq(info, &arg);
- else
- virq = -1;
- virq = __irq_domain_alloc_irqs(domain, virq, desc->nvec_used,
+ virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
dev_to_node(dev), &arg, false);
if (virq < 0) {
ret = -ENOSPC;
@@ -361,6 +357,17 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
else
dev_dbg(dev, "irq [%d-%d] for MSI\n",
virq, virq + desc->nvec_used - 1);
+ /*
+ * This flag is set by the PCI layer as we need to activate
+ * the MSI entries before the PCI layer enables MSI in the
+ * card. Otherwise the card latches a random msi message.
+ */
+ if (info->flags & MSI_FLAG_ACTIVATE_EARLY) {
+ struct irq_data *irq_data;
+
+ irq_data = irq_domain_get_irq_data(domain, desc->irq);
+ irq_domain_activate_irq(irq_data);
+ }
}
return 0;
diff --git a/kernel/sched/bfs.c b/kernel/sched/bfs.c
index 6fd00c5ae..4168a5527 100644
--- a/kernel/sched/bfs.c
+++ b/kernel/sched/bfs.c
@@ -74,6 +74,7 @@
#include <linux/context_tracking.h>
#include <linux/sched/prio.h>
#include <linux/tick.h>
+#include <linux/skip_lists.h>
#include <asm/irq_regs.h>
#include <asm/switch_to.h>
@@ -136,7 +137,7 @@
void print_scheduler_version(void)
{
- printk(KERN_INFO "BFS CPU scheduler v0.472 by Con Kolivas.\n");
+ printk(KERN_INFO "BFS CPU scheduler v0.490 by Con Kolivas.\n");
}
/*
@@ -190,8 +191,6 @@ struct global_rq {
unsigned long nr_running;
unsigned long nr_uninterruptible;
unsigned long long nr_switches;
- struct list_head queue[PRIO_LIMIT];
- DECLARE_BITMAP(prio_bitmap, PRIO_LIMIT + 1);
unsigned long qnr; /* queued not running */
#ifdef CONFIG_SMP
cpumask_t cpu_idle_map;
@@ -204,6 +203,9 @@ struct global_rq {
raw_spinlock_t iso_lock;
int iso_ticks;
bool iso_refractory;
+
+ skiplist_node *node;
+ skiplist *sl;
};
#ifdef CONFIG_SMP
@@ -538,24 +540,25 @@ static inline bool deadline_after(u64 deadline, u64 time)
}
/*
- * A task that is queued but not running will be on the grq run list.
- * A task that is not running or queued will not be on the grq run list.
- * A task that is currently running will have ->on_cpu set but not on the
- * grq run list.
+ * A task that is not running or queued will not have a node set.
+ * A task that is queued but not running will have a node set.
+ * A task that is currently running will have ->on_cpu set but no node set.
*/
static inline bool task_queued(struct task_struct *p)
{
- return (!list_empty(&p->run_list));
+ return p->node;
}
/*
- * Removing from the global runqueue. Enter with grq locked.
+ * Removing from the global runqueue. Enter with grq locked. Deleting a task
+ * from the skip list is done via the stored node reference in the task struct
+ * and does not require a full lookup. Thus it occurs in O(k) time where k
+ * is the "level" of the list the task was stored at - usually < 4, max 16.
*/
static void dequeue_task(struct task_struct *p)
{
- list_del_init(&p->run_list);
- if (list_empty(grq.queue + p->prio))
- __clear_bit(p->prio, grq.prio_bitmap);
+ skiplist_delnode(grq.node, grq.sl, p->node);
+ p->node = NULL;
sched_info_dequeued(task_rq(p), p);
}
@@ -583,6 +586,9 @@ static bool isoprio_suitable(void)
*/
static void enqueue_task(struct task_struct *p, struct rq *rq)
{
+ unsigned int randseed;
+ u64 sl_id;
+
if (!rt_task(p)) {
/* Check it hasn't gotten rt from PI */
if ((idleprio_task(p) && idleprio_suitable(p)) ||
@@ -591,8 +597,32 @@ static void enqueue_task(struct task_struct *p, struct rq *rq)
else
p->prio = NORMAL_PRIO;
}
- __set_bit(p->prio, grq.prio_bitmap);
- list_add_tail(&p->run_list, grq.queue + p->prio);
+ /*
+ * The sl_id key passed to the skiplist generates a sorted list.
+ * Realtime and sched iso tasks run FIFO so they only need be sorted
+ * according to priority. The skiplist will put tasks of the same
+ * key inserted later in FIFO order. Tasks of sched normal, batch
+ * and idleprio are sorted according to their deadlines. Idleprio
+ * tasks are offset by an impossibly large deadline value ensuring
+ * they get sorted into last positions, but still according to their
+ * own deadlines. This creates a "landscape" of skiplists running
+ * from priority 0 realtime in first place to the lowest priority
+ * idleprio tasks last. Skiplist insertion is an O(log n) process.
+ */
+ if (p->prio <= ISO_PRIO)
+ sl_id = p->prio;
+ else {
+ sl_id = p->deadline;
+ /* Set it to cope with 4 left shifts with locality_diff */
+ if (p->prio == IDLE_PRIO)
+ sl_id |= 0x0F00000000000000;
+ }
+ /*
+ * Some architectures don't have better than microsecond resolution
+ * so mask out ~microseconds as the random seed for skiplist insertion.
+ */
+ randseed = (grq.niffies >> 10) & 0xFFFFFFFF;
+ p->node = skiplist_insert(grq.node, grq.sl, sl_id, p, randseed);
sched_info_queued(rq, p);
}
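
A worked example of the resulting key "landscape", using hypothetical values. In practice deadlines are in niffies and therefore dwarf any priority value, so normal and idleprio tasks always sort after realtime and ISO ones:

sl_id(RT task, prio 10)              = 10
sl_id(SCHED_ISO task)                = ISO_PRIO
sl_id(SCHED_NORMAL, deadline 1000)   = 1000   (earlier deadline runs first)
sl_id(SCHED_NORMAL, deadline 2000)   = 2000
sl_id(SCHED_IDLEPRIO, deadline 1000) = 0x0F00000000000000 | 1000   (sorted last)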
@@ -647,6 +677,113 @@ static inline int queued_notrunning(void)
return grq.qnr;
}
+#ifdef CONFIG_SMT_NICE
+static const cpumask_t *thread_cpumask(int cpu);
+
+/* Find the best real time priority running on any SMT siblings of cpu and if
+ * none are running, the static priority of the best deadline task running.
+ * The lookups to the other runqueues are done locklessly as the occasional wrong
+ * value would be harmless. */
+static int best_smt_bias(struct rq *this_rq)
+{
+ int other_cpu, best_bias = 0;
+
+ for_each_cpu(other_cpu, &this_rq->thread_mask) {
+ struct rq *rq = cpu_rq(other_cpu);
+
+ if (rq_idle(rq))
+ continue;
+ if (!rq->online)
+ continue;
+ if (!rq->rq_mm)
+ continue;
+ if (likely(rq->rq_smt_bias > best_bias))
+ best_bias = rq->rq_smt_bias;
+ }
+ return best_bias;
+}
+
+static int task_prio_bias(struct task_struct *p)
+{
+ if (rt_task(p))
+ return 1 << 30;
+ else if (task_running_iso(p))
+ return 1 << 29;
+ else if (task_running_idle(p))
+ return 0;
+ return MAX_PRIO - p->static_prio;
+}
+
+static bool smt_always_schedule(struct task_struct __maybe_unused *p, struct rq __maybe_unused *this_rq)
+{
+ return true;
+}
+
+static bool (*smt_schedule)(struct task_struct *p, struct rq *this_rq) = &smt_always_schedule;
+
+/* We've already decided p can run on CPU, now test if it shouldn't for SMT
+ * nice reasons. */
+static bool smt_should_schedule(struct task_struct *p, struct rq *this_rq)
+{
+ int best_bias, task_bias;
+
+ /* Kernel threads always run */
+ if (unlikely(!p->mm))
+ return true;
+ if (rt_task(p))
+ return true;
+ if (!idleprio_suitable(p))
+ return true;
+ best_bias = best_smt_bias(this_rq);
+ /* The smt siblings are all idle or running IDLEPRIO */
+ if (best_bias < 1)
+ return true;
+ task_bias = task_prio_bias(p);
+ if (task_bias < 1)
+ return false;
+ if (task_bias >= best_bias)
+ return true;
+ /* Dither 25% cpu of normal tasks regardless of nice difference */
+ if (best_bias % 4 == 1)
+ return true;
+ /* Sorry, you lose */
+ return false;
+}
+
+static unsigned long cpu_load_avg(struct rq *rq)
+{
+ return rq->soft_affined * SCHED_CAPACITY_SCALE;
+}
+
+/*
+ * This is the proportion of SCHED_CAPACITY_SCALE (1024) used when each thread
+ * of a CPU with SMT siblings is in use.
+ */
+#define SCHED_SMT_LOAD (890)
+
+/*
+ * The load of a CPU with SMT siblings should be considered to be the load
+ * from all of its siblings, and will thus be >1 if both threads are in use,
+ * since they are not full cores.
+ */
+static unsigned long smt_load_avg(struct rq *rq)
+{
+ unsigned long load = rq->soft_affined * SCHED_SMT_LOAD;
+ int cpu;
+
+ for_each_cpu(cpu, thread_cpumask(rq->cpu))
+ load += cpu_rq(cpu)->soft_affined * SCHED_SMT_LOAD;
+ return load;
+}
+
+static unsigned long (*rq_load_avg)(struct rq *rq) = &cpu_load_avg;
+#else
+#define smt_schedule(p, this_rq) (true)
+static inline unsigned long rq_load_avg(struct rq *rq)
+{
+ return rq->soft_affined * SCHED_CAPACITY_SCALE;
+}
+#endif
#ifdef CONFIG_SMP
/*
* The cpu_idle_map stores a bitmap of all the CPUs currently idle to
@@ -691,7 +828,7 @@ static inline bool scaling_rq(struct rq *rq);
* lowest value would give the most suitable CPU to schedule p onto next. The
* order works out to be the following:
*
- * Same core, idle or busy cache, idle or busy threads
+ * Same thread, idle or busy cache, idle or busy threads
* Other core, same cache, idle or busy cache, idle threads.
* Same node, other CPU, idle cache, idle threads.
* Same node, other CPU, busy cache, idle threads.
@@ -729,13 +866,13 @@ static int best_mask_cpu(int best_cpu, struct rq *rq, cpumask_t *tmpmask)
#ifdef CONFIG_SCHED_MC
else if (locality == 2)
ranking |= CPUIDLE_DIFF_CORE;
- if (!(tmp_rq->cache_idle(cpu_tmp)))
+ else if (!(tmp_rq->cache_idle(tmp_rq)))
ranking |= CPUIDLE_CACHE_BUSY;
#endif
#ifdef CONFIG_SCHED_SMT
if (locality == 1)
ranking |= CPUIDLE_DIFF_THREAD;
- if (!(tmp_rq->siblings_idle(cpu_tmp)))
+ if (!(tmp_rq->siblings_idle(tmp_rq)))
ranking |= CPUIDLE_THREAD_BUSY;
#endif
if (scaling_rq(tmp_rq))
@@ -763,90 +900,18 @@ bool cpus_share_cache(int this_cpu, int that_cpu)
return (this_rq->cpu_locality[that_cpu] < 3);
}
-#ifdef CONFIG_SMT_NICE
-static const cpumask_t *thread_cpumask(int cpu);
-
-/* Find the best real time priority running on any SMT siblings of cpu and if
- * none are running, the static priority of the best deadline task running.
- * The lookups to the other runqueues is done lockless as the occasional wrong
- * value would be harmless. */
-static int best_smt_bias(int cpu)
-{
- int other_cpu, best_bias = 0;
-
- for_each_cpu(other_cpu, thread_cpumask(cpu)) {
- struct rq *rq;
-
- if (other_cpu == cpu)
- continue;
- rq = cpu_rq(other_cpu);
- if (rq_idle(rq))
- continue;
- if (!rq->online)
- continue;
- if (!rq->rq_mm)
- continue;
- if (likely(rq->rq_smt_bias > best_bias))
- best_bias = rq->rq_smt_bias;
- }
- return best_bias;
-}
-
-static int task_prio_bias(struct task_struct *p)
-{
- if (rt_task(p))
- return 1 << 30;
- else if (task_running_iso(p))
- return 1 << 29;
- else if (task_running_idle(p))
- return 0;
- return MAX_PRIO - p->static_prio;
-}
-
-/* We've already decided p can run on CPU, now test if it shouldn't for SMT
- * nice reasons. */
-static bool smt_should_schedule(struct task_struct *p, int cpu)
-{
- int best_bias, task_bias;
-
- /* Kernel threads always run */
- if (unlikely(!p->mm))
- return true;
- if (rt_task(p))
- return true;
- if (!idleprio_suitable(p))
- return true;
- best_bias = best_smt_bias(cpu);
- /* The smt siblings are all idle or running IDLEPRIO */
- if (best_bias < 1)
- return true;
- task_bias = task_prio_bias(p);
- if (task_bias < 1)
- return false;
- if (task_bias >= best_bias)
- return true;
- /* Dither 25% cpu of normal tasks regardless of nice difference */
- if (best_bias % 4 == 1)
- return true;
- /* Sorry, you lose */
- return false;
-}
-#else
-#define smt_should_schedule(p, cpu) (1)
-#endif
-
static bool resched_best_idle(struct task_struct *p)
{
cpumask_t tmpmask;
+ struct rq *rq;
int best_cpu;
cpumask_and(&tmpmask, &p->cpus_allowed, &grq.cpu_idle_map);
best_cpu = best_mask_cpu(task_cpu(p), task_rq(p), &tmpmask);
-#ifdef CONFIG_SMT_NICE
- if (!smt_should_schedule(p, best_cpu))
+ rq = cpu_rq(best_cpu);
+ if (!smt_schedule(p, rq))
return false;
-#endif
- resched_curr(cpu_rq(best_cpu));
+ resched_curr(rq);
return true;
}
@@ -953,6 +1018,26 @@ static int effective_prio(struct task_struct *p)
}
/*
+ * Update the load average for feeding into cpu frequency governors. Use a
+ * rolling average with a time constant of ~32ms.
+ */
+static void update_load_avg(struct rq *rq)
+{
+ /* rq clock can go backwards so skip update if that happens */
+ if (likely(rq->clock > rq->load_update)) {
+ unsigned long us_interval = (rq->clock - rq->load_update) >> 10;
+ long load;
+
+ load = rq->load_avg - (rq->load_avg * us_interval * 80 / 32768 / 128);
+ if (unlikely(load < 0))
+ load = 0;
+ load += rq->soft_affined * rq_load_avg(rq) * us_interval * 80 / 32768 / 128;
+ rq->load_avg = load;
+ }
+ rq->load_update = rq->clock;
+}
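+
+/*
+ * A worked example of the decay above (illustrative arithmetic only): for
+ * us_interval = 1000 (1ms), the old average is reduced by
+ * load_avg * 1000 * 80 / 32768 / 128, i.e. roughly 2% of its value, and the
+ * new contribution is weighted by the same factor, giving the rolling
+ * average described above.
+ */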
+
+/*
* activate_task - move a task to the runqueue. Enter with grq locked.
*/
static void activate_task(struct task_struct *p, struct rq *rq)
@@ -978,7 +1063,8 @@ static void activate_task(struct task_struct *p, struct rq *rq)
p->on_rq = 1;
grq.nr_running++;
inc_qnr();
- cpufreq_trigger(grq.niffies, rq->soft_affined);
+ update_load_avg(rq);
+ cpufreq_trigger(grq.niffies, rq->load_avg);
}
static inline void clear_sticky(struct task_struct *p);
@@ -995,19 +1081,22 @@ static inline void deactivate_task(struct task_struct *p, struct rq *rq)
p->on_rq = 0;
grq.nr_running--;
clear_sticky(p);
- cpufreq_trigger(grq.niffies, rq->soft_affined);
+ update_load_avg(rq);
+ cpufreq_trigger(grq.niffies, rq->load_avg);
}
#ifdef CONFIG_SMP
void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
+ unsigned int tcpu;
+
#ifdef CONFIG_LOCKDEP
/*
* The caller should hold grq lock.
*/
WARN_ON_ONCE(debug_locks && !lockdep_is_held(&grq.lock));
#endif
- if (task_cpu(p) == cpu)
+ if ((tcpu = task_cpu(p)) == cpu)
return;
trace_sched_migrate_task(p, cpu);
perf_event_task_migrate(p);
@@ -1019,8 +1108,21 @@ void set_task_cpu(struct task_struct *p, unsigned int cpu)
*/
smp_wmb();
if (p->on_rq) {
- task_rq(p)->soft_affined--;
- cpu_rq(cpu)->soft_affined++;
+ struct rq *rq;
+
+ /*
+		 * set_task_cpu can be called for a remote CPU, so use
+		 * other_cpufreq_trigger, explicitly telling it which CPU is
+		 * being updated, as the value of soft_affined has changed.
+ */
+ rq = task_rq(p);
+ rq->soft_affined--;
+ update_load_avg(rq);
+ other_cpufreq_trigger(tcpu, grq.niffies, rq->load_avg);
+ rq = cpu_rq(cpu);
+ rq->soft_affined++;
+ update_load_avg(rq);
+ other_cpufreq_trigger(cpu, grq.niffies, rq->load_avg);
}
task_thread_info(p)->cpu = cpu;
}
@@ -1353,13 +1455,10 @@ static inline bool needs_other_cpu(struct task_struct *p, int cpu)
return false;
}
-/*
- * When all else is equal, still prefer this_rq.
- */
static void try_preempt(struct task_struct *p, struct rq *this_rq)
{
+ int cpu, pcpu, highest_prio, highest_cpu;
struct rq *highest_prio_rq = NULL;
- int cpu, highest_prio;
u64 latest_deadline;
cpumask_t tmp;
@@ -1383,13 +1482,13 @@ static void try_preempt(struct task_struct *p, struct rq *this_rq)
return;
/* See if this task can preempt the task on the current CPU first. */
- cpu = cpu_of(this_rq);
- if (cpumask_test_cpu(cpu, &tmp)) {
- if (smt_should_schedule(p, cpu) && can_preempt(p, this_rq->rq_prio, this_rq->rq_deadline)) {
+ pcpu = cpu_of(this_rq);
+ if (likely(cpumask_test_cpu(pcpu, &tmp))) {
+ if (smt_schedule(p, this_rq) && can_preempt(p, this_rq->rq_prio, this_rq->rq_deadline)) {
resched_curr(this_rq);
return;
}
- cpumask_clear_cpu(cpu, &tmp);
+ cpumask_clear_cpu(pcpu, &tmp);
}
highest_prio = latest_deadline = 0;
@@ -1398,37 +1497,40 @@ static void try_preempt(struct task_struct *p, struct rq *this_rq)
for_each_cpu(cpu, &tmp) {
struct rq *rq;
int rq_prio;
+ u64 dl;
rq = cpu_rq(cpu);
rq_prio = rq->rq_prio;
if (rq_prio < highest_prio)
continue;
+ dl = rq->rq_deadline;
+ if (!sched_interactive && pcpu != cpu)
+ dl <<= locality_diff(pcpu, rq);
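+		/*
+		 * Illustrative effect of the shift above: locality 1 (an SMT
+		 * sibling) doubles the virtual deadline, locality 2 (same
+		 * cache) quadruples it, and locality 3 (another node)
+		 * multiplies it by 8, biasing preemption toward nearby CPUs.
+		 */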
if (rq_prio > highest_prio ||
- deadline_after(rq->rq_deadline, latest_deadline)) {
- latest_deadline = rq->rq_deadline;
+ deadline_after(dl, latest_deadline)) {
+ latest_deadline = dl;
highest_prio = rq_prio;
+ highest_cpu = cpu;
highest_prio_rq = rq;
}
}
- if (likely(highest_prio_rq)) {
-#ifdef CONFIG_SMT_NICE
- cpu = cpu_of(highest_prio_rq);
- if (!smt_should_schedule(p, cpu))
- return;
-#endif
- if (can_preempt(p, highest_prio, latest_deadline)) {
- /*
- * If we have decided this task should preempt this CPU,
- * set the task's CPU to match so there is no discrepancy
- * in earliest_deadline_task which biases away tasks with
- * a different CPU set. This means waking tasks are
- * treated differently to rescheduling tasks.
- */
- set_task_cpu(p, cpu);
- resched_curr(highest_prio_rq);
- }
+ if (unlikely(!highest_prio_rq))
+ return;
+ if (!smt_schedule(p, highest_prio_rq))
+ return;
+ if (can_preempt(p, highest_prio, latest_deadline)) {
+ /*
+ * If we have decided this task should preempt this CPU,
+ * set the task's CPU to match so there is no discrepancy
+ * in earliest_deadline_task which biases away tasks with
+ * a different CPU set. This means waking tasks are
+ * treated differently to rescheduling tasks in
+ * interactive mode.
+ */
+ set_task_cpu(p, highest_cpu);
+ resched_curr(highest_prio_rq);
}
}
static int __set_cpus_allowed_ptr(struct task_struct *p,
@@ -1723,7 +1825,7 @@ int sched_fork(unsigned long __maybe_unused clone_flags, struct task_struct *p)
p->sched_reset_on_fork = 0;
}
- INIT_LIST_HEAD(&p->run_list);
+ p->node = NULL;
#ifdef CONFIG_SCHED_INFO
if (unlikely(sched_info_on()))
memset(&p->sched_info, 0, sizeof(p->sched_info));
@@ -3072,6 +3174,8 @@ void scheduler_tick(void)
/* grq lock not grabbed, so only update rq clock */
update_rq_clock(rq);
update_cpu_clock_tick(rq, rq->curr);
+ update_load_avg(rq);
+ cpufreq_trigger(grq.niffies, rq->load_avg);
if (!rq_idle(rq))
task_running_tick(rq);
else
@@ -3280,101 +3384,56 @@ found_middle:
}
/*
- * O(n) lookup of all tasks in the global runqueue. The real brainfuck
- * of lock contention and O(n). It's not really O(n) as only the queued,
- * but not running tasks are scanned, and is O(n) queued in the worst case
- * scenario only because the right task can be found before scanning all of
- * them.
- * Tasks are selected in this order:
- * Real time tasks are selected purely by their static priority and in the
- * order they were queued, so the lowest value idx, and the first queued task
- * of that priority value is chosen.
- * If no real time tasks are found, the SCHED_ISO priority is checked, and
- * all SCHED_ISO tasks have the same priority value, so they're selected by
- * the earliest deadline value.
- * If no SCHED_ISO tasks are found, SCHED_NORMAL tasks are selected by the
- * earliest deadline.
- * Finally if no SCHED_NORMAL tasks are found, SCHED_IDLEPRIO tasks are
- * selected by the earliest deadline.
+ * Task selection with skiplists is a simple matter of picking off the first
+ * task in the sorted list, an O(1) operation. The only time it takes longer
+ * is if tasks do not have suitable affinity, in which case we iterate over
+ * entries until we find the first that does. The worst case, where no tasks
+ * have suitable affinity, is O(n).
*/
static inline struct
task_struct *earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle)
{
- struct task_struct *edt = NULL;
- unsigned long idx = -1;
+ struct task_struct *edt = idle;
+ skiplist_node *node = grq.node;
+ u64 earliest_deadline = ~0ULL;
- do {
- struct list_head *queue;
- struct task_struct *p;
- u64 earliest_deadline;
-
- idx = next_sched_bit(grq.prio_bitmap, ++idx);
- if (idx >= PRIO_LIMIT)
- return idle;
- queue = grq.queue + idx;
-
- if (idx < MAX_RT_PRIO) {
- /* We found an rt task */
- list_for_each_entry(p, queue, run_list) {
- /* Make sure cpu affinity is ok */
- if (needs_other_cpu(p, cpu))
- continue;
- edt = p;
- goto out_take;
- }
- /*
- * None of the RT tasks at this priority can run on
- * this cpu
- */
+ while ((node = node->next[0]) != grq.node) {
+ struct task_struct *p = node->value;
+ int tcpu;
+
+ /* Make sure affinity is ok */
+ if (needs_other_cpu(p, cpu))
continue;
- }
- /*
- * No rt tasks. Find the earliest deadline task. Now we're in
- * O(n) territory.
- */
- earliest_deadline = ~0ULL;
- list_for_each_entry(p, queue, run_list) {
+ if (!smt_schedule(p, rq))
+ continue;
+
+ if (!sched_interactive && (tcpu = task_cpu(p)) != cpu) {
u64 dl;
- /* Make sure cpu affinity is ok */
- if (needs_other_cpu(p, cpu))
+ if (task_sticky(p) && scaling_rq(rq))
continue;
-
-#ifdef CONFIG_SMT_NICE
- if (!smt_should_schedule(p, cpu))
+ dl = p->deadline << locality_diff(tcpu, rq);
+ if (unlikely(!deadline_before(dl, earliest_deadline)))
continue;
-#endif
- /*
- * Soft affinity happens here by not scheduling a task
- * with its sticky flag set that ran on a different CPU
- * last when the CPU is scaling, or by greatly biasing
- * against its deadline when not, based on cpu cache
- * locality.
- */
- if (sched_interactive)
- dl = p->deadline;
- else {
- int tcpu = task_cpu(p);
-
- if (tcpu != cpu && task_sticky(p) && scaling_rq(rq))
- continue;
- dl = p->deadline << locality_diff(tcpu, rq);
- }
-
- if (deadline_before(dl, earliest_deadline)) {
- earliest_deadline = dl;
- edt = p;
- }
+ earliest_deadline = dl;
+ edt = p;
+ /* We continue even though we've found the earliest
+ * deadline task as the locality offset means there
+ * may be a better candidate after it. */
+ continue;
}
- } while (!edt);
-
-out_take:
- take_task(cpu, edt);
+	/* The deadline check here can only fail if we have already
+	 * encountered a better locality-adjusted deadline from another
+	 * CPU and set edt above. */
+ if (likely(p->deadline < earliest_deadline))
+ edt = p;
+ break;
+ }
+ if (likely(edt != idle))
+ take_task(cpu, edt);
return edt;
}
-
/*
* Print scheduling while atomic bug:
*/
@@ -3454,44 +3513,47 @@ static void reset_rq_task(struct rq *rq, struct task_struct *p)
}
#ifdef CONFIG_SMT_NICE
+static void check_no_siblings(struct rq __maybe_unused *this_rq) {}
+static void wake_no_siblings(struct rq __maybe_unused *this_rq) {}
+static void (*check_siblings)(struct rq *this_rq) = &check_no_siblings;
+static void (*wake_siblings)(struct rq *this_rq) = &wake_no_siblings;
+
/* Iterate over smt siblings when we've scheduled a process on cpu and decide
* whether they should continue running or be descheduled. */
-static void check_smt_siblings(int cpu)
+static void check_smt_siblings(struct rq *this_rq)
{
int other_cpu;
- for_each_cpu(other_cpu, thread_cpumask(cpu)) {
+ for_each_cpu(other_cpu, &this_rq->thread_mask) {
struct task_struct *p;
struct rq *rq;
- if (other_cpu == cpu)
- continue;
rq = cpu_rq(other_cpu);
if (rq_idle(rq))
continue;
if (!rq->online)
continue;
p = rq->curr;
- if (!smt_should_schedule(p, cpu)) {
+ if (!smt_should_schedule(p, this_rq)) {
set_tsk_need_resched(p);
smp_send_reschedule(other_cpu);
}
}
}
-static void wake_smt_siblings(int cpu)
+static void wake_smt_siblings(struct rq *this_rq)
{
int other_cpu;
if (!queued_notrunning())
return;
- for_each_cpu(other_cpu, thread_cpumask(cpu)) {
+ for_each_cpu(other_cpu, &this_rq->thread_mask) {
struct rq *rq;
- if (other_cpu == cpu)
- continue;
rq = cpu_rq(other_cpu);
+ if (!rq->online)
+ continue;
if (rq_idle(rq)) {
struct task_struct *p = rq->curr;
@@ -3501,8 +3563,8 @@ static void wake_smt_siblings(int cpu)
}
}
#else
-static void check_smt_siblings(int __maybe_unused cpu) {}
-static void wake_smt_siblings(int __maybe_unused cpu) {}
+static void check_siblings(struct rq __maybe_unused *this_rq) {}
+static void wake_siblings(struct rq __maybe_unused *this_rq) {}
#endif
/*
@@ -3639,7 +3701,7 @@ static void __sched notrace __schedule(bool preempt)
* again.
*/
set_rq_task(rq, prev);
- check_smt_siblings(cpu);
+ check_siblings(rq);
grq_unlock_irq();
goto rerun_prev_unlocked;
} else
@@ -3679,9 +3741,9 @@ static void __sched notrace __schedule(bool preempt)
unstick_task(rq, prev);
set_rq_task(rq, next);
if (next != idle)
- check_smt_siblings(cpu);
+ check_siblings(rq);
else
- wake_smt_siblings(cpu);
+ wake_siblings(rq);
grq.nr_switches++;
prev->on_cpu = false;
next->on_cpu = true;
@@ -3693,7 +3755,7 @@ static void __sched notrace __schedule(bool preempt)
cpu = cpu_of(rq);
idle = rq->idle;
} else {
- check_smt_siblings(cpu);
+ check_siblings(rq);
grq_unlock_irq();
}
@@ -7107,9 +7169,9 @@ int sched_cpu_dying(unsigned int cpu)
* Cheaper version of the below functions in case support for SMT and MC is
* compiled in but CPUs have no siblings.
*/
-static bool sole_cpu_idle(int cpu)
+static bool sole_cpu_idle(struct rq *rq)
{
- return rq_idle(cpu_rq(cpu));
+ return rq_idle(rq);
}
#endif
#ifdef CONFIG_SCHED_SMT
@@ -7118,9 +7180,9 @@ static const cpumask_t *thread_cpumask(int cpu)
return topology_sibling_cpumask(cpu);
}
/* All this CPU's SMT siblings are idle */
-static bool siblings_cpu_idle(int cpu)
+static bool siblings_cpu_idle(struct rq *rq)
{
- return cpumask_subset(thread_cpumask(cpu), &grq.cpu_idle_map);
+ return cpumask_subset(&rq->thread_mask, &grq.cpu_idle_map);
}
#endif
#ifdef CONFIG_SCHED_MC
@@ -7129,9 +7191,9 @@ static const cpumask_t *core_cpumask(int cpu)
return topology_core_cpumask(cpu);
}
/* All this CPU's shared cache siblings are idle */
-static bool cache_cpu_idle(int cpu)
+static bool cache_cpu_idle(struct rq *rq)
{
- return cpumask_subset(core_cpumask(cpu), &grq.cpu_idle_map);
+ return cpumask_subset(&rq->core_mask, &grq.cpu_idle_map);
}
#endif
@@ -7150,6 +7212,9 @@ void __init sched_init_smp(void)
{
struct sched_domain *sd;
int cpu, other_cpu;
+#ifdef CONFIG_SCHED_SMT
+ bool smt_threads = false;
+#endif
cpumask_var_t non_isolated_cpus;
@@ -7209,16 +7274,31 @@ void __init sched_init_smp(void)
if (rq->cpu_locality[other_cpu] > 2)
rq->cpu_locality[other_cpu] = 2;
}
- if (cpumask_weight(core_cpumask(cpu)) > 1)
+ if (cpumask_weight(core_cpumask(cpu)) > 1) {
+ cpumask_copy(&rq->core_mask, core_cpumask(cpu));
+ cpumask_clear_cpu(cpu, &rq->core_mask);
rq->cache_idle = cache_cpu_idle;
+ }
#endif
#ifdef CONFIG_SCHED_SMT
for_each_cpu(other_cpu, thread_cpumask(cpu))
rq->cpu_locality[other_cpu] = 1;
- if (cpumask_weight(thread_cpumask(cpu)) > 1)
+ if (cpumask_weight(thread_cpumask(cpu)) > 1) {
+ cpumask_copy(&rq->thread_mask, thread_cpumask(cpu));
+ cpumask_clear_cpu(cpu, &rq->thread_mask);
rq->siblings_idle = siblings_cpu_idle;
+ smt_threads = true;
+ }
#endif
}
+#ifdef CONFIG_SMT_NICE
+ if (smt_threads) {
+ check_siblings = &check_smt_siblings;
+ wake_siblings = &wake_smt_siblings;
+ smt_schedule = &smt_should_schedule;
+ rq_load_avg = &smt_load_avg;
+ }
+#endif
grq_unlock_irq();
mutex_unlock(&sched_domains_mutex);
@@ -7245,6 +7325,32 @@ int in_sched_functions(unsigned long addr)
&& addr < (unsigned long)__sched_text_end);
}
+#ifdef CONFIG_CGROUP_SCHED
+/* task group related information */
+struct task_group {
+ struct cgroup_subsys_state css;
+
+ struct rcu_head rcu;
+ struct list_head list;
+
+ struct task_group *parent;
+ struct list_head siblings;
+ struct list_head children;
+};
+
+/*
+ * Default task group.
+ * Every task in the system belongs to this group at bootup.
+ */
+struct task_group root_task_group;
+LIST_HEAD(task_groups);
+
+/* Cacheline aligned slab cache for task_group */
+static struct kmem_cache *task_group_cache __read_mostly;
+/* task_group_lock serializes the addition/removal of task groups */
+static DEFINE_SPINLOCK(task_group_lock);
+#endif /* CONFIG_CGROUP_SCHED */
+
void __init sched_init(void)
{
#ifdef CONFIG_SMP
@@ -7265,6 +7371,9 @@ void __init sched_init(void)
grq.iso_ticks = 0;
grq.iso_refractory = false;
grq.noc = 1;
+ grq.node = skiplist_init();
+ grq.sl = new_skiplist(grq.node);
+
#ifdef CONFIG_SMP
init_defrootdomain();
grq.qnr = grq.idle_cpus = 0;
@@ -7272,6 +7381,14 @@ void __init sched_init(void)
#else
uprq = &per_cpu(runqueues, 0);
#endif
+
+#ifdef CONFIG_CGROUP_SCHED
+ task_group_cache = KMEM_CACHE(task_group, 0);
+
+ list_add(&root_task_group.list, &task_groups);
+ INIT_LIST_HEAD(&root_task_group.children);
+ INIT_LIST_HEAD(&root_task_group.siblings);
+#endif /* CONFIG_CGROUP_SCHED */
for_each_possible_cpu(i) {
rq = cpu_rq(i);
rq->grq_lock = &grq.lock;
@@ -7316,11 +7433,6 @@ void __init sched_init(void)
}
#endif
- for (i = 0; i < PRIO_LIMIT; i++)
- INIT_LIST_HEAD(grq.queue + i);
- /* delimiter for bitsearch */
- __set_bit(PRIO_LIMIT, grq.prio_bitmap);
-
#ifdef CONFIG_PREEMPT_NOTIFIERS
INIT_HLIST_HEAD(&init_task.preempt_notifiers);
#endif
@@ -7702,3 +7814,127 @@ unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
return smt_gain;
}
#endif
+
+#ifdef CONFIG_CGROUP_SCHED
+static void sched_free_group(struct task_group *tg)
+{
+ kmem_cache_free(task_group_cache, tg);
+}
+
+/* allocate a new task group */
+struct task_group *sched_create_group(struct task_group *parent)
+{
+ struct task_group *tg;
+
+ tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
+ if (!tg)
+ return ERR_PTR(-ENOMEM);
+
+ return tg;
+}
+
+void sched_online_group(struct task_group *tg, struct task_group *parent)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&task_group_lock, flags);
+ list_add_rcu(&tg->list, &task_groups);
+
+ WARN_ON(!parent); /* root should already exist */
+
+ tg->parent = parent;
+ INIT_LIST_HEAD(&tg->children);
+ list_add_rcu(&tg->siblings, &parent->children);
+ spin_unlock_irqrestore(&task_group_lock, flags);
+}
+
+/* rcu callback to free various structures associated with a task group */
+static void sched_free_group_rcu(struct rcu_head *rhp)
+{
+	/* now it should be safe to free the task group */
+ sched_free_group(container_of(rhp, struct task_group, rcu));
+}
+
+void sched_destroy_group(struct task_group *tg)
+{
+	/* wait for possible concurrent references to the task group to complete */
+ call_rcu(&tg->rcu, sched_free_group_rcu);
+}
+
+void sched_offline_group(struct task_group *tg)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&task_group_lock, flags);
+ list_del_rcu(&tg->list);
+ list_del_rcu(&tg->siblings);
+ spin_unlock_irqrestore(&task_group_lock, flags);
+}
+
+static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
+{
+ return css ? container_of(css, struct task_group, css) : NULL;
+}
+
+static struct cgroup_subsys_state *
+cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
+{
+ struct task_group *parent = css_tg(parent_css);
+ struct task_group *tg;
+
+ if (!parent) {
+ /* This is early initialization for the top cgroup */
+ return &root_task_group.css;
+ }
+
+ tg = sched_create_group(parent);
+ if (IS_ERR(tg))
+ return ERR_PTR(-ENOMEM);
+ return &tg->css;
+}
+
+static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
+{
+ struct task_group *tg = css_tg(css);
+
+ sched_offline_group(tg);
+}
+
+static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
+{
+ struct task_group *tg = css_tg(css);
+
+ /*
+ * Relies on the RCU grace period between css_released() and this.
+ */
+ sched_free_group(tg);
+}
+
+static void cpu_cgroup_fork(struct task_struct *task)
+{
+}
+
+static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
+{
+ return 0;
+}
+
+static void cpu_cgroup_attach(struct cgroup_taskset *tset)
+{
+}
+
+static struct cftype cpu_files[] = {
+ { } /* terminate */
+};
+
+struct cgroup_subsys cpu_cgrp_subsys = {
+ .css_alloc = cpu_cgroup_css_alloc,
+ .css_released = cpu_cgroup_css_released,
+ .css_free = cpu_cgroup_css_free,
+ .fork = cpu_cgroup_fork,
+ .can_attach = cpu_cgroup_can_attach,
+ .attach = cpu_cgroup_attach,
+ .legacy_cftypes = cpu_files,
+ .early_init = true,
+};
+#endif /* CONFIG_CGROUP_SCHED */
diff --git a/kernel/sched/bfs_sched.h b/kernel/sched/bfs_sched.h
index 9ab0ec66a..06b2cdca7 100644
--- a/kernel/sched/bfs_sched.h
+++ b/kernel/sched/bfs_sched.h
@@ -24,6 +24,8 @@ struct rq {
int rq_prio;
bool rq_running; /* There is a task running */
int soft_affined; /* Running or queued tasks with this set as their rq */
+ u64 load_update; /* When we last updated load */
+ unsigned long load_avg; /* Rolling load average */
#ifdef CONFIG_SMT_NICE
struct mm_struct *rq_mm;
int rq_smt_bias; /* Policy/nice level bias across smt siblings */
@@ -44,11 +46,13 @@ struct rq {
struct sched_domain *sd;
int *cpu_locality; /* CPU relative cache distance */
#ifdef CONFIG_SCHED_SMT
- bool (*siblings_idle)(int cpu);
+ cpumask_t thread_mask;
+ bool (*siblings_idle)(struct rq *rq);
/* See if all smt siblings are idle */
#endif /* CONFIG_SCHED_SMT */
#ifdef CONFIG_SCHED_MC
- bool (*cache_idle)(int cpu);
+ cpumask_t core_mask;
+ bool (*cache_idle)(struct rq *rq);
/* See if all cache siblings are idle */
#endif /* CONFIG_SCHED_MC */
u64 last_niffy; /* Last time this RQ updated grq.niffies */
@@ -199,14 +203,29 @@ static inline void cpufreq_trigger(u64 time, unsigned long util)
{
struct update_util_data *data;
+ if (util > SCHED_CAPACITY_SCALE)
+ util = SCHED_CAPACITY_SCALE;
data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
if (data)
- data->func(data, time, util, 0);
+ data->func(data, time, util, SCHED_CAPACITY_SCALE);
+}
+
+static inline void other_cpufreq_trigger(int cpu, u64 time, unsigned long util)
+{
+ struct update_util_data *data;
+
+ data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, cpu));
+ if (data)
+ data->func(data, time, util, SCHED_CAPACITY_SCALE);
}
#else
static inline void cpufreq_trigger(u64 time, unsigned long util)
{
}
+
+static inline void other_cpufreq_trigger(int cpu, u64 time, unsigned long util)
+{
+}
#endif /* CONFIG_CPU_FREQ */
#ifdef arch_scale_freq_capacity
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 75f98c549..a24cfb41d 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -603,19 +603,25 @@ static void cputime_adjust(struct task_cputime *curr,
stime = curr->stime;
utime = curr->utime;
- if (utime == 0) {
- stime = rtime;
+ /*
+	 * If stime is 0, assume all runtime is userspace; if utime is 0,
+	 * assume it is all system time. Once a task gets some ticks, the
+	 * monotonicity code at 'update' will ensure things converge to the
+	 * observed ratio.
+ */
+ if (stime == 0) {
+ utime = rtime;
goto update;
}
- if (stime == 0) {
- utime = rtime;
+ if (utime == 0) {
+ stime = rtime;
goto update;
}
stime = scale_stime((__force u64)stime, (__force u64)rtime,
(__force u64)(stime + utime));
+update:
/*
* Make sure stime doesn't go backwards; this preserves monotonicity
* for utime because rtime is monotonic.
@@ -638,7 +644,6 @@ static void cputime_adjust(struct task_cputime *curr,
stime = rtime - utime;
}
-update:
prev->stime = stime;
prev->utime = utime;
out:
diff --git a/kernel/skip_lists.c b/kernel/skip_lists.c
new file mode 100644
index 000000000..4d82ee18b
--- /dev/null
+++ b/kernel/skip_lists.c
@@ -0,0 +1,177 @@
+/*
+ Copyright (C) 2011,2016 Con Kolivas.
+
+ Code based on example originally by William Pugh.
+
+Skip lists are a probabilistic alternative to balanced trees, as described
+in the June 1990 issue of CACM, and were invented by William Pugh in 1987.
+
+A couple of comments about this implementation:
+The routine randomLevel no longer uses Pugh's p=0.25 geometric distribution;
+it derives a level from a supplied random seed, capped according to the
+number of queued entries (see randomLevel below).
+
+The insertion routine has been implemented so as to use the
+dirty hack described in the CACM paper: if a random level is
+generated that is more than the current maximum level, the
+current maximum level plus one is used instead.
+
+Levels start at zero and go up to MaxLevel (which is equal to
+MaxNumberOfLevels-1).
+
+The routines defined in this file are:
+
+skiplist_init: returns a new header node (slnode)
+
+new_skiplist: returns a new, empty list
+
+randomLevel: returns a random level based on a u64 random seed passed to it.
+In BFS, the "niffy" time is used for this purpose.
+
+skiplist_insert(slnode, l, key, value, randseed): inserts the binding
+(key, value) into l. This operation occurs in O(log n) time.
+
+skiplist_delnode(slnode, l, node): deletes the given node from l. This
+operation occurs in O(k) time, where k is the number of levels of the node
+in question (max 16). The original delete function occurred in O(log n) time
+and involved a search.
+
+BFS Notes: In this implementation of skiplists, there are bidirectional
+next/prev pointers and the insert function returns a pointer to the actual
+node where the value is stored. The key here is chosen by the scheduler so as
+to sort tasks according to the priority list requirements and is no longer
+used by the scheduler after insertion. The scheduler lookup, however, occurs
+in O(1) time because it is always the first item in the level 0 linked list.
+Since the task struct stores a copy of the node pointer upon skiplist_insert,
+it can also remove it much faster than the original implementation, with the
+aid of prev<->next pointer manipulation and no searching.
+
+*/
+
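+/*
+ * Usage sketch (illustrative only; the scheduler keys nodes by a
+ * priority/deadline derived sl_id, as in sched/bfs.c):
+ *
+ *	skiplist_node *head = skiplist_init();
+ *	skiplist *sl = new_skiplist(head);
+ *	skiplist_node *node = skiplist_insert(head, sl, sl_id, p, randseed);
+ *	best = head->next[0]->value;	(O(1) lookup of the first task)
+ *	skiplist_delnode(head, sl, node);
+ */
+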
+#include <linux/slab.h>
+#include <linux/skip_lists.h>
+
+#define MaxNumberOfLevels 16
+#define MaxLevel (MaxNumberOfLevels - 1)
+#define newNode kmalloc(sizeof(skiplist_node), GFP_ATOMIC)
+
+skiplist_node *skiplist_init(void)
+{
+ skiplist_node *slnode = newNode;
+ int i;
+
+ BUG_ON(!slnode);
+ slnode->key = 0xFFFFFFFFFFFFFFFF;
+ slnode->level = 0;
+ slnode->value = NULL;
+ for (i = 0; i < MaxNumberOfLevels; i++)
+ slnode->next[i] = slnode->prev[i] = slnode;
+ return slnode;
+}
+
+skiplist *new_skiplist(skiplist_node *slnode)
+{
+ skiplist *l = kmalloc(sizeof(skiplist), GFP_ATOMIC);
+
+ BUG_ON(!l);
+ l->entries = 0;
+ l->level = 0;
+ l->header = slnode;
+ return l;
+}
+
+void free_skiplist(skiplist_node *slnode, skiplist *l)
+{
+ skiplist_node *p, *q;
+
+ p = l->header;
+ do {
+ q = p->next[0];
+ kfree(p);
+ p = q;
+ } while (p != slnode);
+ kfree(l);
+}
+
+/*
+ * Returns a pseudo-random level in the range 0-15, derived by masking the
+ * randseed value. As many levels are not required when only few values are on
+ * the list, we limit the height of the levels according to how many list
+ * entries there are, in a cheap manner. The height of the levels may have
+ * been higher while there were more entries queued previously, but as this
+ * code is used only by the scheduler, entries are short lived and will be
+ * torn down regularly.
+ *
+ * 00-03 entries - 1 level
+ * 04-07 entries - 2 levels
+ * 08-15 entries - 4 levels
+ * 16-31 entries - 8 levels
+ * 32+ entries - max(16) levels
+ */
+static inline unsigned int randomLevel(int entries, unsigned int randseed)
+{
+ unsigned int mask;
+
+ if (entries > 31)
+ mask = 0xF;
+ else if (entries > 15)
+ mask = 0x7;
+ else if (entries > 7)
+ mask = 0x3;
+ else if (entries > 3)
+ mask = 0x1;
+ else
+ return 0;
+
+ return randseed & mask;
+}
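+
+/*
+ * Example (illustrative): with 20 entries queued, mask = 0x7, so a new node
+ * gets a uniformly random level between 0 and 7; with 3 or fewer entries
+ * every node sits at level 0.
+ */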
+
+skiplist_node *skiplist_insert(skiplist_node *slnode, skiplist *l, keyType key, valueType value, unsigned int randseed)
+{
+ skiplist_node *update[MaxNumberOfLevels];
+ skiplist_node *p, *q;
+ int k = l->level;
+
+ p = slnode;
+ do {
+ while (q = p->next[k], q->key <= key)
+ p = q;
+ update[k] = p;
+ } while (--k >= 0);
+
+ k = randomLevel(++l->entries, randseed);
+ if (k > l->level) {
+ k = ++l->level;
+ update[k] = slnode;
+ }
+
+ q = newNode;
+ q->level = k;
+ q->key = key;
+ q->value = value;
+ do {
+ p = update[k];
+ q->next[k] = p->next[k];
+ p->next[k] = q;
+ q->prev[k] = p;
+ q->next[k]->prev[k] = q;
+ } while (--k >= 0);
+ return q;
+}
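+
+/*
+ * Example (illustrative): inserting into a list whose current level is 2 when
+ * randomLevel() returns 5 caps the new node at level 3 (l->level + 1), the
+ * "dirty hack" described in the header comment above.
+ */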
+
+void skiplist_delnode(skiplist_node *slnode, skiplist *l, skiplist_node *node)
+{
+ int k, m = node->level;
+
+ for (k = 0; k <= m; k++) {
+ node->prev[k]->next[k] = node->next[k];
+ node->next[k]->prev[k] = node->prev[k];
+ }
+ kfree(node);
+ if (m == l->level) {
+ while (l->header->next[m] == slnode && l->header->prev[m] == slnode && m > 0)
+ m--;
+ l->level = m;
+ }
+ l->entries--;
+}
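+
+/*
+ * Example (illustrative): removing the only node that had reached the list's
+ * current top level makes the loop above lower l->level until a level with
+ * entries (or level 0) is found.
+ */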
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 343a2b7e5..07f28251f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2872,7 +2872,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
struct page *page;
pgtable_t pgtable;
pmd_t _pmd;
- bool young, write, dirty;
+ bool young, write, dirty, soft_dirty;
unsigned long addr;
int i;
@@ -2898,6 +2898,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
write = pmd_write(*pmd);
young = pmd_young(*pmd);
dirty = pmd_dirty(*pmd);
+ soft_dirty = pmd_soft_dirty(*pmd);
pmdp_huge_split_prepare(vma, haddr, pmd);
pgtable = pgtable_trans_huge_withdraw(mm, pmd);
@@ -2914,6 +2915,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
swp_entry_t swp_entry;
swp_entry = make_migration_entry(page + i, write);
entry = swp_entry_to_pte(swp_entry);
+ if (soft_dirty)
+ entry = pte_swp_mksoft_dirty(entry);
} else {
entry = mk_pte(page + i, vma->vm_page_prot);
entry = maybe_mkwrite(entry, vma);
@@ -2921,6 +2924,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
entry = pte_wrprotect(entry);
if (!young)
entry = pte_mkold(entry);
+ if (soft_dirty)
+ entry = pte_mksoft_dirty(entry);
}
if (dirty)
SetPageDirty(page + i);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4c03a8fb4..a3e27ba36 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3266,53 +3266,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
return NULL;
}
-static inline bool
-should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
- enum compact_result compact_result, enum migrate_mode *migrate_mode,
- int compaction_retries)
-{
- int max_retries = MAX_COMPACT_RETRIES;
-
- if (!order)
- return false;
-
- /*
- * compaction considers all the zone as desperately out of memory
- * so it doesn't really make much sense to retry except when the
- * failure could be caused by weak migration mode.
- */
- if (compaction_failed(compact_result)) {
- if (*migrate_mode == MIGRATE_ASYNC) {
- *migrate_mode = MIGRATE_SYNC_LIGHT;
- return true;
- }
- return false;
- }
-
- /*
- * make sure the compaction wasn't deferred or didn't bail out early
- * due to locks contention before we declare that we should give up.
- * But do not retry if the given zonelist is not suitable for
- * compaction.
- */
- if (compaction_withdrawn(compact_result))
- return compaction_zonelist_suitable(ac, order, alloc_flags);
-
- /*
- * !costly requests are much more important than __GFP_REPEAT
- * costly ones because they are de facto nofail and invoke OOM
- * killer to move on while costly can fail and users are ready
- * to cope with that. 1/4 retries is rather arbitrary but we
- * would need much more detailed feedback from compaction to
- * make a better decision.
- */
- if (order > PAGE_ALLOC_COSTLY_ORDER)
- max_retries /= 4;
- if (compaction_retries <= max_retries)
- return true;
-
- return false;
-}
#else
static inline struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
@@ -3323,6 +3276,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
return NULL;
}
+#endif /* CONFIG_COMPACTION */
+
static inline bool
should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
enum compact_result compact_result,
@@ -3349,7 +3304,6 @@ should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_fla
}
return false;
}
-#endif /* CONFIG_COMPACTION */
/* Perform direct synchronous page reclaim */
static int
diff --git a/mm/readahead.c b/mm/readahead.c
index 40be3ae0a..7f9f8c346 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -8,6 +8,7 @@
*/
#include <linux/kernel.h>
+#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/blkdev.h>
@@ -545,6 +546,14 @@ do_readahead(struct address_space *mapping, struct file *filp,
if (!mapping || !mapping->a_ops)
return -EINVAL;
+ /*
+ * Readahead doesn't make sense for DAX inodes, but we don't want it
+ * to report a failure either. Instead, we just return success and
+ * don't do any work.
+ */
+ if (dax_mapping(mapping))
+ return 0;
+
return force_page_cache_readahead(mapping, filp, index, nr);
}
diff --git a/mm/slub.c b/mm/slub.c
index f116705e3..b77d76f66 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3479,6 +3479,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
*/
static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
{
+ LIST_HEAD(discard);
struct page *page, *h;
BUG_ON(irqs_disabled());
@@ -3486,13 +3487,16 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
list_for_each_entry_safe(page, h, &n->partial, lru) {
if (!page->inuse) {
remove_partial(n, page);
- discard_slab(s, page);
+ list_add(&page->lru, &discard);
} else {
list_slab_objects(s, page,
"Objects remaining in %s on __kmem_cache_shutdown()");
}
}
spin_unlock_irq(&n->list_lock);
+
+ list_for_each_entry_safe(page, h, &discard, lru)
+ discard_slab(s, page);
}
/*
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 0c12e4001..8cc49c044 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -869,7 +869,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
/* free all potentially still buffered bcast frames */
local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf);
- skb_queue_purge(&sdata->u.ap.ps.bc_buf);
+ ieee80211_purge_tx_queue(&local->hw, &sdata->u.ap.ps.bc_buf);
mutex_lock(&local->mtx);
ieee80211_vif_copy_chanctx_to_vlans(sdata, true);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 203044379..8bad2ad81 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -365,7 +365,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
skb = skb_dequeue(&ps->bc_buf);
if (skb) {
purged++;
- dev_kfree_skb(skb);
+ ieee80211_free_txskb(&local->hw, skb);
}
total += skb_queue_len(&ps->bc_buf);
}
@@ -448,7 +448,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) {
ps_dbg(tx->sdata,
"BC TX buffer full - dropping the oldest frame\n");
- dev_kfree_skb(skb_dequeue(&ps->bc_buf));
+ ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf));
} else
tx->local->total_ps_buffered++;
@@ -4055,7 +4055,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb))
break;
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(hw, skb);
}
info = IEEE80211_SKB_CB(skb);
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index e64ae93d5..bf4b0e98f 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -340,12 +340,14 @@ gss_release_msg(struct gss_upcall_msg *gss_msg)
}
static struct gss_upcall_msg *
-__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid)
+__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth)
{
struct gss_upcall_msg *pos;
list_for_each_entry(pos, &pipe->in_downcall, list) {
if (!uid_eq(pos->uid, uid))
continue;
+ if (auth && pos->auth->service != auth->service)
+ continue;
atomic_inc(&pos->count);
dprintk("RPC: %s found msg %p\n", __func__, pos);
return pos;
@@ -365,7 +367,7 @@ gss_add_msg(struct gss_upcall_msg *gss_msg)
struct gss_upcall_msg *old;
spin_lock(&pipe->lock);
- old = __gss_find_upcall(pipe, gss_msg->uid);
+ old = __gss_find_upcall(pipe, gss_msg->uid, gss_msg->auth);
if (old == NULL) {
atomic_inc(&gss_msg->count);
list_add(&gss_msg->list, &pipe->in_downcall);
@@ -714,7 +716,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
err = -ENOENT;
/* Find a matching upcall */
spin_lock(&pipe->lock);
- gss_msg = __gss_find_upcall(pipe, uid);
+ gss_msg = __gss_find_upcall(pipe, uid, NULL);
if (gss_msg == NULL) {
spin_unlock(&pipe->lock);
goto err_put_ctx;
diff --git a/net/sunrpc/xprtmultipath.c b/net/sunrpc/xprtmultipath.c
index e7fd76975..66c9d63f4 100644
--- a/net/sunrpc/xprtmultipath.c
+++ b/net/sunrpc/xprtmultipath.c
@@ -271,14 +271,12 @@ struct rpc_xprt *xprt_iter_next_entry_multiple(struct rpc_xprt_iter *xpi,
xprt_switch_find_xprt_t find_next)
{
struct rpc_xprt_switch *xps = rcu_dereference(xpi->xpi_xpswitch);
- struct list_head *head;
if (xps == NULL)
return NULL;
- head = &xps->xps_xprt_list;
- if (xps->xps_nxprts < 2)
- return xprt_switch_find_first_entry(head);
- return xprt_switch_set_next_cursor(head, &xpi->xpi_cursor, find_next);
+ return xprt_switch_set_next_cursor(&xps->xps_xprt_list,
+ &xpi->xpi_cursor,
+ find_next);
}
static
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 7e2b2fa18..167cf5931 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2278,6 +2278,10 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
/* SYN_SENT! */
if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
+ break;
+ case -EADDRNOTAVAIL:
+ /* Source port number is unavailable. Try a new one! */
+ transport->srcport = 0;
}
out:
return ret;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 89dacf9b4..160c7f713 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -906,20 +906,23 @@ static int azx_resume(struct device *dev)
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip;
struct hda_intel *hda;
+ struct hdac_bus *bus;
if (!card)
return 0;
chip = card->private_data;
hda = container_of(chip, struct hda_intel, chip);
+ bus = azx_bus(chip);
if (chip->disabled || hda->init_failed || !chip->running)
return 0;
- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
- && hda->need_i915_power) {
- snd_hdac_display_power(azx_bus(chip), true);
- snd_hdac_i915_set_bclk(azx_bus(chip));
+ if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
+ snd_hdac_display_power(bus, true);
+ if (hda->need_i915_power)
+ snd_hdac_i915_set_bclk(bus);
}
+
if (chip->msi)
if (pci_enable_msi(pci) < 0)
chip->msi = 0;
@@ -929,6 +932,11 @@ static int azx_resume(struct device *dev)
hda_intel_init_chip(chip, true);
+ /* power down again for link-controlled chips */
+ if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL) &&
+ !hda->need_i915_power)
+ snd_hdac_display_power(bus, false);
+
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
trace_azx_resume(chip);
@@ -1008,6 +1016,7 @@ static int azx_runtime_resume(struct device *dev)
chip = card->private_data;
hda = container_of(chip, struct hda_intel, chip);
+ bus = azx_bus(chip);
if (chip->disabled || hda->init_failed)
return 0;
@@ -1015,15 +1024,9 @@ static int azx_runtime_resume(struct device *dev)
return 0;
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
- bus = azx_bus(chip);
- if (hda->need_i915_power) {
- snd_hdac_display_power(bus, true);
+ snd_hdac_display_power(bus, true);
+ if (hda->need_i915_power)
snd_hdac_i915_set_bclk(bus);
- } else {
- /* toggle codec wakeup bit for STATESTS read */
- snd_hdac_set_codec_wakeup(bus, true);
- snd_hdac_set_codec_wakeup(bus, false);
- }
}
/* Read STATESTS before controller reset */
@@ -1043,6 +1046,11 @@ static int azx_runtime_resume(struct device *dev)
azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
~STATESTS_INT_MASK);
+ /* power down again for link-controlled chips */
+ if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL) &&
+ !hda->need_i915_power)
+ snd_hdac_display_power(bus, false);
+
trace_azx_runtime_resume(chip);
return 0;
}
diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
index 204cc074a..41aa3355e 100644
--- a/sound/usb/line6/pcm.c
+++ b/sound/usb/line6/pcm.c
@@ -55,7 +55,6 @@ static int snd_line6_impulse_volume_put(struct snd_kcontrol *kcontrol,
err = line6_pcm_acquire(line6pcm, LINE6_STREAM_IMPULSE);
if (err < 0) {
line6pcm->impulse_volume = 0;
- line6_pcm_release(line6pcm, LINE6_STREAM_IMPULSE);
return err;
}
} else {
@@ -211,7 +210,9 @@ static void line6_stream_stop(struct snd_line6_pcm *line6pcm, int direction,
spin_lock_irqsave(&pstr->lock, flags);
clear_bit(type, &pstr->running);
if (!pstr->running) {
+ spin_unlock_irqrestore(&pstr->lock, flags);
line6_unlink_audio_urbs(line6pcm, pstr);
+ spin_lock_irqsave(&pstr->lock, flags);
if (direction == SNDRV_PCM_STREAM_CAPTURE) {
line6pcm->prev_fbuf = NULL;
line6pcm->prev_fsize = 0;
diff --git a/sound/usb/line6/pod.c b/sound/usb/line6/pod.c
index daf81d169..45dd34874 100644
--- a/sound/usb/line6/pod.c
+++ b/sound/usb/line6/pod.c
@@ -244,8 +244,8 @@ static int pod_set_system_param_int(struct usb_line6_pod *pod, int value,
static ssize_t serial_number_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct usb_interface *interface = to_usb_interface(dev);
- struct usb_line6_pod *pod = usb_get_intfdata(interface);
+ struct snd_card *card = dev_to_snd_card(dev);
+ struct usb_line6_pod *pod = card->private_data;
return sprintf(buf, "%u\n", pod->serial_number);
}
@@ -256,8 +256,8 @@ static ssize_t serial_number_show(struct device *dev,
static ssize_t firmware_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct usb_interface *interface = to_usb_interface(dev);
- struct usb_line6_pod *pod = usb_get_intfdata(interface);
+ struct snd_card *card = dev_to_snd_card(dev);
+ struct usb_line6_pod *pod = card->private_data;
return sprintf(buf, "%d.%02d\n", pod->firmware_version / 100,
pod->firmware_version % 100);
@@ -269,8 +269,8 @@ static ssize_t firmware_version_show(struct device *dev,
static ssize_t device_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct usb_interface *interface = to_usb_interface(dev);
- struct usb_line6_pod *pod = usb_get_intfdata(interface);
+ struct snd_card *card = dev_to_snd_card(dev);
+ struct usb_line6_pod *pod = card->private_data;
return sprintf(buf, "%d\n", pod->device_id);
}
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 6adde457b..6cf1f3597 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1128,6 +1128,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
{
/* devices which do not support reading the sample rate. */
switch (chip->usb_id) {
+ case USB_ID(0x041E, 0x4080): /* Creative Live Cam VF0610 */
case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */
case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
case USB_ID(0x045E, 0x076E): /* MS Lifecam HD-5001 */
@@ -1138,6 +1139,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
+ case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index a07b9605e..853b26d85 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -501,7 +501,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
struct intel_pt_recording *ptr =
container_of(itr, struct intel_pt_recording, itr);
struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
- bool have_timing_info;
+ bool have_timing_info, need_immediate = false;
struct perf_evsel *evsel, *intel_pt_evsel = NULL;
const struct cpu_map *cpus = evlist->cpus;
bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
@@ -655,6 +655,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
ptr->have_sched_switch = 3;
} else {
opts->record_switch_events = true;
+ need_immediate = true;
if (cpu_wide)
ptr->have_sched_switch = 3;
else
@@ -700,6 +701,9 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
tracking_evsel->attr.freq = 0;
tracking_evsel->attr.sample_period = 1;
+ if (need_immediate)
+ tracking_evsel->immediate = true;
+
/* In per-cpu case, always need the time of mmap events etc */
if (!cpu_map__empty(cpus)) {
perf_evsel__set_sample_bit(tracking_evsel, TIME);
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 1dc140c54..3f02bea10 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -87,6 +87,9 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
if (mem->operation & MEM_OPERATION_LOAD)
perf_mem_events[PERF_MEM_EVENTS__LOAD].record = true;
+ if (mem->operation & MEM_OPERATION_STORE)
+ perf_mem_events[PERF_MEM_EVENTS__STORE].record = true;
+
if (perf_mem_events[PERF_MEM_EVENTS__LOAD].record)
rec_argv[i++] = "-W";
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 87a297dd8..c33efb9d4 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -827,7 +827,8 @@ int dso__load_sym(struct dso *dso, struct map *map,
sec = syms_ss->symtab;
shdr = syms_ss->symshdr;
- if (elf_section_by_name(elf, &ehdr, &tshdr, ".text", NULL))
+ if (elf_section_by_name(runtime_ss->elf, &runtime_ss->ehdr, &tshdr,
+ ".text", NULL))
dso->text_offset = tshdr.sh_addr - tshdr.sh_offset;
if (runtime_ss->opdsec)
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index c91986685..1bfacc8e5 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -13,6 +13,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
#include <linux/libnvdimm.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
@@ -1480,6 +1481,7 @@ static int nfit_test_probe(struct platform_device *pdev)
if (nfit_test->setup != nfit_test0_setup)
return 0;
+ flush_work(&acpi_desc->work);
nfit_test->setup_hotplug = 1;
nfit_test->setup(nfit_test);