-rw-r--r--  Documentation/block/00-INDEX | 2
-rw-r--r--  Documentation/block/queue-sysfs.txt | 13
-rw-r--r--  Documentation/device-mapper/dm-raid.txt | 1
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/boot/dts/ste-snowball.dts | 15
-rw-r--r--  arch/arm/mach-mvebu/Kconfig | 4
-rw-r--r--  arch/arm/mm/abort-lv4t.S | 34
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi | 2
-rw-r--r--  arch/h8300/include/asm/thread_info.h | 4
-rw-r--r--  arch/h8300/kernel/signal.c | 2
-rw-r--r--  arch/mips/include/asm/kvm_host.h | 7
-rw-r--r--  arch/mips/kernel/relocate.c | 2
-rw-r--r--  arch/mips/kvm/emulate.c | 32
-rw-r--r--  arch/parisc/kernel/syscall.S | 11
-rw-r--r--  arch/powerpc/include/asm/cpuidle.h | 2
-rw-r--r--  arch/powerpc/include/asm/tlb.h | 12
-rw-r--r--  arch/powerpc/kernel/idle_book3s.S | 35
-rw-r--r--  arch/powerpc/mm/tlb-radix.c | 8
-rw-r--r--  arch/s390/kvm/sthyi.c | 4
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c | 2
-rw-r--r--  arch/x86/kernel/setup.c | 7
-rw-r--r--  arch/x86/kvm/emulate.c | 2
-rw-r--r--  arch/x86/kvm/x86.c | 4
-rw-r--r--  block/Kconfig | 1
-rw-r--r--  block/Kconfig.iosched | 11
-rw-r--r--  block/Makefile | 2
-rw-r--r--  block/bfq-cgroup.c | 12
-rw-r--r--  block/bfq-iosched.c | 24
-rw-r--r--  block/bfq.h | 2
-rw-r--r--  block/blk-core.c | 21
-rw-r--r--  block/blk-mq-sysfs.c | 47
-rw-r--r--  block/blk-mq.c | 40
-rw-r--r--  block/blk-mq.h | 3
-rw-r--r--  block/blk-settings.c | 15
-rw-r--r--  block/blk-stat.c | 221
-rw-r--r--  block/blk-stat.h | 18
-rw-r--r--  block/blk-sysfs.c | 151
-rw-r--r--  block/cfq-iosched.c | 13
-rw-r--r--  drivers/android/binder.c | 35
-rw-r--r--  drivers/char/virtio_console.c | 22
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 29
-rw-r--r--  drivers/dax/pmem.c | 2
-rw-r--r--  drivers/firewire/net.c | 59
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 7
-rw-r--r--  drivers/gpio/gpiolib.c | 99
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 9
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 6
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 68
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 122
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 71
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 24
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_dp_auxch.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 53
-rw-r--r--  drivers/hid/hid-ids.h | 1
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 1
-rw-r--r--  drivers/hv/hv_util.c | 10
-rw-r--r--  drivers/i2c/busses/i2c-rk3x.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-xgene-slimpro.c | 2
-rw-r--r--  drivers/i2c/i2c-core.c | 13
-rw-r--r--  drivers/iio/chemical/atlas-ph-sensor.c | 7
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 7
-rw-r--r--  drivers/md/dm-raid.c | 15
-rw-r--r--  drivers/md/dm-raid1.c | 1
-rw-r--r--  drivers/md/dm-rq.c | 7
-rw-r--r--  drivers/md/dm-table.c | 24
-rw-r--r--  drivers/md/dm.c | 4
-rw-r--r--  drivers/md/md.c | 4
-rw-r--r--  drivers/md/raid1.c | 7
-rw-r--r--  drivers/md/raid10.c | 7
-rw-r--r--  drivers/media/platform/vsp1/vsp1_video.c | 7
-rw-r--r--  drivers/misc/cxl/api.c | 2
-rw-r--r--  drivers/misc/cxl/file.c | 22
-rw-r--r--  drivers/misc/genwqe/card_utils.c | 12
-rw-r--r--  drivers/misc/mei/hw-txe.c | 6
-rw-r--r--  drivers/mmc/host/dw_mmc-pltfm.c | 5
-rw-r--r--  drivers/mtd/ubi/fastmap.c | 17
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/debug.c | 75
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_calib.c | 25
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.h | 1
-rw-r--r--  drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 4
-rw-r--r--  drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c | 4
-rw-r--r--  drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 11
-rw-r--r--  drivers/pwm/core.c | 2
-rw-r--r--  drivers/pwm/sysfs.c | 18
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 9
-rw-r--r--  drivers/scsi/scsi.c | 3
-rw-r--r--  drivers/scsi/scsi_debug.c | 1
-rw-r--r--  drivers/spi/spi-fsl-espi.c | 2
-rw-r--r--  drivers/spi/spi.c | 5
-rw-r--r--  drivers/staging/wilc1000/host_interface.c | 1
-rw-r--r--  drivers/thermal/intel_powerclamp.c | 14
-rw-r--r--  drivers/tty/vt/vt.c | 7
-rw-r--r--  drivers/usb/chipidea/host.c | 2
-rw-r--r--  drivers/usb/dwc3/gadget.c | 4
-rw-r--r--  drivers/usb/gadget/function/u_ether.c | 5
-rw-r--r--  drivers/usb/gadget/udc/atmel_usba_udc.c | 2
-rw-r--r--  drivers/usb/host/ohci-hcd.c | 2
-rw-r--r--  drivers/usb/host/xhci-hub.c | 41
-rw-r--r--  drivers/usb/host/xhci-pci.c | 10
-rw-r--r--  drivers/usb/host/xhci.h | 3
-rw-r--r--  drivers/usb/musb/omap2430.c | 7
-rw-r--r--  drivers/usb/renesas_usbhs/rcar3.c | 8
-rw-r--r--  drivers/usb/serial/cp210x.c | 4
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 3
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 5
-rw-r--r--  drivers/usb/serial/usb-serial.c | 3
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/dsi.c | 4
-rw-r--r--  drivers/video/fbdev/pxafb.c | 2
-rw-r--r--  drivers/virtio/virtio_pci_legacy.c | 16
-rw-r--r--  drivers/virtio/virtio_ring.c | 14
-rw-r--r--  fs/btrfs/inode.c | 9
-rw-r--r--  fs/btrfs/tree-log.c | 20
-rw-r--r--  fs/buffer.c | 2
-rw-r--r--  fs/f2fs/data.c | 2
-rw-r--r--  fs/f2fs/node.c | 2
-rw-r--r--  fs/gfs2/meta_io.c | 3
-rw-r--r--  fs/mpage.c | 2
-rw-r--r--  fs/nfsd/nfs4state.c | 10
-rw-r--r--  fs/overlayfs/copy_up.c | 2
-rw-r--r--  fs/overlayfs/inode.c | 3
-rw-r--r--  fs/overlayfs/super.c | 15
-rw-r--r--  fs/ubifs/dir.c | 16
-rw-r--r--  fs/xfs/libxfs/xfs_dquot_buf.c | 3
-rw-r--r--  fs/xfs/xfs_aops.c | 7
-rw-r--r--  include/linux/backing-dev-defs.h | 2
-rw-r--r--  include/linux/blk_types.h | 16
-rw-r--r--  include/linux/blkdev.h | 19
-rw-r--r--  include/linux/fs.h | 3
-rw-r--r--  include/linux/pwm.h | 5
-rw-r--r--  include/linux/wbt.h | 141
-rw-r--r--  include/linux/writeback.h | 10
-rw-r--r--  include/trace/events/wbt.h | 153
-rw-r--r--  include/uapi/linux/Kbuild | 1
-rw-r--r--  kernel/sched/MuQSS.c | 31
-rw-r--r--  kernel/sched/MuQSS.h | 15
-rw-r--r--  kernel/sched/idle.c | 10
-rw-r--r--  kernel/sched/sched.h | 5
-rw-r--r--  kernel/time/timer.c | 74
-rw-r--r--  lib/Kconfig | 3
-rw-r--r--  lib/Makefile | 1
-rw-r--r--  lib/wbt.c | 703
-rw-r--r--  mm/backing-dev.c | 1
-rw-r--r--  mm/list_lru.c | 2
-rw-r--r--  mm/memcontrol.c | 9
-rw-r--r--  mm/page-writeback.c | 1
-rw-r--r--  mm/slab.c | 2
-rw-r--r--  mm/vmscan.c | 2
-rw-r--r--  net/mac80211/rx.c | 24
-rw-r--r--  net/netfilter/xt_NFLOG.c | 1
-rw-r--r--  security/keys/Kconfig | 2
-rw-r--r--  security/keys/big_key.c | 59
-rw-r--r--  security/keys/proc.c | 2
-rw-r--r--  sound/core/seq/seq_timer.c | 4
-rw-r--r--  sound/pci/hda/hda_intel.c | 7
-rw-r--r--  sound/pci/hda/patch_realtek.c | 30
-rw-r--r--  sound/usb/quirks-table.h | 17
162 files changed, 1181 insertions, 2187 deletions
diff --git a/Documentation/block/00-INDEX b/Documentation/block/00-INDEX
index e55103ace..8d55b4bbb 100644
--- a/Documentation/block/00-INDEX
+++ b/Documentation/block/00-INDEX
@@ -1,5 +1,7 @@
00-INDEX
- This file
+bfq-iosched.txt
+ - BFQ IO scheduler and its tunables
biodoc.txt
- Notes on the Generic Block Layer Rewrite in Linux 2.5
biovecs.txt
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index 2847219eb..2a3904030 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -169,18 +169,5 @@ This is the number of bytes the device can write in a single write-same
command. A value of '0' means write-same is not supported by this
device.
-wb_lat_usec (RW)
-----------------
-If the device is registered for writeback throttling, then this file shows
-the target minimum read latency. If this latency is exceeded in a given
-window of time (see wb_window_usec), then the writeback throttling will start
-scaling back writes.
-
-wb_window_usec (RW)
--------------------
-If the device is registered for writeback throttling, then this file shows
-the value of the monitoring window in which we'll look at the target
-latency. See wb_lat_usec.
-
Jens Axboe <jens.axboe@oracle.com>, February 2009
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index e5b649711..c75b64a85 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -309,3 +309,4 @@ Version History
with a reshape in progress.
1.9.0 Add support for RAID level takeover/reshape/region size
and set size reduction.
+1.9.1 Fix activation of existing RAID 4/10 mapped devices
diff --git a/Makefile b/Makefile
index 082d6bc4d..218366218 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 8
-SUBLEVEL = 6
+SUBLEVEL = 7
EXTRAVERSION = -gnu
NAME = Psychotic Stoned Sheep
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index b3df1c60d..386eee6de 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -239,14 +239,25 @@
arm,primecell-periphid = <0x10480180>;
max-frequency = <100000000>;
bus-width = <4>;
+ cap-sd-highspeed;
cap-mmc-highspeed;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ /* All direction control is used */
+ st,sig-dir-cmd;
+ st,sig-dir-dat0;
+ st,sig-dir-dat2;
+ st,sig-dir-dat31;
+ st,sig-pin-fbclk;
+ full-pwr-cycle;
vmmc-supply = <&ab8500_ldo_aux3_reg>;
vqmmc-supply = <&vmmci>;
pinctrl-names = "default", "sleep";
pinctrl-0 = <&sdi0_default_mode>;
pinctrl-1 = <&sdi0_sleep_mode>;
- cd-gpios = <&gpio6 26 GPIO_ACTIVE_LOW>; // 218
+ /* GPIO218 MMC_CD */
+ cd-gpios = <&gpio6 26 GPIO_ACTIVE_LOW>;
status = "okay";
};
@@ -549,7 +560,7 @@
/* VMMCI level-shifter enable */
snowball_cfg3 {
pins = "GPIO217_AH12";
- ste,config = <&gpio_out_lo>;
+ ste,config = <&gpio_out_hi>;
};
/* VMMCI level-shifter voltage select */
snowball_cfg4 {
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index f9b6bd306..541647f57 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -23,6 +23,7 @@ config MACH_MVEBU_V7
select CACHE_L2X0
select ARM_CPU_SUSPEND
select MACH_MVEBU_ANY
+ select MVEBU_CLK_COREDIV
config MACH_ARMADA_370
bool "Marvell Armada 370 boards"
@@ -32,7 +33,6 @@ config MACH_ARMADA_370
select CPU_PJ4B
select MACH_MVEBU_V7
select PINCTRL_ARMADA_370
- select MVEBU_CLK_COREDIV
help
Say 'Y' here if you want your kernel to support boards based
on the Marvell Armada 370 SoC with device tree.
@@ -50,7 +50,6 @@ config MACH_ARMADA_375
select HAVE_SMP
select MACH_MVEBU_V7
select PINCTRL_ARMADA_375
- select MVEBU_CLK_COREDIV
help
Say 'Y' here if you want your kernel to support boards based
on the Marvell Armada 375 SoC with device tree.
@@ -68,7 +67,6 @@ config MACH_ARMADA_38X
select HAVE_SMP
select MACH_MVEBU_V7
select PINCTRL_ARMADA_38X
- select MVEBU_CLK_COREDIV
help
Say 'Y' here if you want your kernel to support boards based
on the Marvell Armada 380/385 SoC with device tree.
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
index 6d8e8e336..4cdfab31a 100644
--- a/arch/arm/mm/abort-lv4t.S
+++ b/arch/arm/mm/abort-lv4t.S
@@ -7,7 +7,7 @@
* : r4 = aborted context pc
* : r5 = aborted context psr
*
- * Returns : r4-r5, r10-r11, r13 preserved
+ * Returns : r4-r5, r9-r11, r13 preserved
*
* Purpose : obtain information about current aborted instruction.
* Note: we read user space. This means we might cause a data
@@ -48,7 +48,10 @@ ENTRY(v4t_late_abort)
/* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
/* d */ b do_DataAbort @ ldc rd, [rn, #m]
/* e */ b .data_unknown
-/* f */
+/* f */ b .data_unknown
+
+.data_unknown_r9:
+ ldr r9, [sp], #4
.data_unknown: @ Part of jumptable
mov r0, r4
mov r1, r8
@@ -57,6 +60,7 @@ ENTRY(v4t_late_abort)
.data_arm_ldmstm:
tst r8, #1 << 21 @ check writeback bit
beq do_DataAbort @ no writeback -> no fixup
+ str r9, [sp, #-4]!
mov r7, #0x11
orr r7, r7, #0x1100
and r6, r8, r7
@@ -75,12 +79,14 @@ ENTRY(v4t_late_abort)
subne r7, r7, r6, lsl #2 @ Undo increment
addeq r7, r7, r6, lsl #2 @ Undo decrement
str r7, [r2, r9, lsr #14] @ Put register 'Rn'
+ ldr r9, [sp], #4
b do_DataAbort
.data_arm_lateldrhpre:
tst r8, #1 << 21 @ Check writeback bit
beq do_DataAbort @ No writeback -> no fixup
.data_arm_lateldrhpost:
+ str r9, [sp, #-4]!
and r9, r8, #0x00f @ get Rm / low nibble of immediate value
tst r8, #1 << 22 @ if (immediate offset)
andne r6, r8, #0xf00 @ { immediate high nibble
@@ -93,6 +99,7 @@ ENTRY(v4t_late_abort)
subne r7, r7, r6 @ Undo incrmenet
addeq r7, r7, r6 @ Undo decrement
str r7, [r2, r9, lsr #14] @ Put register 'Rn'
+ ldr r9, [sp], #4
b do_DataAbort
.data_arm_lateldrpreconst:
@@ -101,12 +108,14 @@ ENTRY(v4t_late_abort)
.data_arm_lateldrpostconst:
movs r6, r8, lsl #20 @ Get offset
beq do_DataAbort @ zero -> no fixup
+ str r9, [sp, #-4]!
and r9, r8, #15 << 16 @ Extract 'n' from instruction
ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
tst r8, #1 << 23 @ Check U bit
subne r7, r7, r6, lsr #20 @ Undo increment
addeq r7, r7, r6, lsr #20 @ Undo decrement
str r7, [r2, r9, lsr #14] @ Put register 'Rn'
+ ldr r9, [sp], #4
b do_DataAbort
.data_arm_lateldrprereg:
@@ -115,6 +124,7 @@ ENTRY(v4t_late_abort)
.data_arm_lateldrpostreg:
and r7, r8, #15 @ Extract 'm' from instruction
ldr r6, [r2, r7, lsl #2] @ Get register 'Rm'
+ str r9, [sp, #-4]!
mov r9, r8, lsr #7 @ get shift count
ands r9, r9, #31
and r7, r8, #0x70 @ get shift type
@@ -126,33 +136,33 @@ ENTRY(v4t_late_abort)
b .data_arm_apply_r6_and_rn
b .data_arm_apply_r6_and_rn @ 1: LSL #0
nop
- b .data_unknown @ 2: MUL?
+ b .data_unknown_r9 @ 2: MUL?
nop
- b .data_unknown @ 3: MUL?
+ b .data_unknown_r9 @ 3: MUL?
nop
mov r6, r6, lsr r9 @ 4: LSR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, lsr #32 @ 5: LSR #32
b .data_arm_apply_r6_and_rn
- b .data_unknown @ 6: MUL?
+ b .data_unknown_r9 @ 6: MUL?
nop
- b .data_unknown @ 7: MUL?
+ b .data_unknown_r9 @ 7: MUL?
nop
mov r6, r6, asr r9 @ 8: ASR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, asr #32 @ 9: ASR #32
b .data_arm_apply_r6_and_rn
- b .data_unknown @ A: MUL?
+ b .data_unknown_r9 @ A: MUL?
nop
- b .data_unknown @ B: MUL?
+ b .data_unknown_r9 @ B: MUL?
nop
mov r6, r6, ror r9 @ C: ROR #!0
b .data_arm_apply_r6_and_rn
mov r6, r6, rrx @ D: RRX
b .data_arm_apply_r6_and_rn
- b .data_unknown @ E: MUL?
+ b .data_unknown_r9 @ E: MUL?
nop
- b .data_unknown @ F: MUL?
+ b .data_unknown_r9 @ F: MUL?
.data_thumb_abort:
ldrh r8, [r4] @ read instruction
@@ -190,6 +200,7 @@ ENTRY(v4t_late_abort)
.data_thumb_pushpop:
tst r8, #1 << 10
beq .data_unknown
+ str r9, [sp, #-4]!
and r6, r8, #0x55 @ hweight8(r8) + R bit
and r9, r8, #0xaa
add r6, r6, r9, lsr #1
@@ -204,9 +215,11 @@ ENTRY(v4t_late_abort)
addeq r7, r7, r6, lsl #2 @ increment SP if PUSH
subne r7, r7, r6, lsl #2 @ decrement SP if POP
str r7, [r2, #13 << 2]
+ ldr r9, [sp], #4
b do_DataAbort
.data_thumb_ldmstm:
+ str r9, [sp, #-4]!
and r6, r8, #0x55 @ hweight8(r8)
and r9, r8, #0xaa
add r6, r6, r9, lsr #1
@@ -219,4 +232,5 @@ ENTRY(v4t_late_abort)
and r6, r6, #15 @ number of regs to transfer
sub r7, r7, r6, lsl #2 @ always decrement
str r7, [r2, r9, lsr #6]
+ ldr r9, [sp], #4
b do_DataAbort
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index da31bbbbb..399271853 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -131,7 +131,7 @@
#address-cells = <0x1>;
#size-cells = <0x0>;
cell-index = <1>;
- clocks = <&cpm_syscon0 0 3>;
+ clocks = <&cpm_syscon0 1 21>;
status = "disabled";
};
diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
index b408fe660..3cef06875 100644
--- a/arch/h8300/include/asm/thread_info.h
+++ b/arch/h8300/include/asm/thread_info.h
@@ -31,7 +31,6 @@ struct thread_info {
int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable, <0 => BUG */
mm_segment_t addr_limit;
- struct restart_block restart_block;
};
/*
@@ -44,9 +43,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index ad1f81f57..7138303cb 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -79,7 +79,7 @@ restore_sigcontext(struct sigcontext *usc, int *pd0)
unsigned int er0;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
/* restore passed registers */
#define COPY(r) do { err |= get_user(regs->r, &usc->sc_##r); } while (0)
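Note: this is the h8300 side of the generic cleanup that moved restart_block out of struct thread_info and into struct task_struct, so the field is reached through current rather than current_thread_info(). A minimal sketch of the resulting idiom, assuming only the generic definitions from <linux/sched.h>:

    #include <linux/sched.h>    /* struct task_struct, current */

    static void discard_pending_syscall_restart(void)
    {
            /* restart_block lives in task_struct now, not thread_info */
            current->restart_block.fn = do_no_restart_syscall;
    }
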
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index b54bcadd8..45799ef23 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -279,7 +279,10 @@ struct kvm_vcpu_arch {
/* Host KSEG0 address of the EI/DI offset */
void *kseg0_commpage;
- u32 io_gpr; /* GPR used as IO source/target */
+ /* Resume PC after MMIO completion */
+ unsigned long io_pc;
+ /* GPR used as IO source/target */
+ u32 io_gpr;
struct hrtimer comparecount_timer;
/* Count timer control KVM register */
@@ -301,8 +304,6 @@ struct kvm_vcpu_arch {
/* Bitmask of pending exceptions to be cleared */
unsigned long pending_exceptions_clr;
- u32 pending_load_cause;
-
/* Save/Restore the entryhi register when are are preempted/scheduled back in */
unsigned long preempt_entryhi;
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
index ca1cc30c0..1958910b7 100644
--- a/arch/mips/kernel/relocate.c
+++ b/arch/mips/kernel/relocate.c
@@ -200,7 +200,7 @@ static inline __init unsigned long get_random_boot(void)
#if defined(CONFIG_USE_OF)
/* Get any additional entropy passed in device tree */
- {
+ if (initial_boot_params) {
int node, len;
u64 *prop;
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 43853ec6e..4d65285ca 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -791,15 +791,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result er = EMULATE_DONE;
- if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+ if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+ kvm_clear_c0_guest_status(cop0, ST0_ERL);
+ vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+ } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
kvm_read_c0_guest_epc(cop0));
kvm_clear_c0_guest_status(cop0, ST0_EXL);
vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
- } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
- kvm_clear_c0_guest_status(cop0, ST0_ERL);
- vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
} else {
kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
vcpu->arch.pc);
@@ -1522,13 +1522,25 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DO_MMIO;
+ unsigned long curr_pc;
u32 op, rt;
u32 bytes;
rt = inst.i_format.rt;
op = inst.i_format.opcode;
- vcpu->arch.pending_load_cause = cause;
+ /*
+ * Find the resume PC now while we have safe and easy access to the
+ * prior branch instruction, and save it for
+ * kvm_mips_complete_mmio_load() to restore later.
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+ vcpu->arch.io_pc = vcpu->arch.pc;
+ vcpu->arch.pc = curr_pc;
+
vcpu->arch.io_gpr = rt;
switch (op) {
@@ -2488,9 +2500,8 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
goto done;
}
- er = update_pc(vcpu, vcpu->arch.pending_load_cause);
- if (er == EMULATE_FAIL)
- return er;
+ /* Restore saved resume PC */
+ vcpu->arch.pc = vcpu->arch.io_pc;
switch (run->mmio.len) {
case 4:
@@ -2512,11 +2523,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
break;
}
- if (vcpu->arch.pending_load_cause & CAUSEF_BD)
- kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
- vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
- vcpu->mmio_needed);
-
done:
return er;
}
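Note: the load-emulation change fixes MMIO loads, in particular those sitting in branch delay slots. The resume PC is now computed up front via update_pc() — while the branch context needed to evaluate it is still intact — parked in the new io_pc field, and simply restored when the MMIO completes, instead of re-running update_pc() from the stashed pending_load_cause at completion time. A condensed sketch of the save/restore shape, using the fields this patch introduces:

    /* emulation time: precompute the post-load PC, then rewind */
    unsigned long curr_pc = vcpu->arch.pc;

    if (update_pc(vcpu, cause) == EMULATE_FAIL)   /* handles delay slots */
            return EMULATE_FAIL;
    vcpu->arch.io_pc = vcpu->arch.pc;             /* saved resume target */
    vcpu->arch.pc = curr_pc;

    /* completion time: restore, no recomputation needed */
    vcpu->arch.pc = vcpu->arch.io_pc;
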
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index d03422e5f..7ed036c57 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -106,8 +106,6 @@ linux_gateway_entry:
mtsp %r0,%sr4 /* get kernel space into sr4 */
mtsp %r0,%sr5 /* get kernel space into sr5 */
mtsp %r0,%sr6 /* get kernel space into sr6 */
- mfsp %sr7,%r1 /* save user sr7 */
- mtsp %r1,%sr3 /* and store it in sr3 */
#ifdef CONFIG_64BIT
/* for now we can *always* set the W bit on entry to the syscall
@@ -133,6 +131,14 @@ linux_gateway_entry:
depdi 0, 31, 32, %r21
1:
#endif
+
+ /* We use a rsm/ssm pair to prevent sr3 from being clobbered
+ * by external interrupts.
+ */
+ mfsp %sr7,%r1 /* save user sr7 */
+ rsm PSW_SM_I, %r0 /* disable interrupts */
+ mtsp %r1,%sr3 /* and store it in sr3 */
+
mfctl %cr30,%r1
xor %r1,%r30,%r30 /* ye olde xor trick */
xor %r1,%r30,%r1
@@ -147,6 +153,7 @@ linux_gateway_entry:
*/
mtsp %r0,%sr7 /* get kernel space into sr7 */
+ ssm PSW_SM_I, %r0 /* enable interrupts */
STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */
mfctl %cr30,%r1 /* get task ptr in %r1 */
LDREG TI_TASK(%r1),%r1
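Note: the reordering closes a window in the syscall gateway where an external interrupt, taken between saving the user %sr7 into %sr3 and switching %sr7 to kernel space, could clobber %sr3. Conceptually it is the usual interrupts-off critical section; a rough C analogue, purely illustrative (the real code is the assembly above, and sr3/sr7 are space registers, not variables):

    unsigned long flags;

    local_irq_save(flags);        /* rsm PSW_SM_I: interrupts off ...   */
    sr3 = user_sr7;               /* ... while sr3 carries live state   */
    sr7 = kernel_space;
    local_irq_restore(flags);     /* ssm PSW_SM_I: interrupts back on   */
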
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
index 01b8a13f0..391933296 100644
--- a/arch/powerpc/include/asm/cpuidle.h
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -26,7 +26,7 @@ extern u64 pnv_first_deep_stop_state;
std r0,0(r1); \
ptesync; \
ld r0,0(r1); \
-1: cmp cr0,r0,r0; \
+1: cmpd cr0,r0,r0; \
bne 1b; \
IDLE_INST; \
b .
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index f6f68f73e..99e1397b7 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -52,11 +52,23 @@ static inline int mm_is_core_local(struct mm_struct *mm)
return cpumask_subset(mm_cpumask(mm),
topology_sibling_cpumask(smp_processor_id()));
}
+
+static inline int mm_is_thread_local(struct mm_struct *mm)
+{
+ return cpumask_equal(mm_cpumask(mm),
+ cpumask_of(smp_processor_id()));
+}
+
#else
static inline int mm_is_core_local(struct mm_struct *mm)
{
return 1;
}
+
+static inline int mm_is_thread_local(struct mm_struct *mm)
+{
+ return 1;
+}
#endif
#endif /* __KERNEL__ */
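Note: mm_is_thread_local() is a deliberately stricter predicate than mm_is_core_local(): the mm must be active on exactly this CPU, not merely confined to the current core's sibling threads. The radix TLB code below switches to it because the local invalidate it issues (tlbiel) only affects the executing thread. A sketch of the resulting choice, borrowing the helper names used in tlb-radix.c:

    if (mm_is_thread_local(mm))
            _tlbiel_pid(pid, RIC_FLUSH_TLB);   /* local: this thread only */
    else
            _tlbie_pid(pid, RIC_FLUSH_TLB);    /* global: broadcast tlbie */
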
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index bd739fed2..72dac0b58 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -90,6 +90,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
* Threads will spin in HMT_LOW until the lock bit is cleared.
* r14 - pointer to core_idle_state
* r15 - used to load contents of core_idle_state
+ * r9 - used as a temporary variable
*/
core_idle_lock_held:
@@ -99,6 +100,8 @@ core_idle_lock_held:
bne 3b
HMT_MEDIUM
lwarx r15,0,r14
+ andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
+ bne core_idle_lock_held
blr
/*
@@ -163,12 +166,6 @@ _GLOBAL(pnv_powersave_common)
std r9,_MSR(r1)
std r1,PACAR1(r13)
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- /* Tell KVM we're entering idle */
- li r4,KVM_HWTHREAD_IN_IDLE
- stb r4,HSTATE_HWTHREAD_STATE(r13)
-#endif
-
/*
* Go to real mode to do the nap, as required by the architecture.
* Also, we need to be in real mode before setting hwthread_state,
@@ -185,6 +182,26 @@ _GLOBAL(pnv_powersave_common)
.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /* Tell KVM we're entering idle */
+ li r4,KVM_HWTHREAD_IN_IDLE
+ /******************************************************/
+ /* N O T E W E L L ! ! ! N O T E W E L L */
+ /* The following store to HSTATE_HWTHREAD_STATE(r13) */
+ /* MUST occur in real mode, i.e. with the MMU off, */
+ /* and the MMU must stay off until we clear this flag */
+ /* and test HSTATE_HWTHREAD_REQ(r13) in the system */
+ /* reset interrupt vector in exceptions-64s.S. */
+ /* The reason is that another thread can switch the */
+ /* MMU to a guest context whenever this flag is set */
+ /* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on, */
+ /* that would potentially cause this thread to start */
+ /* executing instructions from guest memory in */
+ /* hypervisor mode, leading to a host crash or data */
+ /* corruption, or worse. */
+ /******************************************************/
+ stb r4,HSTATE_HWTHREAD_STATE(r13)
+#endif
stb r3,PACA_THREAD_IDLE_STATE(r13)
cmpwi cr3,r3,PNV_THREAD_SLEEP
bge cr3,2f
@@ -250,6 +267,12 @@ enter_winkle:
* r3 - requested stop state
*/
power_enter_stop:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /* Tell KVM we're entering idle */
+ li r4,KVM_HWTHREAD_IN_IDLE
+ /* DO THIS IN REAL MODE! See comment above. */
+ stb r4,HSTATE_HWTHREAD_STATE(r13)
+#endif
/*
* Check if the requested state is a deep idle state.
*/
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 48df05ef5..d69606819 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -175,7 +175,7 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
if (unlikely(pid == MMU_NO_CONTEXT))
goto no_context;
- if (!mm_is_core_local(mm)) {
+ if (!mm_is_thread_local(mm)) {
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
@@ -201,7 +201,7 @@ void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
if (unlikely(pid == MMU_NO_CONTEXT))
goto no_context;
- if (!mm_is_core_local(mm)) {
+ if (!mm_is_thread_local(mm)) {
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
@@ -226,7 +226,7 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
pid = mm ? mm->context.id : 0;
if (unlikely(pid == MMU_NO_CONTEXT))
goto bail;
- if (!mm_is_core_local(mm)) {
+ if (!mm_is_thread_local(mm)) {
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
@@ -321,7 +321,7 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
{
unsigned long pid;
unsigned long addr;
- int local = mm_is_core_local(mm);
+ int local = mm_is_thread_local(mm);
unsigned long ap = mmu_get_ap(psize);
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;
diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c
index bd98b7d25..05c98bb85 100644
--- a/arch/s390/kvm/sthyi.c
+++ b/arch/s390/kvm/sthyi.c
@@ -315,7 +315,7 @@ static void fill_diag(struct sthyi_sctns *sctns)
if (r < 0)
goto out;
- diag224_buf = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA);
+ diag224_buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
if (!diag224_buf || diag224(diag224_buf))
goto out;
@@ -378,7 +378,7 @@ static void fill_diag(struct sthyi_sctns *sctns)
sctns->par.infpval1 |= PAR_WGHT_VLD;
out:
- kfree(diag224_buf);
+ free_page((unsigned long)diag224_buf);
vfree(diag204_buf);
}
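Note: kmalloc(PAGE_SIZE, ...) does not guarantee a page-aligned result (slab debugging can offset objects), while diag 224 needs a real, aligned page in DMA-addressable memory; __get_free_page(GFP_KERNEL | GFP_DMA) provides exactly that. A sketch of the corrected allocate/free pairing:

    void *buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);

    if (!buf)
            return;
    /* ... issue diag 0x224 with buf as the aligned response page ... */
    free_page((unsigned long)buf);     /* pairs with __get_free_page() */
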
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index f1e551ff5..d4045df5b 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -429,7 +429,7 @@ int __init save_microcode_in_initrd_amd(void)
* We need the physical address of the container for both bitness since
* boot_params.hdr.ramdisk_image is a physical address.
*/
- cont = __pa(container);
+ cont = __pa_nodebug(container);
cont_va = container;
#endif
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 98c9cd6f3..d5219b1c8 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1222,11 +1222,16 @@ void __init setup_arch(char **cmdline_p)
if (smp_found_config)
get_smp_config();
+ /*
+ * Systems w/o ACPI and mptables might not have it mapped the local
+ * APIC yet, but prefill_possible_map() might need to access it.
+ */
+ init_apic_mappings();
+
prefill_possible_map();
init_cpu_to_node();
- init_apic_mappings();
io_apic_init_mappings();
kvm_guest_init();
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 4e95d3eb2..cbd7b9258 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -5045,7 +5045,7 @@ done_prefixes:
/* Decode and fetch the destination operand: register or memory. */
rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
- if (ctxt->rip_relative)
+ if (ctxt->rip_relative && likely(ctxt->memopp))
ctxt->memopp->addr.mem.ea = address_mask(ctxt,
ctxt->memopp->addr.mem.ea + ctxt->_eip);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 699f87265..46f74d461 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7372,10 +7372,12 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
+ void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
+
kvmclock_reset(vcpu);
- free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
kvm_x86_ops->vcpu_free(vcpu);
+ free_cpumask_var(wbinvd_dirty_mask);
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
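Note: kvm_x86_ops->vcpu_free() releases the vcpu structure itself, so reading vcpu->arch.wbinvd_dirty_mask afterwards would be a use-after-free; the fix snapshots the pointer first and frees the cpumask last. The general save-then-free ordering, sketched with hypothetical names:

    void destroy_obj(struct obj *o)
    {
            void *aux = o->aux;   /* snapshot: 'o' dies in the next call */

            backend_free(o);      /* frees the containing object         */
            kfree(aux);           /* safe: uses the saved pointer        */
    }
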
diff --git a/block/Kconfig b/block/Kconfig
index 6da79e670..161491d0a 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -4,7 +4,6 @@
menuconfig BLOCK
bool "Enable the block layer" if EXPERT
default y
- select WBT
help
Provide block layer support for the kernel.
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 6d9257924..277b112b8 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -43,13 +43,10 @@ config IOSCHED_BFQ
tristate "BFQ I/O scheduler"
default n
---help---
- The BFQ I/O scheduler tries to distribute bandwidth among
- all processes according to their weights.
- It aims at distributing the bandwidth as desired, independently of
- the disk parameters and with any workload. It also tries to
- guarantee low latency to interactive and soft real-time
- applications. If compiled built-in (saying Y here), BFQ can
- be configured to support hierarchical scheduling.
+ The BFQ I/O scheduler distributes bandwidth among all
+ processes according to their weights, regardless of the
+ device parameters and with any workload. It also guarantees
+ a low latency to interactive and soft real-time applications.
config BFQ_GROUP_IOSCHED
bool "BFQ hierarchical scheduling support"
diff --git a/block/Makefile b/block/Makefile
index b7aa613b4..4a3668393 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
- blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
+ blk-lib.o blk-mq.o blk-mq-tag.o \
blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \
genhd.o scsi_ioctl.o partition-generic.o ioprio.o \
badblocks.o partitions/
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 569988bda..b50ae8ec6 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -649,18 +649,6 @@ static void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
goto out;
- /*
- * If we have a non-root cgroup, we can depend on that to
- * do proper throttling of writes. Turn off wbt for that
- * case.
- */
- if (bio_blkcg(bio) != &blkcg_root) {
- struct request_queue *q = bfqd->queue;
-
- if (q->rq_wb)
- wbt_disable(q->rq_wb);
- }
-
bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
bic->blkcg_serial_nr = serial_nr;
out:
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index ecb949ec6..0f3081dbd 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -1,5 +1,5 @@
/*
- * Budget Fair Queueing (BFQ) disk scheduler.
+ * Budget Fair Queueing (BFQ) I/O scheduler.
*
* Based on ideas and code from CFQ:
* Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
@@ -77,19 +77,19 @@
static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
/* Maximum backwards seek, in KiB. */
-static const int bfq_back_max = 16 * 1024;
+static const int bfq_back_max = (16 * 1024);
/* Penalty of a backwards seek, in number of sectors. */
static const int bfq_back_penalty = 2;
/* Idling period duration, in ns. */
-static u32 bfq_slice_idle = NSEC_PER_SEC / 125;
+static u32 bfq_slice_idle = (NSEC_PER_SEC / 125);
/* Minimum number of assigned budgets for which stats are safe to compute. */
static const int bfq_stats_min_budgets = 194;
/* Default maximum budget values, in sectors and number of requests. */
-static const int bfq_default_max_budget = 16 * 1024;
+static const int bfq_default_max_budget = (16 * 1024);
/*
* Async to sync throughput distribution is controlled as follows:
@@ -99,7 +99,7 @@ static const int bfq_default_max_budget = 16 * 1024;
static const int bfq_async_charge_factor = 10;
/* Default timeout values, in jiffies, approximating CFQ defaults. */
-static const int bfq_timeout = HZ / 8;
+static const int bfq_timeout = (HZ / 8);
struct kmem_cache *bfq_pool;
@@ -117,7 +117,7 @@ struct kmem_cache *bfq_pool;
/* Min number of samples required to perform peak-rate update */
#define BFQ_RATE_MIN_SAMPLES 32
/* Min observation time interval required to perform a peak-rate update (ns) */
-#define BFQ_RATE_MIN_INTERVAL 300*NSEC_PER_MSEC
+#define BFQ_RATE_MIN_INTERVAL (300*NSEC_PER_MSEC)
/* Target observation time interval for a peak-rate update (ns) */
#define BFQ_RATE_REF_INTERVAL NSEC_PER_SEC
@@ -2179,9 +2179,13 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
* not only expires, but also remains with no
* request.
*/
- bfqq->last_wr_start_finish += jiffies -
- max_t(unsigned long, bfqq->last_wr_start_finish,
- bfqq->budget_timeout);
+ if (time_after(bfqq->budget_timeout,
+ bfqq->last_wr_start_finish))
+ bfqq->last_wr_start_finish +=
+ jiffies - bfqq->budget_timeout;
+ else
+ bfqq->last_wr_start_finish = jiffies;
+
if (time_is_after_jiffies(bfqq->last_wr_start_finish)) {
pr_crit(
"BFQ WARNING:last %lu budget %lu jiffies %lu",
@@ -5208,7 +5212,7 @@ static struct blkcg_policy blkcg_policy_bfq = {
static int __init bfq_init(void)
{
int ret;
- char msg[50] = "BFQ I/O-scheduler: v8r4";
+ char msg[50] = "BFQ I/O-scheduler: v8r5";
#ifdef CONFIG_BFQ_GROUP_IOSCHED
ret = blkcg_policy_register(&blkcg_policy_bfq);
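Note: the last_wr_start_finish rework above replaces raw jiffies arithmetic with time_after(), which stays correct across jiffies wraparound, and falls back to plain jiffies when budget_timeout is not the later timestamp. A two-line demonstration of why the raw comparison is unsafe:

    unsigned long a = ULONG_MAX - 1;   /* just before the counter wraps */
    unsigned long b = a + 10;          /* after the wrap, b == 8        */

    /* b > a is false, yet b is chronologically later: */
    WARN_ON(b > a);                    /* fires */
    WARN_ON(!time_after(b, a));        /* does not fire */
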
diff --git a/block/bfq.h b/block/bfq.h
index ea1e7d852..b80abe0e3 100644
--- a/block/bfq.h
+++ b/block/bfq.h
@@ -1,5 +1,5 @@
/*
- * BFQ-v8r4 for 4.8.0: data structures and common functions prototypes.
+ * BFQ-v8r5 for 4.8.0: data structures and common functions prototypes.
*
* Based on ideas and code from CFQ:
* Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
diff --git a/block/blk-core.c b/block/blk-core.c
index cdcb188e8..36c7ac328 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -33,7 +33,6 @@
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
-#include <linux/wbt.h>
#define CREATE_TRACE_POINTS
#include <trace/events/block.h>
@@ -883,8 +882,6 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
fail:
blk_free_flush_queue(q->fq);
- wbt_exit(q->rq_wb);
- q->rq_wb = NULL;
return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -1349,7 +1346,6 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
blk_delete_timer(rq);
blk_clear_rq_complete(rq);
trace_block_rq_requeue(q, rq);
- wbt_requeue(q->rq_wb, &rq->wb_stat);
if (rq->cmd_flags & REQ_QUEUED)
blk_queue_end_tag(q, rq);
@@ -1440,8 +1436,6 @@ void __blk_put_request(struct request_queue *q, struct request *req)
/* this is a bio leak */
WARN_ON(req->bio != NULL);
- wbt_done(q->rq_wb, &req->wb_stat);
-
/*
* Request may not have originated from ll_rw_blk. if not,
* it didn't come out of our reserved rq pools
@@ -1673,7 +1667,6 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
struct request *req;
unsigned int request_count = 0;
- unsigned int wb_acct;
/*
* low level driver can indicate that it wants pages above a
@@ -1726,8 +1719,6 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
}
get_rq:
- wb_acct = wbt_wait(q->rq_wb, bio->bi_opf, q->queue_lock);
-
/*
* This sync check and mask will be re-done in init_request_from_bio(),
* but we need to set it earlier to expose the sync flag to the
@@ -1747,14 +1738,11 @@ get_rq:
*/
req = get_request(q, bio_data_dir(bio), rw_flags, bio, GFP_NOIO);
if (IS_ERR(req)) {
- __wbt_done(q->rq_wb, wb_acct);
bio->bi_error = PTR_ERR(req);
bio_endio(bio);
goto out_unlock;
}
- wbt_track(&req->wb_stat, wb_acct);
-
/*
* After dropping the lock and possibly sleeping here, our request
* may now be mergeable after it had proven unmergeable (above).
@@ -2487,8 +2475,6 @@ void blk_start_request(struct request *req)
{
blk_dequeue_request(req);
- wbt_issue(req->q->rq_wb, &req->wb_stat);
-
/*
* We are now handing the request to the hardware, initialize
* resid_len to full count and add the timeout handler.
@@ -2556,8 +2542,6 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
trace_block_rq_complete(req->q, req, nr_bytes);
- blk_stat_add(&req->q->rq_stats[rq_data_dir(req)], req);
-
if (!req->bio)
return false;
@@ -2725,10 +2709,9 @@ void blk_finish_request(struct request *req, int error)
blk_account_io_done(req);
- if (req->end_io) {
- wbt_done(req->q->rq_wb, &req->wb_stat);
+ if (req->end_io)
req->end_io(req, error);
- } else {
+ else {
if (blk_bidi_rq(req))
__blk_put_request(req->next_rq->q, req->next_rq);
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index b66bbf13c..fe822aa5b 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -247,47 +247,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
return ret;
}
-static void blk_mq_stat_clear(struct blk_mq_hw_ctx *hctx)
-{
- struct blk_mq_ctx *ctx;
- unsigned int i;
-
- hctx_for_each_ctx(hctx, ctx, i) {
- blk_stat_init(&ctx->stat[0]);
- blk_stat_init(&ctx->stat[1]);
- }
-}
-
-static ssize_t blk_mq_hw_sysfs_stat_store(struct blk_mq_hw_ctx *hctx,
- const char *page, size_t count)
-{
- blk_mq_stat_clear(hctx);
- return count;
-}
-
-static ssize_t print_stat(char *page, struct blk_rq_stat *stat, const char *pre)
-{
- return sprintf(page, "%s samples=%llu, mean=%lld, min=%lld, max=%lld\n",
- pre, (long long) stat->nr_samples,
- (long long) stat->mean, (long long) stat->min,
- (long long) stat->max);
-}
-
-static ssize_t blk_mq_hw_sysfs_stat_show(struct blk_mq_hw_ctx *hctx, char *page)
-{
- struct blk_rq_stat stat[2];
- ssize_t ret;
-
- blk_stat_init(&stat[0]);
- blk_stat_init(&stat[1]);
-
- blk_hctx_stat_get(hctx, stat);
-
- ret = print_stat(page, &stat[0], "read :");
- ret += print_stat(page + ret, &stat[1], "write:");
- return ret;
-}
-
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
.attr = {.name = "dispatched", .mode = S_IRUGO },
.show = blk_mq_sysfs_dispatched_show,
@@ -345,11 +304,6 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
.attr = {.name = "io_poll", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_poll_show,
};
-static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_stat = {
- .attr = {.name = "stats", .mode = S_IRUGO | S_IWUSR },
- .show = blk_mq_hw_sysfs_stat_show,
- .store = blk_mq_hw_sysfs_stat_store,
-};
static struct attribute *default_hw_ctx_attrs[] = {
&blk_mq_hw_sysfs_queued.attr,
@@ -360,7 +314,6 @@ static struct attribute *default_hw_ctx_attrs[] = {
&blk_mq_hw_sysfs_cpus.attr,
&blk_mq_hw_sysfs_active.attr,
&blk_mq_hw_sysfs_poll.attr,
- &blk_mq_hw_sysfs_stat.attr,
NULL,
};
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 815e2ac02..c207fa987 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -22,7 +22,6 @@
#include <linux/sched/sysctl.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
-#include <linux/wbt.h>
#include <trace/events/block.h>
@@ -30,7 +29,6 @@
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
-#include "blk-stat.h"
static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);
@@ -332,8 +330,6 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
if (rq->cmd_flags & REQ_MQ_INFLIGHT)
atomic_dec(&hctx->nr_active);
-
- wbt_done(q->rq_wb, &rq->wb_stat);
rq->cmd_flags = 0;
clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
@@ -366,7 +362,6 @@ inline void __blk_mq_end_request(struct request *rq, int error)
blk_account_io_done(rq);
if (rq->end_io) {
- wbt_done(rq->q->rq_wb, &rq->wb_stat);
rq->end_io(rq, error);
} else {
if (unlikely(blk_bidi_rq(rq)))
@@ -417,19 +412,10 @@ static void blk_mq_ipi_complete_request(struct request *rq)
put_cpu();
}
-static void blk_mq_stat_add(struct request *rq)
-{
- struct blk_rq_stat *stat = &rq->mq_ctx->stat[rq_data_dir(rq)];
-
- blk_stat_add(stat, rq);
-}
-
static void __blk_mq_complete_request(struct request *rq)
{
struct request_queue *q = rq->q;
- blk_mq_stat_add(rq);
-
if (!q->softirq_done_fn)
blk_mq_end_request(rq, rq->errors);
else
@@ -473,8 +459,6 @@ void blk_mq_start_request(struct request *rq)
if (unlikely(blk_bidi_rq(rq)))
rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
- wbt_issue(q->rq_wb, &rq->wb_stat);
-
blk_add_timer(rq);
/*
@@ -510,7 +494,6 @@ static void __blk_mq_requeue_request(struct request *rq)
struct request_queue *q = rq->q;
trace_block_rq_requeue(q, rq);
- wbt_requeue(q->rq_wb, &rq->wb_stat);
if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
if (q->dma_drain_size && blk_rq_bytes(rq))
@@ -1329,7 +1312,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
struct blk_plug *plug;
struct request *same_queue_rq = NULL;
blk_qc_t cookie;
- unsigned int wb_acct;
blk_queue_bounce(q, &bio);
@@ -1344,15 +1326,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
return BLK_QC_T_NONE;
- wb_acct = wbt_wait(q->rq_wb, bio->bi_opf, NULL);
-
rq = blk_mq_map_request(q, bio, &data);
- if (unlikely(!rq)) {
- __wbt_done(q->rq_wb, wb_acct);
+ if (unlikely(!rq))
return BLK_QC_T_NONE;
- }
-
- wbt_track(&rq->wb_stat, wb_acct);
cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
@@ -1429,7 +1405,6 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
struct blk_map_ctx data;
struct request *rq;
blk_qc_t cookie;
- unsigned int wb_acct;
blk_queue_bounce(q, &bio);
@@ -1446,15 +1421,9 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
} else
request_count = blk_plug_queued_count(q);
- wb_acct = wbt_wait(q->rq_wb, bio->bi_opf, NULL);
-
rq = blk_mq_map_request(q, bio, &data);
- if (unlikely(!rq)) {
- __wbt_done(q->rq_wb, wb_acct);
+ if (unlikely(!rq))
return BLK_QC_T_NONE;
- }
-
- wbt_track(&rq->wb_stat, wb_acct);
cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
@@ -1838,8 +1807,6 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
spin_lock_init(&__ctx->lock);
INIT_LIST_HEAD(&__ctx->rq_list);
__ctx->queue = q;
- blk_stat_init(&__ctx->stat[0]);
- blk_stat_init(&__ctx->stat[1]);
/* If the cpu isn't online, the cpu is mapped to first hctx */
if (!cpu_online(i))
@@ -2178,9 +2145,6 @@ void blk_mq_free_queue(struct request_queue *q)
list_del_init(&q->all_q_node);
mutex_unlock(&all_q_mutex);
- wbt_exit(q->rq_wb);
- q->rq_wb = NULL;
-
blk_mq_del_queue_tag_set(q);
blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index e107f700f..9087b1103 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -1,8 +1,6 @@
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H
-#include "blk-stat.h"
-
struct blk_mq_tag_set;
struct blk_mq_ctx {
@@ -22,7 +20,6 @@ struct blk_mq_ctx {
/* incremented at completion time */
unsigned long ____cacheline_aligned_in_smp rq_completed[2];
- struct blk_rq_stat stat[2];
struct request_queue *queue;
struct kobject kobj;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 746dc9fee..f679ae122 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -832,19 +832,6 @@ void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
/**
- * blk_set_queue_depth - tell the block layer about the device queue depth
- * @q: the request queue for the device
- * @depth: queue depth
- *
- */
-void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
-{
- q->queue_depth = depth;
- wbt_set_queue_depth(q->rq_wb, depth);
-}
-EXPORT_SYMBOL(blk_set_queue_depth);
-
-/**
* blk_queue_write_cache - configure queue's write cache
* @q: the request queue for the device
* @wc: write back cache on or off
@@ -864,8 +851,6 @@ void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
else
queue_flag_clear(QUEUE_FLAG_FUA, q);
spin_unlock_irq(q->queue_lock);
-
- wbt_set_write_cache(q->rq_wb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
diff --git a/block/blk-stat.c b/block/blk-stat.c
deleted file mode 100644
index bdb16d84b..000000000
--- a/block/blk-stat.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Block stat tracking code
- *
- * Copyright (C) 2016 Jens Axboe
- */
-#include <linux/kernel.h>
-#include <linux/blk-mq.h>
-
-#include "blk-stat.h"
-#include "blk-mq.h"
-
-static void blk_stat_flush_batch(struct blk_rq_stat *stat)
-{
- if (!stat->nr_batch)
- return;
- if (!stat->nr_samples)
- stat->mean = div64_s64(stat->batch, stat->nr_batch);
- else {
- stat->mean = div64_s64((stat->mean * stat->nr_samples) +
- stat->batch,
- stat->nr_samples + stat->nr_batch);
- }
-
- stat->nr_samples += stat->nr_batch;
- stat->nr_batch = stat->batch = 0;
-}
-
-void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
-{
- if (!src->nr_samples)
- return;
-
- blk_stat_flush_batch(src);
-
- dst->min = min(dst->min, src->min);
- dst->max = max(dst->max, src->max);
-
- if (!dst->nr_samples)
- dst->mean = src->mean;
- else {
- dst->mean = div64_s64((src->mean * src->nr_samples) +
- (dst->mean * dst->nr_samples),
- dst->nr_samples + src->nr_samples);
- }
- dst->nr_samples += src->nr_samples;
-}
-
-static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
-{
- struct blk_mq_hw_ctx *hctx;
- struct blk_mq_ctx *ctx;
- uint64_t latest = 0;
- int i, j, nr;
-
- blk_stat_init(&dst[0]);
- blk_stat_init(&dst[1]);
-
- nr = 0;
- do {
- uint64_t newest = 0;
-
- queue_for_each_hw_ctx(q, hctx, i) {
- hctx_for_each_ctx(hctx, ctx, j) {
- if (!ctx->stat[0].nr_samples &&
- !ctx->stat[1].nr_samples)
- continue;
- if (ctx->stat[0].time > newest)
- newest = ctx->stat[0].time;
- if (ctx->stat[1].time > newest)
- newest = ctx->stat[1].time;
- }
- }
-
- /*
- * No samples
- */
- if (!newest)
- break;
-
- if (newest > latest)
- latest = newest;
-
- queue_for_each_hw_ctx(q, hctx, i) {
- hctx_for_each_ctx(hctx, ctx, j) {
- if (ctx->stat[0].time == newest) {
- blk_stat_sum(&dst[0], &ctx->stat[0]);
- nr++;
- }
- if (ctx->stat[1].time == newest) {
- blk_stat_sum(&dst[1], &ctx->stat[1]);
- nr++;
- }
- }
- }
- /*
- * If we race on finding an entry, just loop back again.
- * Should be very rare.
- */
- } while (!nr);
-
- dst[0].time = dst[1].time = latest;
-}
-
-void blk_queue_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
-{
- if (q->mq_ops)
- blk_mq_stat_get(q, dst);
- else {
- memcpy(&dst[0], &q->rq_stats[0], sizeof(struct blk_rq_stat));
- memcpy(&dst[1], &q->rq_stats[1], sizeof(struct blk_rq_stat));
- }
-}
-
-void blk_hctx_stat_get(struct blk_mq_hw_ctx *hctx, struct blk_rq_stat *dst)
-{
- struct blk_mq_ctx *ctx;
- unsigned int i, nr;
-
- nr = 0;
- do {
- uint64_t newest = 0;
-
- hctx_for_each_ctx(hctx, ctx, i) {
- if (!ctx->stat[0].nr_samples &&
- !ctx->stat[1].nr_samples)
- continue;
-
- if (ctx->stat[0].time > newest)
- newest = ctx->stat[0].time;
- if (ctx->stat[1].time > newest)
- newest = ctx->stat[1].time;
- }
-
- if (!newest)
- break;
-
- hctx_for_each_ctx(hctx, ctx, i) {
- if (ctx->stat[0].time == newest) {
- blk_stat_sum(&dst[0], &ctx->stat[0]);
- nr++;
- }
- if (ctx->stat[1].time == newest) {
- blk_stat_sum(&dst[1], &ctx->stat[1]);
- nr++;
- }
- }
- /*
- * If we race on finding an entry, just loop back again.
- * Should be very rare, as the window is only updated
- * occasionally
- */
- } while (!nr);
-}
-
-static void __blk_stat_init(struct blk_rq_stat *stat, s64 time_now)
-{
- stat->min = -1ULL;
- stat->max = stat->nr_samples = stat->mean = 0;
- stat->batch = stat->nr_batch = 0;
- stat->time = time_now & BLK_STAT_MASK;
-}
-
-void blk_stat_init(struct blk_rq_stat *stat)
-{
- __blk_stat_init(stat, ktime_to_ns(ktime_get()));
-}
-
-static bool __blk_stat_is_current(struct blk_rq_stat *stat, s64 now)
-{
- return (now & BLK_STAT_MASK) == (stat->time & BLK_STAT_MASK);
-}
-
-bool blk_stat_is_current(struct blk_rq_stat *stat)
-{
- return __blk_stat_is_current(stat, ktime_to_ns(ktime_get()));
-}
-
-void blk_stat_add(struct blk_rq_stat *stat, struct request *rq)
-{
- s64 now, value;
- u64 rq_time = wbt_issue_stat_get_time(&rq->wb_stat);
-
- now = ktime_to_ns(ktime_get());
- if (now < rq_time)
- return;
-
- if (!__blk_stat_is_current(stat, now))
- __blk_stat_init(stat, now);
-
- value = now - rq_time;
- if (value > stat->max)
- stat->max = value;
- if (value < stat->min)
- stat->min = value;
-
- if (stat->batch + value < stat->batch ||
- stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
- blk_stat_flush_batch(stat);
-
- stat->batch += value;
- stat->nr_batch++;
-}
-
-void blk_stat_clear(struct request_queue *q)
-{
- if (q->mq_ops) {
- struct blk_mq_hw_ctx *hctx;
- struct blk_mq_ctx *ctx;
- int i, j;
-
- queue_for_each_hw_ctx(q, hctx, i) {
- hctx_for_each_ctx(hctx, ctx, j) {
- blk_stat_init(&ctx->stat[0]);
- blk_stat_init(&ctx->stat[1]);
- }
- }
- } else {
- blk_stat_init(&q->rq_stats[0]);
- blk_stat_init(&q->rq_stats[1]);
- }
-}
diff --git a/block/blk-stat.h b/block/blk-stat.h
deleted file mode 100644
index 376a6ccd9..000000000
--- a/block/blk-stat.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef BLK_STAT_H
-#define BLK_STAT_H
-
-/*
- * ~0.13s window as a power-of-2 (2^27 nsecs)
- */
-#define BLK_STAT_NSEC 134217728ULL
-#define BLK_STAT_MASK ~(BLK_STAT_NSEC - 1)
-
-void blk_stat_add(struct blk_rq_stat *, struct request *);
-void blk_hctx_stat_get(struct blk_mq_hw_ctx *, struct blk_rq_stat *);
-void blk_queue_stat_get(struct request_queue *, struct blk_rq_stat *);
-void blk_stat_clear(struct request_queue *q);
-void blk_stat_init(struct blk_rq_stat *);
-void blk_stat_sum(struct blk_rq_stat *, struct blk_rq_stat *);
-bool blk_stat_is_current(struct blk_rq_stat *);
-
-#endif
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 85c3dc223..f87a7e747 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -10,7 +10,6 @@
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>
-#include <linux/wbt.h>
#include "blk.h"
#include "blk-mq.h"
@@ -42,19 +41,6 @@ queue_var_store(unsigned long *var, const char *page, size_t count)
return count;
}
-static ssize_t queue_var_store64(u64 *var, const char *page)
-{
- int err;
- u64 v;
-
- err = kstrtou64(page, 10, &v);
- if (err < 0)
- return err;
-
- *var = v;
- return 0;
-}
-
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
return queue_var_show(q->nr_requests, (page));
@@ -361,58 +347,6 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
return ret;
}
-static ssize_t queue_wb_win_show(struct request_queue *q, char *page)
-{
- if (!q->rq_wb)
- return -EINVAL;
-
- return sprintf(page, "%llu\n", div_u64(q->rq_wb->win_nsec, 1000));
-}
-
-static ssize_t queue_wb_win_store(struct request_queue *q, const char *page,
- size_t count)
-{
- ssize_t ret;
- u64 val;
-
- if (!q->rq_wb)
- return -EINVAL;
-
- ret = queue_var_store64(&val, page);
- if (ret < 0)
- return ret;
-
- q->rq_wb->win_nsec = val * 1000ULL;
- wbt_update_limits(q->rq_wb);
- return count;
-}
-
-static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
-{
- if (!q->rq_wb)
- return -EINVAL;
-
- return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000));
-}
-
-static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
- size_t count)
-{
- ssize_t ret;
- u64 val;
-
- if (!q->rq_wb)
- return -EINVAL;
-
- ret = queue_var_store64(&val, page);
- if (ret < 0)
- return ret;
-
- q->rq_wb->min_lat_nsec = val * 1000ULL;
- wbt_update_limits(q->rq_wb);
- return count;
-}
-
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
@@ -450,26 +384,6 @@ static ssize_t queue_dax_show(struct request_queue *q, char *page)
return queue_var_show(blk_queue_dax(q), page);
}
-static ssize_t print_stat(char *page, struct blk_rq_stat *stat, const char *pre)
-{
- return sprintf(page, "%s samples=%llu, mean=%lld, min=%lld, max=%lld\n",
- pre, (long long) stat->nr_samples,
- (long long) stat->mean, (long long) stat->min,
- (long long) stat->max);
-}
-
-static ssize_t queue_stats_show(struct request_queue *q, char *page)
-{
- struct blk_rq_stat stat[2];
- ssize_t ret;
-
- blk_queue_stat_get(q, stat);
-
- ret = print_stat(page, &stat[0], "read :");
- ret += print_stat(page + ret, &stat[1], "write:");
- return ret;
-}
-
static struct queue_sysfs_entry queue_requests_entry = {
.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
.show = queue_requests_show,
@@ -612,23 +526,6 @@ static struct queue_sysfs_entry queue_dax_entry = {
.show = queue_dax_show,
};
-static struct queue_sysfs_entry queue_stats_entry = {
- .attr = {.name = "stats", .mode = S_IRUGO },
- .show = queue_stats_show,
-};
-
-static struct queue_sysfs_entry queue_wb_lat_entry = {
- .attr = {.name = "wbt_lat_usec", .mode = S_IRUGO | S_IWUSR },
- .show = queue_wb_lat_show,
- .store = queue_wb_lat_store,
-};
-
-static struct queue_sysfs_entry queue_wb_win_entry = {
- .attr = {.name = "wbt_window_usec", .mode = S_IRUGO | S_IWUSR },
- .show = queue_wb_win_show,
- .store = queue_wb_win_store,
-};
-
static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
@@ -656,9 +553,6 @@ static struct attribute *default_attrs[] = {
&queue_poll_entry.attr,
&queue_wc_entry.attr,
&queue_dax_entry.attr,
- &queue_stats_entry.attr,
- &queue_wb_lat_entry.attr,
- &queue_wb_win_entry.attr,
NULL,
};
@@ -773,49 +667,6 @@ struct kobj_type blk_queue_ktype = {
.release = blk_release_queue,
};
-static void blk_wb_stat_get(void *data, struct blk_rq_stat *stat)
-{
- blk_queue_stat_get(data, stat);
-}
-
-static void blk_wb_stat_clear(void *data)
-{
- blk_stat_clear(data);
-}
-
-static bool blk_wb_stat_is_current(struct blk_rq_stat *stat)
-{
- return blk_stat_is_current(stat);
-}
-
-static struct wb_stat_ops wb_stat_ops = {
- .get = blk_wb_stat_get,
- .is_current = blk_wb_stat_is_current,
- .clear = blk_wb_stat_clear,
-};
-
-static void blk_wb_init(struct request_queue *q)
-{
- struct rq_wb *rwb;
-
- rwb = wbt_init(&q->backing_dev_info, &wb_stat_ops, q);
-
- /*
- * If this fails, we don't get throttling
- */
- if (IS_ERR(rwb))
- return;
-
- if (blk_queue_nonrot(q))
- rwb->min_lat_nsec = 2000000ULL;
- else
- rwb->min_lat_nsec = 75000000ULL;
-
- wbt_set_queue_depth(rwb, blk_queue_depth(q));
- wbt_set_write_cache(rwb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
- q->rq_wb = rwb;
-}
-
int blk_register_queue(struct gendisk *disk)
{
int ret;
@@ -855,8 +706,6 @@ int blk_register_queue(struct gendisk *disk)
if (q->mq_ops)
blk_mq_register_disk(disk);
- blk_wb_init(q);
-
if (!q->request_fn)
return 0;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f336dcbed..5e24d8803 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3771,11 +3771,9 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
struct cfq_data *cfqd = cic_to_cfqd(cic);
struct cfq_queue *cfqq;
uint64_t serial_nr;
- bool nonroot_cg;
rcu_read_lock();
serial_nr = bio_blkcg(bio)->css.serial_nr;
- nonroot_cg = bio_blkcg(bio) != &blkcg_root;
rcu_read_unlock();
/*
@@ -3786,17 +3784,6 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
return;
/*
- * If we have a non-root cgroup, we can depend on that to
- * do proper throttling of writes. Turn off wbt for that
- * case.
- */
- if (nonroot_cg) {
- struct request_queue *q = cfqd->queue;
-
- wbt_disable(q->rq_wb);
- }
-
- /*
* Drop reference to queues. New queues will be assigned in new
* group upon arrival of fresh requests.
*/
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 16288e777..4b1e4eabe 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1003,7 +1003,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
- uint32_t desc)
+ u32 desc, bool need_strong_ref)
{
struct rb_node *n = proc->refs_by_desc.rb_node;
struct binder_ref *ref;
@@ -1011,12 +1011,16 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
while (n) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (desc < ref->desc)
+ if (desc < ref->desc) {
n = n->rb_left;
- else if (desc > ref->desc)
+ } else if (desc > ref->desc) {
n = n->rb_right;
- else
+ } else if (need_strong_ref && !ref->strong) {
+ binder_user_error("tried to use weak ref as strong ref\n");
+ return NULL;
+ } else {
return ref;
+ }
}
return NULL;
}
@@ -1286,7 +1290,10 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
- struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ struct binder_ref *ref;
+
+ ref = binder_get_ref(proc, fp->handle,
+ fp->type == BINDER_TYPE_HANDLE);
if (ref == NULL) {
pr_err("transaction release %d bad handle %d\n",
@@ -1381,7 +1388,7 @@ static void binder_transaction(struct binder_proc *proc,
if (tr->target.handle) {
struct binder_ref *ref;
- ref = binder_get_ref(proc, tr->target.handle);
+ ref = binder_get_ref(proc, tr->target.handle, true);
if (ref == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
@@ -1578,7 +1585,9 @@ static void binder_transaction(struct binder_proc *proc,
fp->type = BINDER_TYPE_HANDLE;
else
fp->type = BINDER_TYPE_WEAK_HANDLE;
+ fp->binder = 0;
fp->handle = ref->desc;
+ fp->cookie = 0;
binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
&thread->todo);
@@ -1590,7 +1599,10 @@ static void binder_transaction(struct binder_proc *proc,
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
- struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ struct binder_ref *ref;
+
+ ref = binder_get_ref(proc, fp->handle,
+ fp->type == BINDER_TYPE_HANDLE);
if (ref == NULL) {
binder_user_error("%d:%d got transaction with invalid handle, %d\n",
@@ -1625,7 +1637,9 @@ static void binder_transaction(struct binder_proc *proc,
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
+ fp->binder = 0;
fp->handle = new_ref->desc;
+ fp->cookie = 0;
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
trace_binder_transaction_ref_to_ref(t, ref,
new_ref);
@@ -1679,6 +1693,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_debug(BINDER_DEBUG_TRANSACTION,
" fd %d -> %d\n", fp->handle, target_fd);
/* TODO: fput? */
+ fp->binder = 0;
fp->handle = target_fd;
} break;
@@ -1801,7 +1816,9 @@ static int binder_thread_write(struct binder_proc *proc,
ref->desc);
}
} else
- ref = binder_get_ref(proc, target);
+ ref = binder_get_ref(proc, target,
+ cmd == BC_ACQUIRE ||
+ cmd == BC_RELEASE);
if (ref == NULL) {
binder_user_error("%d:%d refcount change on invalid ref %d\n",
proc->pid, thread->pid, target);
@@ -1997,7 +2014,7 @@ static int binder_thread_write(struct binder_proc *proc,
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- ref = binder_get_ref(proc, target);
+ ref = binder_get_ref(proc, target, false);
if (ref == NULL) {
binder_user_error("%d:%d %s invalid ref %d\n",
proc->pid, thread->pid,
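
The binder_get_ref() change threads a need_strong_ref flag through every descriptor lookup so a weak handle can no longer stand in where a strong reference is required. A minimal standalone sketch of that rule, using illustrative names rather than the kernel's rbtree types:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct ref_node {
	uint32_t desc;			/* lookup key (handle descriptor) */
	int strong;			/* strong reference count */
	struct ref_node *left, *right;
};

static struct ref_node *ref_lookup(struct ref_node *root, uint32_t desc,
				   bool need_strong)
{
	while (root) {
		if (desc < root->desc)
			root = root->left;
		else if (desc > root->desc)
			root = root->right;
		else if (need_strong && !root->strong)
			return NULL;	/* a weak ref may not act as strong */
		else
			return root;
	}
	return NULL;
}
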
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 5da47e26a..4aae0d27e 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1540,19 +1540,29 @@ static void remove_port_data(struct port *port)
spin_lock_irq(&port->inbuf_lock);
/* Remove unused data this port might have received. */
discard_port_data(port);
+ spin_unlock_irq(&port->inbuf_lock);
/* Remove buffers we queued up for the Host to send us data in. */
- while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
- free_buf(buf, true);
- spin_unlock_irq(&port->inbuf_lock);
+ do {
+ spin_lock_irq(&port->inbuf_lock);
+ buf = virtqueue_detach_unused_buf(port->in_vq);
+ spin_unlock_irq(&port->inbuf_lock);
+ if (buf)
+ free_buf(buf, true);
+ } while (buf);
spin_lock_irq(&port->outvq_lock);
reclaim_consumed_buffers(port);
+ spin_unlock_irq(&port->outvq_lock);
/* Free pending buffers from the out-queue. */
- while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
- free_buf(buf, true);
- spin_unlock_irq(&port->outvq_lock);
+ do {
+ spin_lock_irq(&port->outvq_lock);
+ buf = virtqueue_detach_unused_buf(port->out_vq);
+ spin_unlock_irq(&port->outvq_lock);
+ if (buf)
+ free_buf(buf, true);
+ } while (buf);
}
/*
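
The virtio_console hunk restructures both drain loops so that free_buf(), which may sleep, never runs with the inbuf/outvq spinlock held: the lock now covers only the detach of a single buffer. A userspace sketch of the same shape, assuming a singly linked queue:

#include <pthread.h>
#include <stdlib.h>

struct buf {
	struct buf *next;
};

/* Detach one buffer at a time under the lock; free it with the lock
 * dropped, since the free path must be allowed to sleep. */
static void drain_queue(pthread_spinlock_t *lock, struct buf **head)
{
	struct buf *b;

	do {
		pthread_spin_lock(lock);
		b = *head;
		if (b)
			*head = b->next;
		pthread_spin_unlock(lock);
		if (b)
			free(b);
	} while (b);
}
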
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b46547e90..8c347f5c2 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1133,10 +1133,8 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
-static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
- int pstate = cpu->pstate.min_pstate;
-
trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
cpu->pstate.current_pstate = pstate;
/*
@@ -1148,6 +1146,20 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
pstate_funcs.get_val(cpu, pstate));
}
+static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+{
+ intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+}
+
+static void intel_pstate_max_within_limits(struct cpudata *cpu)
+{
+ int min_pstate, max_pstate;
+
+ update_turbo_state();
+ intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
+ intel_pstate_set_pstate(cpu, max_pstate);
+}
+
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
cpu->pstate.min_pstate = pstate_funcs.get_min();
@@ -1465,7 +1477,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
policy->cpuinfo.max_freq, policy->max);
- cpu = all_cpu_data[0];
+ cpu = all_cpu_data[policy->cpu];
if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
policy->max < policy->cpuinfo.max_freq &&
policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
@@ -1509,6 +1521,15 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
out:
+ if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ /*
+ * NOHZ_FULL CPUs need this as the governor callback may not
+ * be invoked on them.
+ */
+ intel_pstate_clear_update_util_hook(policy->cpu);
+ intel_pstate_max_within_limits(cpu);
+ }
+
intel_pstate_set_update_util_hook(policy->cpu);
intel_pstate_hwp_set_policy(policy);
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 1f01e98c8..73ae849f5 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -44,7 +44,6 @@ static void dax_pmem_percpu_exit(void *data)
dev_dbg(dax_pmem->dev, "%s\n", __func__);
percpu_ref_exit(ref);
- wait_for_completion(&dax_pmem->cmp);
}
static void dax_pmem_percpu_kill(void *data)
@@ -54,6 +53,7 @@ static void dax_pmem_percpu_kill(void *data)
dev_dbg(dax_pmem->dev, "%s\n", __func__);
percpu_ref_kill(ref);
+ wait_for_completion(&dax_pmem->cmp);
}
static int dax_pmem_probe(struct device *dev)
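
The dax/pmem fix moves the wait out of the exit path and into the kill path: percpu_ref_exit() must only run after every in-flight reference has been dropped, and the release callback signals exactly that moment. A kernel-style sketch of the resulting ordering, not compiled here and with illustrative names:

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/percpu-refcount.h>

struct pmem_ctx {
	struct percpu_ref ref;
	struct completion cmp;
};

static void pmem_ref_release(struct percpu_ref *ref)
{
	struct pmem_ctx *ctx = container_of(ref, struct pmem_ctx, ref);

	complete(&ctx->cmp);			/* the last reference is gone */
}

static void pmem_percpu_kill(struct pmem_ctx *ctx)
{
	percpu_ref_kill(&ctx->ref);		/* refuse new references */
	wait_for_completion(&ctx->cmp);		/* drain in-flight users */
}

static void pmem_percpu_exit(struct pmem_ctx *ctx)
{
	percpu_ref_exit(&ctx->ref);		/* safe: nothing can still race */
}
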
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 309311b1f..15475892a 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -73,13 +73,13 @@ struct rfc2734_header {
#define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30)
#define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
-#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16)
+#define fwnet_get_hdr_dg_size(h) ((((h)->w0 & 0x0fff0000) >> 16) + 1)
#define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff))
#define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16)
-#define fwnet_set_hdr_lf(lf) ((lf) << 30)
+#define fwnet_set_hdr_lf(lf) ((lf) << 30)
#define fwnet_set_hdr_ether_type(et) (et)
-#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16)
+#define fwnet_set_hdr_dg_size(dgs) (((dgs) - 1) << 16)
#define fwnet_set_hdr_fg_off(fgo) (fgo)
#define fwnet_set_hdr_dgl(dgl) ((dgl) << 16)
@@ -578,6 +578,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
int retval;
u16 ether_type;
+ if (len <= RFC2374_UNFRAG_HDR_SIZE)
+ return 0;
+
hdr.w0 = be32_to_cpu(buf[0]);
lf = fwnet_get_hdr_lf(&hdr);
if (lf == RFC2374_HDR_UNFRAG) {
@@ -602,7 +605,12 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
return fwnet_finish_incoming_packet(net, skb, source_node_id,
is_broadcast, ether_type);
}
+
/* A datagram fragment has been received, now the fun begins. */
+
+ if (len <= RFC2374_FRAG_HDR_SIZE)
+ return 0;
+
hdr.w1 = ntohl(buf[1]);
buf += 2;
len -= RFC2374_FRAG_HDR_SIZE;
@@ -614,7 +622,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
fg_off = fwnet_get_hdr_fg_off(&hdr);
}
datagram_label = fwnet_get_hdr_dgl(&hdr);
- dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */
+ dg_size = fwnet_get_hdr_dg_size(&hdr);
+
+ if (fg_off + len > dg_size)
+ return 0;
spin_lock_irqsave(&dev->lock, flags);
@@ -722,6 +733,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
fw_send_response(card, r, rcode);
}
+static int gasp_source_id(__be32 *p)
+{
+ return be32_to_cpu(p[0]) >> 16;
+}
+
+static u32 gasp_specifier_id(__be32 *p)
+{
+ return (be32_to_cpu(p[0]) & 0xffff) << 8 |
+ (be32_to_cpu(p[1]) & 0xff000000) >> 24;
+}
+
+static u32 gasp_version(__be32 *p)
+{
+ return be32_to_cpu(p[1]) & 0xffffff;
+}
+
static void fwnet_receive_broadcast(struct fw_iso_context *context,
u32 cycle, size_t header_length, void *header, void *data)
{
@@ -731,9 +758,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
__be32 *buf_ptr;
int retval;
u32 length;
- u16 source_node_id;
- u32 specifier_id;
- u32 ver;
unsigned long offset;
unsigned long flags;
@@ -750,22 +774,17 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
spin_unlock_irqrestore(&dev->lock, flags);
- specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8
- | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24;
- ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
- source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
-
- if (specifier_id == IANA_SPECIFIER_ID &&
- (ver == RFC2734_SW_VERSION
+ if (length > IEEE1394_GASP_HDR_SIZE &&
+ gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID &&
+ (gasp_version(buf_ptr) == RFC2734_SW_VERSION
#if IS_ENABLED(CONFIG_IPV6)
- || ver == RFC3146_SW_VERSION
+ || gasp_version(buf_ptr) == RFC3146_SW_VERSION
#endif
- )) {
- buf_ptr += 2;
- length -= IEEE1394_GASP_HDR_SIZE;
- fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
+ ))
+ fwnet_incoming_packet(dev, buf_ptr + 2,
+ length - IEEE1394_GASP_HDR_SIZE,
+ gasp_source_id(buf_ptr),
context->card->generation, true);
- }
packet.payload_length = dev->rcv_buffer_size;
packet.interrupt = 1;
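
The macro changes above fold the RFC 2734 off-by-one into the accessors themselves: dg_size is carried on the wire as the datagram size minus one, so the getter now adds one back, the setter subtracts it, and the old "??? + 1" note at the call site disappears. A compilable round-trip check under that assumption:

#include <assert.h>
#include <stdint.h>

#define SET_DG_SIZE(dgs)	((uint32_t)(((dgs) - 1) << 16))
#define GET_DG_SIZE(w0)		((((w0) & 0x0fff0000) >> 16) + 1)

int main(void)
{
	uint32_t w0 = SET_DG_SIZE(1500);

	/* The encoding round-trips once both sides apply the +/- 1. */
	assert(GET_DG_SIZE(w0) == 1500);
	return 0;
}
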
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index af514618d..14f2d9835 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -602,14 +602,17 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
{
int idx, i;
unsigned int irq_flags;
+ int ret = -ENOENT;
for (i = 0, idx = 0; idx <= index; i++) {
struct acpi_gpio_info info;
struct gpio_desc *desc;
desc = acpi_get_gpiod_by_index(adev, NULL, i, &info);
- if (IS_ERR(desc))
+ if (IS_ERR(desc)) {
+ ret = PTR_ERR(desc);
break;
+ }
if (info.gpioint && idx++ == index) {
int irq = gpiod_to_irq(desc);
@@ -628,7 +631,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
}
}
- return -ENOENT;
+ return ret;
}
EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 53ff25ac6..b2dee1024 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -21,6 +21,7 @@
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/anon_inodes.h>
+#include <linux/file.h>
#include <linux/kfifo.h>
#include <linux/poll.h>
#include <linux/timekeeping.h>
@@ -331,6 +332,13 @@ struct linehandle_state {
u32 numdescs;
};
+#define GPIOHANDLE_REQUEST_VALID_FLAGS \
+ (GPIOHANDLE_REQUEST_INPUT | \
+ GPIOHANDLE_REQUEST_OUTPUT | \
+ GPIOHANDLE_REQUEST_ACTIVE_LOW | \
+ GPIOHANDLE_REQUEST_OPEN_DRAIN | \
+ GPIOHANDLE_REQUEST_OPEN_SOURCE)
+
static long linehandle_ioctl(struct file *filep, unsigned int cmd,
unsigned long arg)
{
@@ -342,6 +350,8 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
int val;
+ memset(&ghd, 0, sizeof(ghd));
+
/* TODO: check if descriptors are really input */
for (i = 0; i < lh->numdescs; i++) {
val = gpiod_get_value_cansleep(lh->descs[i]);
@@ -412,6 +422,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
{
struct gpiohandle_request handlereq;
struct linehandle_state *lh;
+ struct file *file;
int fd, i, ret;
if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
@@ -442,6 +453,17 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
u32 lflags = handlereq.flags;
struct gpio_desc *desc;
+ if (offset >= gdev->ngpio) {
+ ret = -EINVAL;
+ goto out_free_descs;
+ }
+
+ /* Return an error if an unknown flag is set */
+ if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) {
+ ret = -EINVAL;
+ goto out_free_descs;
+ }
+
desc = &gdev->descs[offset];
ret = gpiod_request(desc, lh->label);
if (ret)
@@ -477,26 +499,41 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
i--;
lh->numdescs = handlereq.lines;
- fd = anon_inode_getfd("gpio-linehandle",
- &linehandle_fileops,
- lh,
- O_RDONLY | O_CLOEXEC);
+ fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
if (fd < 0) {
ret = fd;
goto out_free_descs;
}
+ file = anon_inode_getfile("gpio-linehandle",
+ &linehandle_fileops,
+ lh,
+ O_RDONLY | O_CLOEXEC);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto out_put_unused_fd;
+ }
+
handlereq.fd = fd;
if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
- ret = -EFAULT;
- goto out_free_descs;
+ /*
+ * fput() will trigger the release() callback, so do not go onto
+ * the regular error cleanup path here.
+ */
+ fput(file);
+ put_unused_fd(fd);
+ return -EFAULT;
}
+ fd_install(fd, file);
+
dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
lh->numdescs);
return 0;
+out_put_unused_fd:
+ put_unused_fd(fd);
out_free_descs:
for (; i >= 0; i--)
gpiod_free(lh->descs[i]);
@@ -534,6 +571,10 @@ struct lineevent_state {
struct mutex read_lock;
};
+#define GPIOEVENT_REQUEST_VALID_FLAGS \
+ (GPIOEVENT_REQUEST_RISING_EDGE | \
+ GPIOEVENT_REQUEST_FALLING_EDGE)
+
static unsigned int lineevent_poll(struct file *filep,
struct poll_table_struct *wait)
{
@@ -621,6 +662,8 @@ static long lineevent_ioctl(struct file *filep, unsigned int cmd,
if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
int val;
+ memset(&ghd, 0, sizeof(ghd));
+
val = gpiod_get_value_cansleep(le->desc);
if (val < 0)
return val;
@@ -693,6 +736,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
struct gpioevent_request eventreq;
struct lineevent_state *le;
struct gpio_desc *desc;
+ struct file *file;
u32 offset;
u32 lflags;
u32 eflags;
@@ -724,6 +768,18 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
lflags = eventreq.handleflags;
eflags = eventreq.eventflags;
+ if (offset >= gdev->ngpio) {
+ ret = -EINVAL;
+ goto out_free_label;
+ }
+
+ /* Return an error if an unknown flag is set */
+ if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
+ (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS)) {
+ ret = -EINVAL;
+ goto out_free_label;
+ }
+
/* This is just wrong: we don't look for events on output lines */
if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
ret = -EINVAL;
@@ -775,23 +831,38 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_desc;
- fd = anon_inode_getfd("gpio-event",
- &lineevent_fileops,
- le,
- O_RDONLY | O_CLOEXEC);
+ fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
if (fd < 0) {
ret = fd;
goto out_free_irq;
}
+ file = anon_inode_getfile("gpio-event",
+ &lineevent_fileops,
+ le,
+ O_RDONLY | O_CLOEXEC);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto out_put_unused_fd;
+ }
+
eventreq.fd = fd;
if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
- ret = -EFAULT;
- goto out_free_irq;
+ /*
+ * fput() will trigger the release() callback, so do not go onto
+ * the regular error cleanup path here.
+ */
+ fput(file);
+ put_unused_fd(fd);
+ return -EFAULT;
}
+ fd_install(fd, file);
+
return 0;
+out_put_unused_fd:
+ put_unused_fd(fd);
out_free_irq:
free_irq(le->irq, le);
out_free_desc:
@@ -821,6 +892,8 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
struct gpiochip_info chipinfo;
+ memset(&chipinfo, 0, sizeof(chipinfo));
+
strncpy(chipinfo.name, dev_name(&gdev->dev),
sizeof(chipinfo.name));
chipinfo.name[sizeof(chipinfo.name)-1] = '\0';
@@ -837,7 +910,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
return -EFAULT;
- if (lineinfo.line_offset > gdev->ngpio)
+ if (lineinfo.line_offset >= gdev->ngpio)
return -EINVAL;
desc = &gdev->descs[lineinfo.line_offset];
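
Both gpiolib paths above switch from anon_inode_getfd(), which publishes the fd immediately, to a three-step sequence that defers publication until nothing can fail. A kernel-style sketch of that sequence, not compiled here; the fops, private data, and request struct names are placeholders:

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("example", &example_fops, priv,
				  O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	req.fd = fd;
	if (copy_to_user(ip, &req, sizeof(req))) {
		/* fput() runs release(), so skip the normal unwind. */
		fput(file);
		put_unused_fd(fd);
		return -EFAULT;
	}
	fd_install(fd, file);	/* only now does userspace see the fd */
	return 0;
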
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 2a3ded44c..7c8c185c9 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -420,18 +420,21 @@ drm_atomic_replace_property_blob_from_id(struct drm_crtc *crtc,
ssize_t expected_size,
bool *replaced)
{
- struct drm_device *dev = crtc->dev;
struct drm_property_blob *new_blob = NULL;
if (blob_id != 0) {
- new_blob = drm_property_lookup_blob(dev, blob_id);
+ new_blob = drm_property_lookup_blob(crtc->dev, blob_id);
if (new_blob == NULL)
return -EINVAL;
- if (expected_size > 0 && expected_size != new_blob->length)
+
+ if (expected_size > 0 && expected_size != new_blob->length) {
+ drm_property_unreference_blob(new_blob);
return -EINVAL;
+ }
}
drm_atomic_replace_property_blob(blob, new_blob, replaced);
+ drm_property_unreference_blob(new_blob);
return 0;
}
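
drm_property_lookup_blob() returns the blob with an extra reference held, so every exit path of the helper above must drop it; the fix adds the missing unreference on the size-mismatch error and after the replace (which takes its own reference), relying on drm_property_unreference_blob(NULL) being a no-op. A standalone model of the balanced pattern, with illustrative names:

#include <stddef.h>
#include <stdlib.h>

struct blob {
	int refs;
	size_t length;
};

static struct blob *blob_ref(struct blob *b)
{
	if (b)
		b->refs++;
	return b;
}

static void blob_unref(struct blob *b)
{
	if (b && --b->refs == 0)
		free(b);
}

/* 'new_blob' arrives with the lookup's reference held (or is NULL). */
static int replace_blob(struct blob **slot, struct blob *new_blob,
			size_t expected)
{
	if (new_blob && expected && new_blob->length != expected) {
		blob_unref(new_blob);	/* was leaked before the fix */
		return -1;
	}
	blob_unref(*slot);
	*slot = blob_ref(new_blob);	/* the replace takes its own ref */
	blob_unref(new_blob);		/* drop the lookup reference */
	return 0;
}
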
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 04e457117..aa6444877 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -914,6 +914,7 @@ static void drm_dp_destroy_port(struct kref *kref)
/* no need to clean up vcpi
* as if we have no connector we never setup a vcpi */
drm_dp_port_teardown_pdt(port, port->pdt);
+ port->pdt = DP_PEER_DEVICE_NONE;
}
kfree(port);
}
@@ -1159,7 +1160,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
drm_dp_put_port(port);
goto out;
}
- if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
+ if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
+ port->pdt == DP_PEER_DEVICE_SST_SINK) &&
+ port->port_num >= DP_MST_LOGICAL_PORT_0) {
port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
drm_mode_connector_set_tile_property(port->connector);
}
@@ -2919,6 +2922,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
mgr->cbs->destroy_connector(mgr, port->connector);
drm_dp_port_teardown_pdt(port, port->pdt);
+ port->pdt = DP_PEER_DEVICE_NONE;
if (!port->input && port->vcpi.vcpi > 0) {
drm_dp_mst_reset_vcpi_slots(mgr, port);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0a06f9120..337c55597 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -129,7 +129,12 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
return 0;
fail:
for (i = 0; i < fb_helper->connector_count; i++) {
- kfree(fb_helper->connector_info[i]);
+ struct drm_fb_helper_connector *fb_helper_connector =
+ fb_helper->connector_info[i];
+
+ drm_connector_unreference(fb_helper_connector->connector);
+
+ kfree(fb_helper_connector);
fb_helper->connector_info[i] = NULL;
}
fb_helper->connector_count = 0;
@@ -601,6 +606,24 @@ int drm_fb_helper_blank(int blank, struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_blank);
+static void drm_fb_helper_modeset_release(struct drm_fb_helper *helper,
+ struct drm_mode_set *modeset)
+{
+ int i;
+
+ for (i = 0; i < modeset->num_connectors; i++) {
+ drm_connector_unreference(modeset->connectors[i]);
+ modeset->connectors[i] = NULL;
+ }
+ modeset->num_connectors = 0;
+
+ drm_mode_destroy(helper->dev, modeset->mode);
+ modeset->mode = NULL;
+
+ /* FIXME should hold a ref? */
+ modeset->fb = NULL;
+}
+
static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
{
int i;
@@ -610,10 +633,12 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
kfree(helper->connector_info[i]);
}
kfree(helper->connector_info);
+
for (i = 0; i < helper->crtc_count; i++) {
- kfree(helper->crtc_info[i].mode_set.connectors);
- if (helper->crtc_info[i].mode_set.mode)
- drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode);
+ struct drm_mode_set *modeset = &helper->crtc_info[i].mode_set;
+
+ drm_fb_helper_modeset_release(helper, modeset);
+ kfree(modeset->connectors);
}
kfree(helper->crtc_info);
}
@@ -632,7 +657,9 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
clip->x2 = clip->y2 = 0;
spin_unlock_irqrestore(&helper->dirty_lock, flags);
- helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
+ /* Call the dirty callback only when the clip has actually been touched */
+ if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2)
+ helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
}
/**
@@ -2027,7 +2054,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
struct drm_fb_helper_crtc **crtcs;
struct drm_display_mode **modes;
struct drm_fb_offset *offsets;
- struct drm_mode_set *modeset;
bool *enabled;
int width, height;
int i;
@@ -2075,45 +2101,35 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
/* need to set the modesets up here for use later */
/* fill out the connector<->crtc mappings into the modesets */
- for (i = 0; i < fb_helper->crtc_count; i++) {
- modeset = &fb_helper->crtc_info[i].mode_set;
- modeset->num_connectors = 0;
- modeset->fb = NULL;
- }
+ for (i = 0; i < fb_helper->crtc_count; i++)
+ drm_fb_helper_modeset_release(fb_helper,
+ &fb_helper->crtc_info[i].mode_set);
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_display_mode *mode = modes[i];
struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
struct drm_fb_offset *offset = &offsets[i];
- modeset = &fb_crtc->mode_set;
+ struct drm_mode_set *modeset = &fb_crtc->mode_set;
if (mode && fb_crtc) {
+ struct drm_connector *connector =
+ fb_helper->connector_info[i]->connector;
+
DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n",
mode->name, fb_crtc->mode_set.crtc->base.id, offset->x, offset->y);
+
fb_crtc->desired_mode = mode;
fb_crtc->x = offset->x;
fb_crtc->y = offset->y;
- if (modeset->mode)
- drm_mode_destroy(dev, modeset->mode);
modeset->mode = drm_mode_duplicate(dev,
fb_crtc->desired_mode);
- modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
+ drm_connector_reference(connector);
+ modeset->connectors[modeset->num_connectors++] = connector;
modeset->fb = fb_helper->fb;
modeset->x = offset->x;
modeset->y = offset->y;
}
}
-
- /* Clear out any old modes if there are no more connected outputs. */
- for (i = 0; i < fb_helper->crtc_count; i++) {
- modeset = &fb_helper->crtc_info[i].mode_set;
- if (modeset->num_connectors == 0) {
- BUG_ON(modeset->fb);
- if (modeset->mode)
- drm_mode_destroy(dev, modeset->mode);
- modeset->mode = NULL;
- }
- }
out:
kfree(crtcs);
kfree(modes);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index c6e69e4cf..1f8af87c6 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1031,6 +1031,77 @@ static u8 translate_iboost(u8 val)
return mapping[val];
}
+static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[port];
+ enum port p;
+
+ if (!info->alternate_ddc_pin)
+ return;
+
+ for_each_port_masked(p, (1 << port) - 1) {
+ struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
+
+ if (info->alternate_ddc_pin != i->alternate_ddc_pin)
+ continue;
+
+ DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
+ "disabling port %c DVI/HDMI support\n",
+ port_name(p), i->alternate_ddc_pin,
+ port_name(port), port_name(p));
+
+ /*
+ * If we have multiple ports supposedly sharing the
+ * pin, then dvi/hdmi couldn't exist on the shared
+ * port. Otherwise they share the same ddc pin and
+ * the system couldn't communicate with them separately.
+ *
+ * Due to parsing the ports in alphabetical order,
+ * a higher port will always clobber a lower one.
+ */
+ i->supports_dvi = false;
+ i->supports_hdmi = false;
+ i->alternate_ddc_pin = 0;
+ }
+}
+
+static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[port];
+ enum port p;
+
+ if (!info->alternate_aux_channel)
+ return;
+
+ for_each_port_masked(p, (1 << port) - 1) {
+ struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
+
+ if (info->alternate_aux_channel != i->alternate_aux_channel)
+ continue;
+
+ DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
+ "disabling port %c DP support\n",
+ port_name(p), i->alternate_aux_channel,
+ port_name(port), port_name(p));
+
+ /*
+ * If we have multiple ports supposedly sharing the
+ * aux channel, then DP couldn't exist on the shared
+ * port. Otherwise they share the same aux channel
+ * and the system couldn't communicate with them separately.
+ *
+ * Due to parsing the ports in alphabetical order,
+ * a higher port will always clobber a lower one.
+ */
+ i->supports_dp = false;
+ i->alternate_aux_channel = 0;
+ }
+}
+
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
const struct bdb_header *bdb)
{
@@ -1105,54 +1176,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
if (is_dvi) {
- if (port == PORT_E) {
- info->alternate_ddc_pin = ddc_pin;
- /* if DDIE share ddc pin with other port, then
- * dvi/hdmi couldn't exist on the shared port.
- * Otherwise they share the same ddc bin and system
- * couldn't communicate with them seperately. */
- if (ddc_pin == DDC_PIN_B) {
- dev_priv->vbt.ddi_port_info[PORT_B].supports_dvi = 0;
- dev_priv->vbt.ddi_port_info[PORT_B].supports_hdmi = 0;
- } else if (ddc_pin == DDC_PIN_C) {
- dev_priv->vbt.ddi_port_info[PORT_C].supports_dvi = 0;
- dev_priv->vbt.ddi_port_info[PORT_C].supports_hdmi = 0;
- } else if (ddc_pin == DDC_PIN_D) {
- dev_priv->vbt.ddi_port_info[PORT_D].supports_dvi = 0;
- dev_priv->vbt.ddi_port_info[PORT_D].supports_hdmi = 0;
- }
- } else if (ddc_pin == DDC_PIN_B && port != PORT_B)
- DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
- else if (ddc_pin == DDC_PIN_C && port != PORT_C)
- DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
- else if (ddc_pin == DDC_PIN_D && port != PORT_D)
- DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
+ info->alternate_ddc_pin = ddc_pin;
+
+ sanitize_ddc_pin(dev_priv, port);
}
if (is_dp) {
- if (port == PORT_E) {
- info->alternate_aux_channel = aux_channel;
- /* if DDIE share aux channel with other port, then
- * DP couldn't exist on the shared port. Otherwise
- * they share the same aux channel and system
- * couldn't communicate with them seperately. */
- if (aux_channel == DP_AUX_A)
- dev_priv->vbt.ddi_port_info[PORT_A].supports_dp = 0;
- else if (aux_channel == DP_AUX_B)
- dev_priv->vbt.ddi_port_info[PORT_B].supports_dp = 0;
- else if (aux_channel == DP_AUX_C)
- dev_priv->vbt.ddi_port_info[PORT_C].supports_dp = 0;
- else if (aux_channel == DP_AUX_D)
- dev_priv->vbt.ddi_port_info[PORT_D].supports_dp = 0;
- }
- else if (aux_channel == DP_AUX_A && port != PORT_A)
- DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
- else if (aux_channel == DP_AUX_B && port != PORT_B)
- DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
- else if (aux_channel == DP_AUX_C && port != PORT_C)
- DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
- else if (aux_channel == DP_AUX_D && port != PORT_D)
- DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
+ info->alternate_aux_channel = aux_channel;
+
+ sanitize_aux_ch(dev_priv, port);
}
if (bdb->version >= 158) {
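
Both sanitizers above walk only the ports parsed before the current one, using the mask (1 << port) - 1: every bit below 'port' is set, so for_each_port_masked() visits exactly the lower ports, and the alphabetical parse order guarantees a higher port clobbers a lower one at most once. A compilable check of that mask identity:

#include <assert.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E };

int main(void)
{
	/* (1 << port) - 1 selects all ports strictly below 'port'. */
	unsigned int below_d = (1u << PORT_D) - 1;

	assert(below_d & (1u << PORT_A));
	assert(below_d & (1u << PORT_B));
	assert(below_d & (1u << PORT_C));
	assert(!(below_d & (1u << PORT_D)));
	assert(!(below_d & (1u << PORT_E)));
	return 0;
}
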
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e9a64fba6..63462f279 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -13834,7 +13834,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
for_each_plane_in_state(state, plane, plane_state, i) {
struct intel_plane_state *intel_plane_state =
- to_intel_plane_state(plane_state);
+ to_intel_plane_state(plane->state);
if (!intel_plane_state->wait_req)
continue;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1ca155f4d..3051182cf 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1090,6 +1090,44 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
return ret;
}
+static enum port intel_aux_port(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[port];
+ enum port aux_port;
+
+ if (!info->alternate_aux_channel) {
+ DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
+ port_name(port), port_name(port));
+ return port;
+ }
+
+ switch (info->alternate_aux_channel) {
+ case DP_AUX_A:
+ aux_port = PORT_A;
+ break;
+ case DP_AUX_B:
+ aux_port = PORT_B;
+ break;
+ case DP_AUX_C:
+ aux_port = PORT_C;
+ break;
+ case DP_AUX_D:
+ aux_port = PORT_D;
+ break;
+ default:
+ MISSING_CASE(info->alternate_aux_channel);
+ aux_port = PORT_A;
+ break;
+ }
+
+ DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
+ port_name(aux_port), port_name(port));
+
+ return aux_port;
+}
+
static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
enum port port)
{
@@ -1150,36 +1188,9 @@ static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
}
}
-/*
- * On SKL we don't have Aux for port E so we rely
- * on VBT to set a proper alternate aux channel.
- */
-static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
-{
- const struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[PORT_E];
-
- switch (info->alternate_aux_channel) {
- case DP_AUX_A:
- return PORT_A;
- case DP_AUX_B:
- return PORT_B;
- case DP_AUX_C:
- return PORT_C;
- case DP_AUX_D:
- return PORT_D;
- default:
- MISSING_CASE(info->alternate_aux_channel);
- return PORT_A;
- }
-}
-
static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
enum port port)
{
- if (port == PORT_E)
- port = skl_porte_aux_port(dev_priv);
-
switch (port) {
case PORT_A:
case PORT_B:
@@ -1195,9 +1206,6 @@ static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
enum port port, int index)
{
- if (port == PORT_E)
- port = skl_porte_aux_port(dev_priv);
-
switch (port) {
case PORT_A:
case PORT_B:
@@ -1235,7 +1243,8 @@ static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
static void intel_aux_reg_init(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
- enum port port = dp_to_dig_port(intel_dp)->port;
+ enum port port = intel_aux_port(dev_priv,
+ dp_to_dig_port(intel_dp)->port);
int i;
intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 3836a1c79..ad483376b 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -104,8 +104,10 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
int lines;
intel_fbc_get_plane_source_size(cache, NULL, &lines);
- if (INTEL_INFO(dev_priv)->gen >= 7)
+ if (INTEL_GEN(dev_priv) == 7)
lines = min(lines, 2048);
+ else if (INTEL_GEN(dev_priv) >= 8)
+ lines = min(lines, 2560);
/* Hardware needs the full buffer stride, not just the active area. */
return lines * cache->fb.stride;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index e59a28cb3..a69160568 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3363,13 +3363,15 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
int num_active;
int id, i;
+ /* Clear the partitioning for disabled planes. */
+ memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
+ memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
+
if (WARN_ON(!state))
return 0;
if (!cstate->base.active) {
ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
- memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
- memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
return 0;
}
@@ -3469,12 +3471,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
return 0;
}
-static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
-{
- /* TODO: Take into account the scalers once we support them */
- return config->base.adjusted_mode.crtc_clock;
-}
-
/*
* The max latency should be 257 (max the punit can code is 255 and we add 2us
* for the read latency) and cpp should always be <= 8, so that
@@ -3525,7 +3521,7 @@ static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cst
* Adjusted plane pixel rate is just the pipe's adjusted pixel rate
* with additional adjustments for plane-specific scaling.
*/
- adjusted_pixel_rate = skl_pipe_pixel_rate(cstate);
+ adjusted_pixel_rate = ilk_pipe_pixel_rate(cstate);
downscale_amount = skl_plane_downscale_amount(pstate);
pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
@@ -3737,11 +3733,11 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
if (!cstate->base.active)
return 0;
- if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0))
+ if (WARN_ON(ilk_pipe_pixel_rate(cstate) == 0))
return 0;
return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
- skl_pipe_pixel_rate(cstate));
+ ilk_pipe_pixel_rate(cstate));
}
static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
@@ -4051,6 +4047,12 @@ skl_compute_ddb(struct drm_atomic_state *state)
intel_state->wm_results.dirty_pipes = ~0;
}
+ /*
+ * We're not recomputing for the pipes not included in the commit, so
+ * make sure we start with the current state.
+ */
+ memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
+
for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
struct intel_crtc_state *cstate;
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 29423e757..927c51e8a 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -108,6 +108,7 @@ static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane,
{
struct drm_plane *plane = &ipu_plane->base;
struct drm_plane_state *state = plane->state;
+ struct drm_crtc_state *crtc_state = state->crtc->state;
struct drm_framebuffer *fb = state->fb;
unsigned long eba, ubo, vbo;
int active;
@@ -149,7 +150,7 @@ static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane,
break;
}
- if (old_state->fb) {
+ if (!drm_atomic_crtc_needs_modeset(crtc_state)) {
active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
@@ -359,7 +360,9 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
if ((ubo > 0xfffff8) || (vbo > 0xfffff8))
return -EINVAL;
- if (old_fb) {
+ if (old_fb &&
+ (old_fb->pixel_format == DRM_FORMAT_YUV420 ||
+ old_fb->pixel_format == DRM_FORMAT_YVU420)) {
old_ubo = drm_plane_state_to_ubo(old_state);
old_vbo = drm_plane_state_to_vbo(old_state);
if (ubo != old_ubo || vbo != old_vbo)
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index dc57b628e..193573d19 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -240,7 +240,8 @@ static bool nouveau_pr3_present(struct pci_dev *pdev)
if (!parent_adev)
return false;
- return acpi_has_method(parent_adev->handle, "_PR3");
+ return parent_adev->power.flags.power_resources &&
+ acpi_has_method(parent_adev->handle, "_PR3");
}
static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out,
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 05920c4c9..d4165a6c2 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1376,9 +1376,7 @@ static void cayman_pcie_gart_fini(struct radeon_device *rdev)
void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
int ring, u32 cp_int_cntl)
{
- u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
-
- WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
+ WREG32(SRBM_GFX_CNTL, RINGID(ring));
WREG32(CP_INT_CNTL, cp_int_cntl);
}
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
index db64e0062..3b0c229d7 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
tmp &= AUX_HPD_SEL(0x7);
tmp |= AUX_HPD_SEL(chan->rec.hpd);
- tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
+ tmp |= AUX_EN | AUX_LS_READ_EN;
WREG32(AUX_CONTROL + aux_offset[instance], tmp);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 89bdf2034..c49934527 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2999,6 +2999,49 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
int i;
struct si_dpm_quirk *p = si_dpm_quirk_list;
+ /* limit all SI kickers */
+ if (rdev->family == CHIP_PITCAIRN) {
+ if ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->device == 0x6810) ||
+ (rdev->pdev->device == 0x6811) ||
+ (rdev->pdev->device == 0x6816) ||
+ (rdev->pdev->device == 0x6817) ||
+ (rdev->pdev->device == 0x6806))
+ max_mclk = 120000;
+ } else if (rdev->family == CHIP_VERDE) {
+ if ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->revision == 0x87) ||
+ (rdev->pdev->device == 0x6820) ||
+ (rdev->pdev->device == 0x6821) ||
+ (rdev->pdev->device == 0x6822) ||
+ (rdev->pdev->device == 0x6823) ||
+ (rdev->pdev->device == 0x682A) ||
+ (rdev->pdev->device == 0x682B)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ } else if (rdev->family == CHIP_OLAND) {
+ if ((rdev->pdev->revision == 0xC7) ||
+ (rdev->pdev->revision == 0x80) ||
+ (rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->device == 0x6604) ||
+ (rdev->pdev->device == 0x6605)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ } else if (rdev->family == CHIP_HAINAN) {
+ if ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->revision == 0xC3) ||
+ (rdev->pdev->device == 0x6664) ||
+ (rdev->pdev->device == 0x6665) ||
+ (rdev->pdev->device == 0x6667)) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
+ }
/* Apply dpm quirks */
while (p && p->chip_device != 0) {
if (rdev->pdev->vendor == p->chip_vendor &&
@@ -3011,16 +3054,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
}
++p;
}
- /* limit mclk on all R7 370 parts for stability */
- if (rdev->pdev->device == 0x6811 &&
- rdev->pdev->revision == 0x81)
- max_mclk = 120000;
- /* limit sclk/mclk on Jet parts for stability */
- if (rdev->pdev->device == 0x6665 &&
- rdev->pdev->revision == 0xc3) {
- max_sclk = 75000;
- max_mclk = 80000;
- }
if (rps->vce_active) {
rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index e92b09d32..9ab703c10 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -179,6 +179,7 @@
#define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205
#define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208
#define USB_DEVICE_ID_ATEN_CS682 0x2213
+#define USB_DEVICE_ID_ATEN_CS692 0x8021
#define USB_VENDOR_ID_ATMEL 0x03eb
#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index bb400081e..85fcf60f3 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -63,6 +63,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index d5acaa2d8..9dc637253 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -283,10 +283,14 @@ static void heartbeat_onchannelcallback(void *context)
u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
struct icmsg_negotiate *negop = NULL;
- vmbus_recvpacket(channel, hbeat_txf_buf,
- PAGE_SIZE, &recvlen, &requestid);
+ while (1) {
+
+ vmbus_recvpacket(channel, hbeat_txf_buf,
+ PAGE_SIZE, &recvlen, &requestid);
+
+ if (!recvlen)
+ break;
- if (recvlen > 0) {
icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
sizeof(struct vmbuspipe_hdr)];
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 5c5b7cada..dfae43523 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -694,6 +694,8 @@ static int rk3x_i2c_v0_calc_timings(unsigned long clk_rate,
t_calc->div_low--;
t_calc->div_high--;
+ /* Set the tuning value to 0 so the con register is not updated */
+ t_calc->tuning = 0;
/* Maximum divider supported by hw is 0xffff */
if (t_calc->div_low > 0xffff) {
t_calc->div_low = 0xffff;
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index 4233f5695..3c38029e3 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -105,7 +105,7 @@ struct slimpro_i2c_dev {
struct mbox_chan *mbox_chan;
struct mbox_client mbox_client;
struct completion rd_complete;
- u8 dma_buffer[I2C_SMBUS_BLOCK_MAX];
+ u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* dma_buffer[0] is used for length */
u32 *resp_msg;
};
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index da3a02ef4..a9a9f6694 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1592,6 +1592,7 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
static void of_i2c_register_devices(struct i2c_adapter *adap)
{
struct device_node *node;
+ struct i2c_client *client;
/* Only register child devices if the adapter has a node pointer set */
if (!adap->dev.of_node)
@@ -1602,7 +1603,14 @@ static void of_i2c_register_devices(struct i2c_adapter *adap)
for_each_available_child_of_node(adap->dev.of_node, node) {
if (of_node_test_and_set_flag(node, OF_POPULATED))
continue;
- of_i2c_register_device(adap, node);
+
+ client = of_i2c_register_device(adap, node);
+ if (IS_ERR(client)) {
+ dev_warn(&adap->dev,
+ "Failed to create I2C device for %s\n",
+ node->full_name);
+ of_node_clear_flag(node, OF_POPULATED);
+ }
}
}
@@ -2073,6 +2081,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
/* add the driver to the list of i2c drivers in the driver core */
driver->driver.owner = owner;
driver->driver.bus = &i2c_bus_type;
+ INIT_LIST_HEAD(&driver->clients);
/* When registration returns, the driver core
* will have called probe() for all matching-but-unbound devices.
@@ -2083,7 +2092,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
pr_debug("driver [%s] registered\n", driver->driver.name);
- INIT_LIST_HEAD(&driver->clients);
/* Walk the adapters that are already present */
i2c_for_each_dev(driver, __process_new_driver);
@@ -2201,6 +2209,7 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
if (IS_ERR(client)) {
dev_err(&adap->dev, "failed to create client for '%s'\n",
rd->dn->full_name);
+ of_node_clear_flag(rd->dn, OF_POPULATED);
return notifier_from_errno(PTR_ERR(client));
}
break;
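
Moving INIT_LIST_HEAD(&driver->clients) ahead of the bus registration matters because driver_register() can invoke probe() synchronously, and a probe that touches the still-uninitialized client list would corrupt it. The general rule, as a short kernel-style sketch with illustrative context:

	/* Finish initializing the object before the call that makes it
	 * globally visible; callbacks may run inside that call. */
	INIT_LIST_HEAD(&driver->clients);
	ret = driver_register(&driver->driver);
	if (ret)
		return ret;
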
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
index 407f141a1..a3fbdb761 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -207,13 +207,14 @@ static int atlas_check_ec_calibration(struct atlas_data *data)
struct device *dev = &data->client->dev;
int ret;
unsigned int val;
+ __be16 rval;
- ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &val, 2);
+ ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &rval, 2);
if (ret)
return ret;
- dev_info(dev, "probe set to K = %d.%.2d", be16_to_cpu(val) / 100,
- be16_to_cpu(val) % 100);
+ val = be16_to_cpu(rval);
+ dev_info(dev, "probe set to K = %d.%.2d", val / 100, val % 100);
ret = regmap_read(data->regmap, ATLAS_REG_EC_CALIB_STATUS, &val);
if (ret)
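
The atlas fix reads the two-byte register into a __be16 and converts it once, instead of aiming a bulk read at an unsigned int, where the upper bytes stay uninitialized and the byte order is wrong on little-endian hosts. A standalone decode of the same wire format:

#include <stdint.h>
#include <stdio.h>

/* Decode a big-endian 16-bit value from a raw 2-byte register read. */
static uint16_t be16_decode(const uint8_t raw[2])
{
	return (uint16_t)((raw[0] << 8) | raw[1]);
}

int main(void)
{
	uint8_t raw[2] = { 0x01, 0x2c };	/* device reports K = 3.00 as 300 */
	uint16_t val = be16_decode(raw);

	printf("probe set to K = %d.%.2d\n", val / 100, val % 100);
	return 0;
}
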
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index f4bfb4b2d..073246c7d 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -877,6 +877,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
},
},
+ {
+ /* Schenker XMG C504 - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
+ },
+ },
{ }
};
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 8abde6b8c..6d5381096 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -266,7 +266,7 @@ static struct raid_type {
{"raid10_offset", "raid10 offset (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_OFFSET},
{"raid10_near", "raid10 near (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_NEAR},
{"raid10", "raid10 (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_DEFAULT},
- {"raid4", "raid4 (dedicated last parity disk)", 1, 2, 4, ALGORITHM_PARITY_N}, /* raid4 layout = raid5_n */
+ {"raid4", "raid4 (dedicated first parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, /* raid4 layout = raid5_0 */
{"raid5_n", "raid5 (dedicated last parity disk)", 1, 2, 5, ALGORITHM_PARITY_N},
{"raid5_ls", "raid5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
{"raid5_rs", "raid5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
@@ -2087,11 +2087,11 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
/*
* No takeover/reshaping, because we don't have the extended v1.9.0 metadata
*/
- if (le32_to_cpu(sb->level) != mddev->level) {
+ if (le32_to_cpu(sb->level) != mddev->new_level) {
DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)");
return -EINVAL;
}
- if (le32_to_cpu(sb->layout) != mddev->layout) {
+ if (le32_to_cpu(sb->layout) != mddev->new_layout) {
DMERR("Reshaping raid sets not yet supported. (raid layout change)");
DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
DMERR(" Old layout: %s w/ %d copies",
@@ -2102,7 +2102,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
raid10_md_layout_to_copies(mddev->layout));
return -EINVAL;
}
- if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
+ if (le32_to_cpu(sb->stripe_sectors) != mddev->new_chunk_sectors) {
DMERR("Reshaping raid sets not yet supported. (stripe sectors change)");
return -EINVAL;
}
@@ -2115,6 +2115,8 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
return -EINVAL;
}
+ DMINFO("Discovered old metadata format; upgrading to extended metadata format");
+
/* Table line is checked vs. authoritative superblock */
rs_set_new(rs);
}
@@ -2258,7 +2260,8 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
if (!mddev->events && super_init_validation(rs, rdev))
return -EINVAL;
- if (le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
+ if (le32_to_cpu(sb->compat_features) &&
+ le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags";
return -EINVAL;
}
@@ -3646,7 +3649,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 9, 0},
+ .version = {1, 9, 1},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index bdf1606f6..7a6254d54 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1292,6 +1292,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
dm_bio_restore(bd, bio);
bio_record->details.bi_bdev = NULL;
+ bio->bi_error = 0;
queue_bio(ms, bio, rw);
return DM_ENDIO_INCOMPLETE;
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 5da86c8b6..2154596ee 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -835,8 +835,11 @@ int dm_old_init_request_queue(struct mapped_device *md)
init_kthread_worker(&md->kworker);
md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
"kdmwork-%s", dm_device_name(md));
- if (IS_ERR(md->kworker_task))
- return PTR_ERR(md->kworker_task);
+ if (IS_ERR(md->kworker_task)) {
+ int error = PTR_ERR(md->kworker_task);
+ md->kworker_task = NULL;
+ return error;
+ }
elv_register_queue(md->queue);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 3e407a9cd..c4b53b332 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -695,37 +695,32 @@ int dm_table_add_target(struct dm_table *t, const char *type,
tgt->type = dm_get_target_type(type);
if (!tgt->type) {
- DMERR("%s: %s: unknown target type", dm_device_name(t->md),
- type);
+ DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
return -EINVAL;
}
if (dm_target_needs_singleton(tgt->type)) {
if (t->num_targets) {
- DMERR("%s: target type %s must appear alone in table",
- dm_device_name(t->md), type);
- return -EINVAL;
+ tgt->error = "singleton target type must appear alone in table";
+ goto bad;
}
t->singleton = true;
}
if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
- DMERR("%s: target type %s may not be included in read-only tables",
- dm_device_name(t->md), type);
- return -EINVAL;
+ tgt->error = "target type may not be included in a read-only table";
+ goto bad;
}
if (t->immutable_target_type) {
if (t->immutable_target_type != tgt->type) {
- DMERR("%s: immutable target type %s cannot be mixed with other target types",
- dm_device_name(t->md), t->immutable_target_type->name);
- return -EINVAL;
+ tgt->error = "immutable target type cannot be mixed with other target types";
+ goto bad;
}
} else if (dm_target_is_immutable(tgt->type)) {
if (t->num_targets) {
- DMERR("%s: immutable target type %s cannot be mixed with other target types",
- dm_device_name(t->md), tgt->type->name);
- return -EINVAL;
+ tgt->error = "immutable target type cannot be mixed with other target types";
+ goto bad;
}
t->immutable_target_type = tgt->type;
}
@@ -740,7 +735,6 @@ int dm_table_add_target(struct dm_table *t, const char *type,
*/
if (!adjoin(t, tgt)) {
tgt->error = "Gap in table";
- r = -EINVAL;
goto bad;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 0f2928b31..eeef575fb 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1423,8 +1423,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
if (md->bs)
bioset_free(md->bs);
- cleanup_srcu_struct(&md->io_barrier);
-
if (md->disk) {
spin_lock(&_minor_lock);
md->disk->private_data = NULL;
@@ -1436,6 +1434,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
if (md->queue)
blk_cleanup_queue(md->queue);
+ cleanup_srcu_struct(&md->io_barrier);
+
if (md->bdev) {
bdput(md->bdev);
md->bdev = NULL;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 915e84d63..db0aa6c05 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8120,14 +8120,14 @@ void md_do_sync(struct md_thread *thread)
if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
- mddev->curr_resync > 2) {
+ mddev->curr_resync > 3) {
mddev->curr_resync_completed = mddev->curr_resync;
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
mddev->pers->sync_request(mddev, max_sectors, &skipped);
if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
- mddev->curr_resync > 2) {
+ mddev->curr_resync > 3) {
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
if (mddev->curr_resync >= mddev->recovery_cp) {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 21dc00eb1..95bf4cd8f 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -407,11 +407,14 @@ static void raid1_end_write_request(struct bio *bio)
struct bio *to_put = NULL;
int mirror = find_bio_disk(r1_bio, bio);
struct md_rdev *rdev = conf->mirrors[mirror].rdev;
+ bool discard_error;
+
+ discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
/*
* 'one mirror IO has finished' event handler:
*/
- if (bio->bi_error) {
+ if (bio->bi_error && !discard_error) {
set_bit(WriteErrorSeen, &rdev->flags);
if (!test_and_set_bit(WantReplacement, &rdev->flags))
set_bit(MD_RECOVERY_NEEDED, &
@@ -448,7 +451,7 @@ static void raid1_end_write_request(struct bio *bio)
/* Maybe we can clear some bad blocks. */
if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
- &first_bad, &bad_sectors)) {
+ &first_bad, &bad_sectors) && !discard_error) {
r1_bio->bios[mirror] = IO_MADE_GOOD;
set_bit(R1BIO_MadeGood, &r1_bio->state);
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index be1a9fca3..39fddda2f 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -447,6 +447,9 @@ static void raid10_end_write_request(struct bio *bio)
struct r10conf *conf = r10_bio->mddev->private;
int slot, repl;
struct md_rdev *rdev = NULL;
+ bool discard_error;
+
+ discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
@@ -460,7 +463,7 @@ static void raid10_end_write_request(struct bio *bio)
/*
* this branch is our 'one mirror IO has finished' event handler:
*/
- if (bio->bi_error) {
+ if (bio->bi_error && !discard_error) {
if (repl)
/* Never record new bad blocks to replacement,
* just fail it.
@@ -503,7 +506,7 @@ static void raid10_end_write_request(struct bio *bio)
if (is_badblock(rdev,
r10_bio->devs[slot].addr,
r10_bio->sectors,
- &first_bad, &bad_sectors)) {
+ &first_bad, &bad_sectors) && !discard_error) {
bio_put(bio);
if (repl)
r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
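
raid1 and raid10 now apply the same gate in their write-completion handlers: a failed REQ_OP_DISCARD is tolerated (some devices reject discards harmlessly), so it must neither set WriteErrorSeen nor be recorded as a bad block. A standalone model of the classification:

#include <assert.h>
#include <stdbool.h>

enum bio_op { OP_READ, OP_WRITE, OP_DISCARD };

struct bio {
	int error;
	enum bio_op op;
};

/* Only non-discard failures count against the member device. */
static bool fatal_write_error(const struct bio *bio)
{
	return bio->error && bio->op != OP_DISCARD;
}

int main(void)
{
	struct bio failed_discard = { .error = -5, .op = OP_DISCARD };
	struct bio failed_write = { .error = -5, .op = OP_WRITE };

	assert(!fatal_write_error(&failed_discard));
	assert(fatal_write_error(&failed_write));
	return 0;
}
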
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 9fb4fc26a..ed9759e8a 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -675,6 +675,13 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq)
unsigned long flags;
int ret;
+ /* Clear the buffers ready flag to make sure the device won't be started
+ * by a QBUF on the video node on the other side of the pipeline.
+ */
+ spin_lock_irqsave(&video->irqlock, flags);
+ pipe->buffers_ready &= ~(1 << video->pipe_index);
+ spin_unlock_irqrestore(&video->irqlock, flags);
+
mutex_lock(&pipe->lock);
if (--pipe->stream_count == pipe->num_inputs) {
/* Stop the pipeline. */
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index af23d7dfe..2e5233b60 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -247,7 +247,9 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
cxl_ctx_get();
if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
+ put_pid(ctx->glpid);
put_pid(ctx->pid);
+ ctx->glpid = ctx->pid = NULL;
cxl_adapter_context_put(ctx->afu->adapter);
cxl_ctx_put();
goto out;
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index d0b421f49..77080cc5f 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -194,6 +194,16 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);
/*
+ * Increment the mapped context count for adapter. This also checks
+ * if adapter_context_lock is taken.
+ */
+ rc = cxl_adapter_context_get(ctx->afu->adapter);
+ if (rc) {
+ afu_release_irqs(ctx, ctx);
+ goto out;
+ }
+
+ /*
* We grab the PID here and not in the file open to allow for the case
* where a process (master, some daemon, etc) has opened the chardev on
* behalf of another process, so the AFU's mm gets bound to the process
@@ -205,15 +215,6 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
ctx->pid = get_task_pid(current, PIDTYPE_PID);
ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
- /*
- * Increment the mapped context count for adapter. This also checks
- * if adapter_context_lock is taken.
- */
- rc = cxl_adapter_context_get(ctx->afu->adapter);
- if (rc) {
- afu_release_irqs(ctx, ctx);
- goto out;
- }
trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
@@ -221,6 +222,9 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
amr))) {
afu_release_irqs(ctx, ctx);
cxl_adapter_context_put(ctx->afu->adapter);
+ put_pid(ctx->glpid);
+ put_pid(ctx->pid);
+ ctx->glpid = ctx->pid = NULL;
goto out;
}
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 222367cc8..524660510 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -352,17 +352,27 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
if (copy_from_user(sgl->lpage, user_addr + user_size -
sgl->lpage_size, sgl->lpage_size)) {
rc = -EFAULT;
- goto err_out1;
+ goto err_out2;
}
}
return 0;
+ err_out2:
+ __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
+ sgl->lpage_dma_addr);
+ sgl->lpage = NULL;
+ sgl->lpage_dma_addr = 0;
err_out1:
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
sgl->fpage_dma_addr);
+ sgl->fpage = NULL;
+ sgl->fpage_dma_addr = 0;
err_out:
__genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
sgl->sgl_dma_addr);
+ sgl->sgl = NULL;
+ sgl->sgl_dma_addr = 0;
+ sgl->sgl_size = 0;
return -ENOMEM;
}
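
The genwqe hunk above layers a new err_out2 label into the unwind path and clears every pointer after freeing it, so a later teardown of the same structure cannot act on stale state. A minimal userspace sketch of the same goto-ladder pattern, with hypothetical names rather than the genwqe API:

    #include <stdio.h>
    #include <stdlib.h>

    struct sgl {
            void *sgl, *fpage, *lpage;
    };

    /* Allocate three buffers; on failure, unwind only what was allocated
     * so far and NULL each pointer to rule out double frees. */
    static int sgl_alloc(struct sgl *s)
    {
            s->sgl = malloc(64);
            if (!s->sgl)
                    goto err_out;
            s->fpage = malloc(64);
            if (!s->fpage)
                    goto err_out1;
            s->lpage = malloc(64);
            if (!s->lpage)
                    goto err_out2;
            return 0;

    err_out2:
            free(s->fpage);
            s->fpage = NULL;
    err_out1:
            free(s->sgl);
            s->sgl = NULL;
    err_out:
            return -1;
    }

    int main(void)
    {
            struct sgl s = { 0 };

            if (sgl_alloc(&s))
                    return 1;
            printf("allocated\n");
            free(s.lpage);
            free(s.fpage);
            free(s.sgl);
            return 0;
    }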
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 4a6c1b85f..2d23cdf8a 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -978,11 +978,13 @@ static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
hisr = mei_txe_br_reg_read(hw, HISR_REG);
aliveness = mei_txe_aliveness_get(dev);
- if (hhisr & IPC_HHIER_SEC && aliveness)
+ if (hhisr & IPC_HHIER_SEC && aliveness) {
ipc_isr = mei_txe_sec_reg_read_silent(hw,
SEC_IPC_HOST_INT_STATUS_REG);
- else
+ } else {
ipc_isr = 0;
+ hhisr &= ~IPC_HHIER_SEC;
+ }
generated = generated ||
(hisr & HISR_INT_STS_MSK) ||
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index c0bb0c793..dbbc4303b 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -46,12 +46,13 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
host->pdata = pdev->dev.platform_data;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- /* Get registers' physical base address */
- host->phy_regs = regs->start;
host->regs = devm_ioremap_resource(&pdev->dev, regs);
if (IS_ERR(host->regs))
return PTR_ERR(host->regs);
+ /* Get registers' physical base address */
+ host->phy_regs = regs->start;
+
platform_set_drvdata(pdev, host);
return dw_mci_probe(host);
}
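
The dw_mmc fix is pure ordering: platform_get_resource() may return NULL, and devm_ioremap_resource() tolerates a NULL resource, so the regs->start dereference has to move below the error check. A small userspace sketch of the same validate-before-dereference ordering (stand-in types, not the platform API):

    #include <stdio.h>
    #include <stddef.h>

    struct resource { unsigned long start; };

    /* May return NULL when the platform data lacks the region. */
    static struct resource *get_resource(int present)
    {
            static struct resource r = { .start = 0xfe320000 };
            return present ? &r : NULL;
    }

    static int probe(int present)
    {
            struct resource *regs = get_resource(present);

            /* Check the pointer (or run the NULL-tolerant helper) before
             * touching regs->start. */
            if (!regs)
                    return -1;
            printf("phys base: 0x%lx\n", regs->start);
            return 0;
    }

    int main(void)
    {
            probe(0);       /* missing resource: handled, no crash */
            return probe(1);
    }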
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 48eb55f34..a01a70a8f 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -515,10 +515,11 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
unsigned long long ec = be64_to_cpu(ech->ec);
unmap_peb(ai, pnum);
dbg_bld("Adding PEB to free: %i", pnum);
+
if (err == UBI_IO_FF_BITFLIPS)
- add_aeb(ai, free, pnum, ec, 1);
- else
- add_aeb(ai, free, pnum, ec, 0);
+ scrub = 1;
+
+ add_aeb(ai, free, pnum, ec, scrub);
continue;
} else if (err == 0 || err == UBI_IO_BITFLIPS) {
dbg_bld("Found non empty PEB:%i in pool", pnum);
@@ -750,11 +751,11 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
fmvhdr->vol_type,
be32_to_cpu(fmvhdr->last_eb_bytes));
- if (!av)
- goto fail_bad;
- if (PTR_ERR(av) == -EINVAL) {
- ubi_err(ubi, "volume (ID %i) already exists",
- fmvhdr->vol_id);
+ if (IS_ERR(av)) {
+ if (PTR_ERR(av) == -EEXIST)
+ ubi_err(ubi, "volume (ID %i) already exists",
+ fmvhdr->vol_id);
+
goto fail_bad;
}
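
The fastmap hunk replaces a NULL test with IS_ERR()/PTR_ERR(): the volume-add helper evidently reports failure through an error pointer, so `if (!av)` could never trigger. A self-contained illustration of the error-pointer encoding these macros rely on, reimplemented in userspace purely for demonstration:

    #include <stdio.h>
    #include <errno.h>

    #define MAX_ERRNO 4095

    /* Userspace stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR:
     * negative errno values live in the top page of the address space. */
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *find_volume(int vol_id)
    {
            static int dummy;

            if (vol_id == 7)
                    return ERR_PTR(-EEXIST);        /* already registered */
            return &dummy;
    }

    int main(void)
    {
            void *av = find_volume(7);

            if (IS_ERR(av)) {
                    if (PTR_ERR(av) == -EEXIST)
                            fprintf(stderr, "volume already exists\n");
                    return 1;
            }
            printf("volume found\n");
            return 0;
    }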
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 30ae5bf81..76ad825a8 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -445,6 +445,7 @@ struct ath10k_debug {
u32 pktlog_filter;
u32 reg_addr;
u32 nf_cal_period;
+ void *cal_data;
struct ath10k_fw_crash_data *fw_crash_data;
};
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 8f0fd41df..8c6a5dd7e 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -30,6 +30,8 @@
/* ms */
#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
+#define ATH10K_DEBUG_CAL_DATA_LEN 12064
+
#define ATH10K_FW_CRASH_DUMP_VERSION 1
/**
@@ -1450,56 +1452,51 @@ static const struct file_operations fops_fw_dbglog = {
.llseek = default_llseek,
};
-static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
+static int ath10k_debug_cal_data_fetch(struct ath10k *ar)
{
- struct ath10k *ar = inode->i_private;
- void *buf;
u32 hi_addr;
__le32 addr;
int ret;
- mutex_lock(&ar->conf_mutex);
-
- if (ar->state != ATH10K_STATE_ON &&
- ar->state != ATH10K_STATE_UTF) {
- ret = -ENETDOWN;
- goto err;
- }
+ lockdep_assert_held(&ar->conf_mutex);
- buf = vmalloc(ar->hw_params.cal_data_len);
- if (!buf) {
- ret = -ENOMEM;
- goto err;
- }
+ if (WARN_ON(ar->hw_params.cal_data_len > ATH10K_DEBUG_CAL_DATA_LEN))
+ return -EINVAL;
hi_addr = host_interest_item_address(HI_ITEM(hi_board_data));
ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr));
if (ret) {
- ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret);
- goto err_vfree;
+ ath10k_warn(ar, "failed to read hi_board_data address: %d\n",
+ ret);
+ return ret;
}
- ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf,
+ ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), ar->debug.cal_data,
ar->hw_params.cal_data_len);
if (ret) {
ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
- goto err_vfree;
+ return ret;
}
- file->private_data = buf;
+ return 0;
+}
- mutex_unlock(&ar->conf_mutex);
+static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
+{
+ struct ath10k *ar = inode->i_private;
- return 0;
+ mutex_lock(&ar->conf_mutex);
-err_vfree:
- vfree(buf);
+ if (ar->state == ATH10K_STATE_ON ||
+ ar->state == ATH10K_STATE_UTF) {
+ ath10k_debug_cal_data_fetch(ar);
+ }
-err:
+ file->private_data = ar;
mutex_unlock(&ar->conf_mutex);
- return ret;
+ return 0;
}
static ssize_t ath10k_debug_cal_data_read(struct file *file,
@@ -1507,18 +1504,16 @@ static ssize_t ath10k_debug_cal_data_read(struct file *file,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
- void *buf = file->private_data;
- return simple_read_from_buffer(user_buf, count, ppos,
- buf, ar->hw_params.cal_data_len);
-}
+ mutex_lock(&ar->conf_mutex);
-static int ath10k_debug_cal_data_release(struct inode *inode,
- struct file *file)
-{
- vfree(file->private_data);
+ count = simple_read_from_buffer(user_buf, count, ppos,
+ ar->debug.cal_data,
+ ar->hw_params.cal_data_len);
- return 0;
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
}
static ssize_t ath10k_write_ani_enable(struct file *file,
@@ -1579,7 +1574,6 @@ static const struct file_operations fops_ani_enable = {
static const struct file_operations fops_cal_data = {
.open = ath10k_debug_cal_data_open,
.read = ath10k_debug_cal_data_read,
- .release = ath10k_debug_cal_data_release,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
@@ -1931,6 +1925,8 @@ void ath10k_debug_stop(struct ath10k *ar)
{
lockdep_assert_held(&ar->conf_mutex);
+ ath10k_debug_cal_data_fetch(ar);
+
/* Must not use _sync to avoid deadlock, we do that in
* ath10k_debug_destroy(). The check for htt_stats_mask is to avoid
* warning from del_timer(). */
@@ -2343,6 +2339,10 @@ int ath10k_debug_create(struct ath10k *ar)
if (!ar->debug.fw_crash_data)
return -ENOMEM;
+ ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
+ if (!ar->debug.cal_data)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
@@ -2356,6 +2356,9 @@ void ath10k_debug_destroy(struct ath10k *ar)
vfree(ar->debug.fw_crash_data);
ar->debug.fw_crash_data = NULL;
+ vfree(ar->debug.cal_data);
+ ar->debug.cal_data = NULL;
+
ath10k_debug_fw_stats_reset(ar);
kfree(ar->debug.tpc_stats);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index b6f064a8d..7e27a06e5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -33,7 +33,6 @@ struct coeff {
enum ar9003_cal_types {
IQ_MISMATCH_CAL = BIT(0),
- TEMP_COMP_CAL = BIT(1),
};
static void ar9003_hw_setup_calibration(struct ath_hw *ah,
@@ -59,12 +58,6 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
/* Kick-off cal */
REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL);
break;
- case TEMP_COMP_CAL:
- ath_dbg(common, CALIBRATE,
- "starting Temperature Compensation Calibration\n");
- REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_LOCAL);
- REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_START);
- break;
default:
ath_err(common, "Invalid calibration type\n");
break;
@@ -93,8 +86,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah,
/*
* Accumulate cal measures for active chains
*/
- if (cur_caldata->calCollect)
- cur_caldata->calCollect(ah);
+ cur_caldata->calCollect(ah);
ah->cal_samples++;
if (ah->cal_samples >= cur_caldata->calNumSamples) {
@@ -107,8 +99,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah,
/*
* Process accumulated data
*/
- if (cur_caldata->calPostProc)
- cur_caldata->calPostProc(ah, numChains);
+ cur_caldata->calPostProc(ah, numChains);
/* Calibration has finished. */
caldata->CalValid |= cur_caldata->calType;
@@ -323,16 +314,9 @@ static const struct ath9k_percal_data iq_cal_single_sample = {
ar9003_hw_iqcalibrate
};
-static const struct ath9k_percal_data temp_cal_single_sample = {
- TEMP_COMP_CAL,
- MIN_CAL_SAMPLES,
- PER_MAX_LOG_COUNT,
-};
-
static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
{
ah->iq_caldata.calData = &iq_cal_single_sample;
- ah->temp_caldata.calData = &temp_cal_single_sample;
if (AR_SREV_9300_20_OR_LATER(ah)) {
ah->enabled_cals |= TX_IQ_CAL;
@@ -340,7 +324,7 @@ static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
}
- ah->supp_cals = IQ_MISMATCH_CAL | TEMP_COMP_CAL;
+ ah->supp_cals = IQ_MISMATCH_CAL;
}
#define OFF_UPPER_LT 24
@@ -1399,9 +1383,6 @@ static void ar9003_hw_init_cal_common(struct ath_hw *ah)
INIT_CAL(&ah->iq_caldata);
INSERT_CAL(ah, &ah->iq_caldata);
- INIT_CAL(&ah->temp_caldata);
- INSERT_CAL(ah, &ah->temp_caldata);
-
/* Initialize current pointer to first element in list */
ah->cal_list_curr = ah->cal_list;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 2a5d3ad11..9cbca1229 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -830,7 +830,6 @@ struct ath_hw {
/* Calibration */
u32 supp_cals;
struct ath9k_cal_list iq_caldata;
- struct ath9k_cal_list temp_caldata;
struct ath9k_cal_list adcgain_caldata;
struct ath9k_cal_list adcdc_caldata;
struct ath9k_cal_list *cal_list;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 4341d5680..a28093235 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -231,7 +231,7 @@ struct rtl8xxxu_rxdesc16 {
u32 pattern1match:1;
u32 pattern0match:1;
#endif
- __le32 tsfl;
+ u32 tsfl;
#if 0
u32 bassn:12;
u32 bavld:1;
@@ -361,7 +361,7 @@ struct rtl8xxxu_rxdesc24 {
u32 ldcp:1;
u32 splcp:1;
#endif
- __le32 tsfl;
+ u32 tsfl;
};
struct rtl8xxxu_txdesc32 {
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index 1348819f6..7557a82db 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -1498,6 +1498,10 @@ static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
u32 val32;
u8 val8;
+ val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA);
+ val32 |= (BIT(22) | BIT(23));
+ rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
+
/*
* No indication anywhere as to what 0x0790 does. The 2 antenna
* vendor code preserves bits 6-7 here.
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 4b56f3921..5ca39e29c 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -5193,7 +5193,12 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
pkt_offset = roundup(pkt_len + drvinfo_sz + desc_shift +
sizeof(struct rtl8xxxu_rxdesc16), 128);
- if (pkt_cnt > 1)
+ /*
+ * Only clone the skb if there's enough data at the end to
+ * at least cover the rx descriptor
+ */
+ if (pkt_cnt > 1 &&
+ urb_len > (pkt_offset + sizeof(struct rtl8xxxu_rxdesc16)))
next_skb = skb_clone(skb, GFP_ATOMIC);
rx_status = IEEE80211_SKB_RXCB(skb);
@@ -5211,7 +5216,7 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
rx_desc->rxmcs);
- rx_status->mactime = le32_to_cpu(rx_desc->tsfl);
+ rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
if (!rx_desc->swdec)
@@ -5281,7 +5286,7 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
rx_desc->rxmcs);
- rx_status->mactime = le32_to_cpu(rx_desc->tsfl);
+ rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
if (!rx_desc->swdec)
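
Dropping the __le32 annotation and the le32_to_cpu() call suggests the tsfl field is already in host byte order by the time the rx descriptor is parsed; converting it again is a no-op on little-endian but corrupts the timestamp on big-endian machines. A tiny demonstration of that failure mode (host-order value assumed, as the patch implies):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t bswap32(uint32_t x)
    {
            return (x >> 24) | ((x >> 8) & 0xff00) |
                   ((x << 8) & 0xff0000) | (x << 24);
    }

    int main(void)
    {
            uint32_t tsfl = 0x12345678;     /* already host order after parsing */

            /* On big-endian hosts le32_to_cpu() byte-swaps; applying it to
             * a value that is already host order mangles the timestamp: */
            printf("correct value:       0x%08x\n", tsfl);
            printf("swapped again on BE: 0x%08x\n", bswap32(tsfl));
            return 0;
    }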
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 0dbd29e28..172ef8245 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -339,6 +339,8 @@ int pwmchip_remove(struct pwm_chip *chip)
unsigned int i;
int ret = 0;
+ pwmchip_sysfs_unexport_children(chip);
+
mutex_lock(&pwm_lock);
for (i = 0; i < chip->npwm; i++) {
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 18ed72559..0296d8178 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -409,6 +409,24 @@ void pwmchip_sysfs_unexport(struct pwm_chip *chip)
}
}
+void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
+{
+ struct device *parent;
+ unsigned int i;
+
+ parent = class_find_device(&pwm_class, NULL, chip,
+ pwmchip_sysfs_match);
+ if (!parent)
+ return;
+
+ for (i = 0; i < chip->npwm; i++) {
+ struct pwm_device *pwm = &chip->pwms[i];
+
+ if (test_bit(PWMF_EXPORTED, &pwm->flags))
+ pwm_unexport_child(parent, pwm);
+ }
+}
+
static int __init pwm_sysfs_init(void)
{
return class_register(&pwm_class);
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 3d53d636b..f0cfb0451 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2636,18 +2636,9 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
struct CommandControlBlock *ccb;
int target = cmd->device->id;
- int lun = cmd->device->lun;
- uint8_t scsicmd = cmd->cmnd[0];
cmd->scsi_done = done;
cmd->host_scribble = NULL;
cmd->result = 0;
- if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){
- if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
- cmd->result = (DID_NO_CONNECT << 16);
- }
- cmd->scsi_done(cmd);
- return 0;
- }
if (target == 16) {
/* virtual device for iop message transfer */
arcmsr_handle_virtual_command(acb, cmd);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 75455d4da..1deb6adc4 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -621,9 +621,6 @@ int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
wmb();
}
- if (sdev->request_queue)
- blk_set_queue_depth(sdev->request_queue, depth);
-
return sdev->queue_depth;
}
EXPORT_SYMBOL(scsi_change_queue_depth);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 6a219a084..05e892a23 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -5134,6 +5134,7 @@ static void __exit scsi_debug_exit(void)
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
+ vfree(map_storep);
vfree(dif_storep);
vfree(fake_storep);
kfree(sdebug_q_arr);
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 8d85a3c34..3f3561371 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -581,7 +581,7 @@ void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
mspi->len -= rx_nr_bytes;
- if (mspi->rx)
+ if (rx_nr_bytes && mspi->rx)
mspi->get_rx(rx_data, mspi);
}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 200ca228d..935f1a511 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1607,9 +1607,11 @@ static void of_register_spi_devices(struct spi_master *master)
if (of_node_test_and_set_flag(nc, OF_POPULATED))
continue;
spi = of_register_spi_device(master, nc);
- if (IS_ERR(spi))
+ if (IS_ERR(spi)) {
dev_warn(&master->dev, "Failed to create SPI device for %s\n",
nc->full_name);
+ of_node_clear_flag(nc, OF_POPULATED);
+ }
}
}
#else
@@ -3120,6 +3122,7 @@ static int of_spi_notify(struct notifier_block *nb, unsigned long action,
if (IS_ERR(spi)) {
pr_err("%s: failed to create for '%s'\n",
__func__, rd->dn->full_name);
+ of_node_clear_flag(rd->dn, OF_POPULATED);
return notifier_from_errno(PTR_ERR(spi));
}
break;
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 78f524fcd..f4dbcb19d 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -3391,7 +3391,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
clients_count++;
- destroy_workqueue(hif_workqueue);
_fail_:
return result;
}
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 0e4dc0afc..7a223074d 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -669,20 +669,10 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
.set_cur_state = powerclamp_set_cur_state,
};
-static const struct x86_cpu_id intel_powerclamp_ids[] __initconst = {
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_ARAT },
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_NONSTOP_TSC },
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_CONSTANT_TSC},
- {}
-};
-MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
-
static int __init powerclamp_probe(void)
{
- if (!x86_match_cpu(intel_powerclamp_ids)) {
- pr_err("Intel powerclamp does not run on family %d model %d\n",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
+ if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
+ pr_err("CPU does not support MWAIT\n");
return -ENODEV;
}
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 2705ca960..fd375f15b 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -870,10 +870,15 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
return 0;
+ if (new_screen_size > (4 << 20))
+ return -EINVAL;
newscreen = kmalloc(new_screen_size, GFP_USER);
if (!newscreen)
return -ENOMEM;
+ if (vc == sel_cons)
+ clear_selection();
+
old_rows = vc->vc_rows;
old_row_size = vc->vc_size_row;
@@ -1176,7 +1181,7 @@ static void csi_J(struct vc_data *vc, int vpar)
break;
case 3: /* erase scroll-back buffer (and whole display) */
scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
- vc->vc_screenbuf_size >> 1);
+ vc->vc_screenbuf_size);
set_origin(vc);
if (con_is_visible(vc))
update_screen(vc);
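
The vt resize path now refuses user-influenced screen sizes above 4 MiB before they ever reach kmalloc(), and drops any selection that still points into the buffer being replaced. A sketch of the bounded-allocation half, with a hypothetical cell size and limit name:

    #include <stdio.h>
    #include <stdlib.h>

    #define SCREEN_SIZE_MAX (4u << 20)      /* 4 MiB cap, as in the vt fix */

    /* Allocate a screen buffer from untrusted dimensions, guarding against
     * both integer overflow and absurd sizes before touching the allocator. */
    static void *alloc_screen(unsigned int cols, unsigned int rows)
    {
            unsigned long long size = 2ull * cols * rows;   /* 2 bytes/cell */

            if (size == 0 || size > SCREEN_SIZE_MAX)
                    return NULL;
            return malloc((size_t)size);
    }

    int main(void)
    {
            if (!alloc_screen(100000, 100000))
                    printf("oversized resize rejected\n");
            void *buf = alloc_screen(80, 25);
            if (buf)
                    printf("allocated 80x25 buffer\n");
            free(buf);
            return 0;
    }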
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 053bac9d9..887be343f 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -185,6 +185,8 @@ static void host_stop(struct ci_hdrc *ci)
if (hcd) {
usb_remove_hcd(hcd);
+ ci->role = CI_ROLE_END;
+ synchronize_irq(ci->irq);
usb_put_hcd(hcd);
if (ci->platdata->reg_vbus && !ci_otg_is_fsm_mode(ci) &&
(ci->platdata->flags & CI_HDRC_TURN_VBUS_EARLY_ON))
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 685446189..6443cfba7 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -3055,7 +3055,7 @@ err3:
kfree(dwc->setup_buf);
err2:
- dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
+ dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
dwc->ep0_trb, dwc->ep0_trb_addr);
err1:
@@ -3080,7 +3080,7 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
kfree(dwc->setup_buf);
kfree(dwc->zlp_buf);
- dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
+ dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
dwc->ep0_trb, dwc->ep0_trb_addr);
dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 5f562c1ec..9b9e71f2c 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -587,8 +587,9 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
/* throttle high/super speed IRQ rate back slightly */
if (gadget_is_dualspeed(dev->gadget))
- req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
- dev->gadget->speed == USB_SPEED_SUPER)
+ req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH ||
+ dev->gadget->speed == USB_SPEED_SUPER)) &&
+ !list_empty(&dev->tx_reqs))
? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
: 0;
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index bb1f6c8f0..45bc997d0 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1978,7 +1978,7 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
goto err;
}
- ep->ep.name = name;
+ ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index);
ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 1700908b8..86612ac3f 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -72,7 +72,7 @@
static const char hcd_name [] = "ohci_hcd";
#define STATECHANGE_DELAY msecs_to_jiffies(300)
-#define IO_WATCHDOG_DELAY msecs_to_jiffies(250)
+#define IO_WATCHDOG_DELAY msecs_to_jiffies(275)
#include "ohci.h"
#include "pci-quirks.h"
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 730b9fd26..0ef16900e 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1166,7 +1166,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_RESUME);
spin_unlock_irqrestore(&xhci->lock, flags);
- msleep(20);
+ msleep(USB_RESUME_TIMEOUT);
spin_lock_irqsave(&xhci->lock, flags);
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_U0);
@@ -1355,6 +1355,35 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
return 0;
}
+/*
+ * Workaround for missing Cold Attach Status (CAS) if a device is
+ * re-plugged in S3: warm reset a USB3 device stuck in polling or
+ * compliance mode after resume.
+ * See Intel 100/c230 series PCH specification update Doc #332692-006 Errata #8
+ */
+static bool xhci_port_missing_cas_quirk(int port_index,
+ __le32 __iomem **port_array)
+{
+ u32 portsc;
+
+ portsc = readl(port_array[port_index]);
+
+ /* if any of these are set we are not stuck */
+ if (portsc & (PORT_CONNECT | PORT_CAS))
+ return false;
+
+ if (((portsc & PORT_PLS_MASK) != XDEV_POLLING) &&
+ ((portsc & PORT_PLS_MASK) != XDEV_COMP_MODE))
+ return false;
+
+ /* clear wakeup/change bits, and do a warm port reset */
+ portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
+ portsc |= PORT_WR;
+ writel(portsc, port_array[port_index]);
+ /* flush write */
+ readl(port_array[port_index]);
+ return true;
+}
+
int xhci_bus_resume(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -1392,6 +1421,14 @@ int xhci_bus_resume(struct usb_hcd *hcd)
u32 temp;
temp = readl(port_array[port_index]);
+
+ /* warm reset CAS limited ports stuck in polling/compliance */
+ if ((xhci->quirks & XHCI_MISSING_CAS) &&
+ (hcd->speed >= HCD_USB3) &&
+ xhci_port_missing_cas_quirk(port_index, port_array)) {
+ xhci_dbg(xhci, "reset stuck port %d\n", port_index);
+ continue;
+ }
if (DEV_SUPERSPEED_ANY(temp))
temp &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
else
@@ -1410,7 +1447,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
if (need_usb2_u3_exit) {
spin_unlock_irqrestore(&xhci->lock, flags);
- msleep(20);
+ msleep(USB_RESUME_TIMEOUT);
spin_lock_irqsave(&xhci->lock, flags);
}
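
The quirk follows the usual PORTSC read-modify-write discipline: mask out the write-1-to-clear change bits so writing the register back does not wipe pending status, then set the warm-reset bit and read the register back to post the write. A userspace model of that bit discipline (register layout simplified from the xhci defines; treat the exact bit positions as illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define PORT_CONNECT    (1u << 0)
    #define PORT_PLS_MASK   (0xfu << 5)
    #define XDEV_POLLING    (0x7u << 5)
    #define PORT_RWC_BITS   (0x7u << 17)    /* write-1-to-clear change bits */
    #define PORT_WR         (1u << 31)      /* warm reset */

    static uint32_t portsc = XDEV_POLLING | (1u << 18); /* stuck, change set */

    int main(void)
    {
            uint32_t v = portsc;

            if (!(v & PORT_CONNECT) && (v & PORT_PLS_MASK) == XDEV_POLLING) {
                    /* Clear RW1C bits so writing the value back does not
                     * acknowledge status we never handled, then request a
                     * warm reset. */
                    v &= ~PORT_RWC_BITS;
                    v |= PORT_WR;
                    portsc = v;     /* on hardware: writel() + readl() flush */
                    printf("warm reset requested: 0x%08x\n", portsc);
            }
            return 0;
    }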
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index d7b0f97ab..e96ae80d1 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -45,11 +45,13 @@
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
+#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1
#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
+#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
static const char hcd_name[] = "xhci_hcd";
@@ -153,7 +155,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_SPURIOUS_REBOOT;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
- pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+ (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI)) {
xhci->quirks |= XHCI_SPURIOUS_REBOOT;
xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
}
@@ -169,6 +172,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
}
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
+ xhci->quirks |= XHCI_MISSING_CAS;
+
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
pdev->device == PCI_DEVICE_ID_EJ168) {
xhci->quirks |= XHCI_RESET_ON_RESUME;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index b2c1dc5dc..f94538003 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -314,6 +314,8 @@ struct xhci_op_regs {
#define XDEV_U2 (0x2 << 5)
#define XDEV_U3 (0x3 << 5)
#define XDEV_INACTIVE (0x6 << 5)
+#define XDEV_POLLING (0x7 << 5)
+#define XDEV_COMP_MODE (0xa << 5)
#define XDEV_RESUME (0xf << 5)
/* true: port has power (see HCC_PPC) */
#define PORT_POWER (1 << 9)
@@ -1653,6 +1655,7 @@ struct xhci_hcd {
#define XHCI_MTK_HOST (1 << 21)
#define XHCI_SSIC_PORT_UNUSED (1 << 22)
#define XHCI_NO_64BIT_SUPPORT (1 << 23)
+#define XHCI_MISSING_CAS (1 << 24)
unsigned int num_active_eps;
unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 0b4cec940..dae92de92 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -337,6 +337,7 @@ static int omap2430_musb_init(struct musb *musb)
}
musb->isr = omap2430_musb_interrupt;
phy_init(musb->phy);
+ phy_power_on(musb->phy);
l = musb_readl(musb->mregs, OTG_INTERFSEL);
@@ -373,8 +374,6 @@ static void omap2430_musb_enable(struct musb *musb)
struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
struct omap_musb_board_data *data = pdata->board_data;
- if (!WARN_ON(!musb->phy))
- phy_power_on(musb->phy);
omap2430_set_power(musb, true, glue->cable_connected);
@@ -413,9 +412,6 @@ static void omap2430_musb_disable(struct musb *musb)
struct device *dev = musb->controller;
struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
- if (!WARN_ON(!musb->phy))
- phy_power_off(musb->phy);
-
if (glue->status != MUSB_UNKNOWN)
omap_control_usb_set_mode(glue->control_otghs,
USB_MODE_DISCONNECT);
@@ -429,6 +425,7 @@ static int omap2430_musb_exit(struct musb *musb)
struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
omap2430_low_level_exit(musb);
+ phy_power_off(musb->phy);
phy_exit(musb->phy);
musb->phy = NULL;
cancel_work_sync(&glue->omap_musb_mailbox_work);
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
index 1d70add92..d544b331c 100644
--- a/drivers/usb/renesas_usbhs/rcar3.c
+++ b/drivers/usb/renesas_usbhs/rcar3.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/delay.h>
#include <linux/io.h>
#include "common.h"
#include "rcar3.h"
@@ -35,10 +36,13 @@ static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG);
- if (enable)
+ if (enable) {
usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
- else
+ /* The controller on R-Car Gen3 needs to wait up to 45 usec */
+ udelay(45);
+ } else {
usbhs_bset(priv, LPSTS, LPSTS_SUSPM, 0);
+ }
return 0;
}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 54a4de0ef..f61477bed 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -1077,7 +1077,9 @@ static int cp210x_tiocmget(struct tty_struct *tty)
u8 control;
int result;
- cp210x_read_u8_reg(port, CP210X_GET_MDMSTS, &control);
+ result = cp210x_read_u8_reg(port, CP210X_GET_MDMSTS, &control);
+ if (result)
+ return result;
result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0)
|((control & CONTROL_RTS) ? TIOCM_RTS : 0)
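
cp210x_tiocmget() previously decoded `control` even when the register read had failed, i.e. it consumed an uninitialized out-parameter. A minimal sketch of checking the producer before the consumer (read_u8_reg() here is a made-up stand-in, not the cp210x helper):

    #include <stdio.h>
    #include <stdint.h>
    #include <errno.h>

    /* Hypothetical register read that may fail (e.g. device unplugged). */
    static int read_u8_reg(int ok, uint8_t *out)
    {
            if (!ok)
                    return -EIO;
            *out = 0x03;
            return 0;
    }

    int main(void)
    {
            uint8_t control;        /* uninitialized on purpose, like the bug */
            int result = read_u8_reg(0, &control);

            if (result)
                    return 1;       /* propagate failure, don't decode garbage */
            printf("DTR=%d RTS=%d\n", control & 1, (control >> 1) & 1);
            return 0;
    }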
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index b2d767e74..0ff7f38d7 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -986,7 +986,8 @@ static const struct usb_device_id id_table_combined[] = {
/* ekey Devices */
{ USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
/* Infineon Devices */
- { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC1798_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC2X7_PID, 1) },
/* GE Healthcare devices */
{ USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
/* Active Research (Actisense) devices */
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index f87a938cf..21011c0a4 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -626,8 +626,9 @@
/*
* Infineon Technologies
*/
-#define INFINEON_VID 0x058b
-#define INFINEON_TRIBOARD_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
+#define INFINEON_VID 0x058b
+#define INFINEON_TRIBOARD_TC1798_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
+#define INFINEON_TRIBOARD_TC2X7_PID 0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */
/*
* Acton Research Corp.
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index d213cf44a..4a037b4a7 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1078,7 +1078,8 @@ static int usb_serial_probe(struct usb_interface *interface,
serial->disconnected = 0;
- usb_serial_console_init(serial->port[0]->minor);
+ if (num_ports > 0)
+ usb_serial_console_init(serial->port[0]->minor);
exit:
module_put(type->driver.owner);
return 0;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
index 9e4800a4e..951dd93f8 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
@@ -5348,7 +5348,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi->phy_base = devm_ioremap(&dsidev->dev, res->start,
resource_size(res));
- if (!dsi->proto_base) {
+ if (!dsi->phy_base) {
DSSERR("can't ioremap DSI PHY\n");
return -ENOMEM;
}
@@ -5368,7 +5368,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi->pll_base = devm_ioremap(&dsidev->dev, res->start,
resource_size(res));
- if (!dsi->proto_base) {
+ if (!dsi->pll_base) {
DSSERR("can't ioremap DSI PLL\n");
return -ENOMEM;
}
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index 2c0487f4f..ed41fdb42 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -2125,7 +2125,7 @@ static int of_get_pxafb_display(struct device *dev, struct device_node *disp,
timings = of_get_display_timings(disp);
if (!timings)
- goto out;
+ return -EINVAL;
ret = -ENOMEM;
info->modes = kmalloc_array(timings->num_timings,
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index 8c4e61783..6d9e5173d 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -212,10 +212,18 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
return -ENODEV;
}
- rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
- if (rc)
- rc = dma_set_mask_and_coherent(&pci_dev->dev,
- DMA_BIT_MASK(32));
+ rc = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
+ } else {
+ /*
+ * The virtio ring base address is expressed as a 32-bit PFN,
+ * with a page size of 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT.
+ */
+ dma_set_coherent_mask(&pci_dev->dev,
+ DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT));
+ }
+
if (rc)
dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index ed9c9eeed..6b2cd922d 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -732,7 +732,8 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
- vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+ if (!vq->event)
+ vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
}
}
@@ -764,7 +765,8 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
* entry. Always do both to keep code simple. */
if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
- vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+ if (!vq->event)
+ vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
}
vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
END_USE(vq);
@@ -832,10 +834,11 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
* more to do. */
/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
* either clear the flags bit or point the event index at the next
- * entry. Always do both to keep code simple. */
+ * entry. Always update the event index to keep code simple. */
if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
- vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+ if (!vq->event)
+ vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
}
/* TODO: tune this threshold */
bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
@@ -953,7 +956,8 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
/* No callback? Tell other side not to bother us. */
if (!callback) {
vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
- vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
+ if (!vq->event)
+ vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
}
/* Put everything in free lists. */
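
All three hunks gate the shared avail->flags store on !vq->event: when VIRTIO_RING_F_EVENT_IDX is negotiated the device is expected to consult the event index instead, so writing the flags word is at best wasted traffic on shared memory. A toy model of the shadow-flag pattern (field names loosely follow the driver; this is not the real vring layout):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define VRING_AVAIL_F_NO_INTERRUPT 1

    struct vq {
            bool event;                     /* EVENT_IDX negotiated */
            uint16_t avail_flags_shadow;    /* driver-side cached flags */
            uint16_t avail_flags;           /* shared with the device */
    };

    static void disable_cb(struct vq *vq)
    {
            if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
                    vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
                    /* With EVENT_IDX the device ignores avail_flags, so
                     * skip the write to shared memory entirely. */
                    if (!vq->event)
                            vq->avail_flags = vq->avail_flags_shadow;
            }
    }

    int main(void)
    {
            struct vq vq = { .event = true };

            disable_cb(&vq);
            printf("shadow=%u shared=%u\n",
                   vq.avail_flags_shadow, vq.avail_flags);
            return 0;
    }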
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e6811c42e..bc1a004d4 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8915,9 +8915,14 @@ again:
* So even we call qgroup_free_data(), it won't decrease reserved
* space.
* 2) Not written to disk
- * This means the reserved space should be freed here.
+ * This means the reserved space should be freed here. However,
+ * if a truncate invalidates the page (by clearing PageDirty)
+ * and the page was accounted for while allocating the extent
+ * in btrfs_check_data_free_space(), we let the delayed ref
+ * free the entire extent.
*/
- btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE);
+ if (PageDirty(page))
+ btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE);
if (!inode_evicting) {
clear_extent_bit(tree, page_start, page_end,
EXTENT_LOCKED | EXTENT_DIRTY |
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index ef9c55bc7..90e1198bc 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2713,14 +2713,12 @@ static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
int index, int error)
{
struct btrfs_log_ctx *ctx;
+ struct btrfs_log_ctx *safe;
- if (!error) {
- INIT_LIST_HEAD(&root->log_ctxs[index]);
- return;
- }
-
- list_for_each_entry(ctx, &root->log_ctxs[index], list)
+ list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
+ list_del_init(&ctx->list);
ctx->log_ret = error;
+ }
INIT_LIST_HEAD(&root->log_ctxs[index]);
}
@@ -2961,13 +2959,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
mutex_unlock(&root->log_mutex);
out_wake_log_root:
- /*
- * We needn't get log_mutex here because we are sure all
- * the other tasks are blocked.
- */
+ mutex_lock(&log_root_tree->log_mutex);
btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
- mutex_lock(&log_root_tree->log_mutex);
log_root_tree->log_transid_committed++;
atomic_set(&log_root_tree->log_commit[index2], 0);
mutex_unlock(&log_root_tree->log_mutex);
@@ -2978,10 +2972,8 @@ out_wake_log_root:
if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
wake_up(&log_root_tree->log_commit_wait[index2]);
out:
- /* See above. */
- btrfs_remove_all_log_ctxs(root, index1, ret);
-
mutex_lock(&root->log_mutex);
+ btrfs_remove_all_log_ctxs(root, index1, ret);
root->log_transid_committed++;
atomic_set(&root->log_commit[index1], 0);
mutex_unlock(&root->log_mutex);
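
btrfs_remove_all_log_ctxs() now walks the list with list_for_each_entry_safe() because each ctx is unlinked mid-iteration and may be freed by its waiter as soon as log_ret is set. The same grab-the-successor-first pattern in plain C (hypothetical singly linked list, not the kernel list API):

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
            int val;
            struct node *next;
    };

    /* Deleting while iterating: cache the next pointer before freeing the
     * current element, the moral equivalent of list_for_each_entry_safe(). */
    static void free_all(struct node *head)
    {
            struct node *cur, *safe;

            for (cur = head; cur; cur = safe) {
                    safe = cur->next;       /* grab successor before free */
                    free(cur);
            }
    }

    int main(void)
    {
            struct node *head = NULL;

            for (int i = 0; i < 3; i++) {
                    struct node *n = malloc(sizeof(*n));
                    n->val = i;
                    n->next = head;
                    head = n;
            }
            free_all(head);
            printf("done\n");
            return 0;
    }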
diff --git a/fs/buffer.c b/fs/buffer.c
index 6a5f1a011..9c8eb9b6d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1698,7 +1698,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
struct buffer_head *bh, *head;
unsigned int blocksize, bbits;
int nr_underway = 0;
- int write_flags = wbc_to_write_flags(wbc);
+ int write_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
head = create_page_buffers(page, inode,
(1 << BH_Dirty)|(1 << BH_Uptodate));
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index cb0528b31..ccb401eeb 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1240,7 +1240,7 @@ static int f2fs_write_data_page(struct page *page,
.sbi = sbi,
.type = DATA,
.op = REQ_OP_WRITE,
- .op_flags = wbc_to_write_flags(wbc),
+ .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
.page = page,
.encrypted_page = NULL,
};
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index c1713da25..f75d197d5 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1561,7 +1561,7 @@ static int f2fs_write_node_page(struct page *page,
.sbi = sbi,
.type = NODE,
.op = REQ_OP_WRITE,
- .op_flags = wbc_to_write_flags(wbc),
+ .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
.page = page,
.encrypted_page = NULL,
};
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 7991c62e9..950b8be68 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -37,7 +37,8 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
{
struct buffer_head *bh, *head;
int nr_underway = 0;
- int write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);
+ int write_flags = REQ_META | REQ_PRIO |
+ (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
BUG_ON(!PageLocked(page));
BUG_ON(!page_has_buffers(page));
diff --git a/fs/mpage.c b/fs/mpage.c
index d6f1afe33..d2413af08 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -489,7 +489,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
struct buffer_head map_bh;
loff_t i_size = i_size_read(inode);
int ret = 0;
- int op_flags = wbc_to_write_flags(wbc);
+ int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
if (page_has_buffers(page)) {
struct buffer_head *head = page_buffers(page);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index a204d7e10..0fe31b4b1 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1147,9 +1147,7 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
- struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
-
- lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
+ lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
list_del_init(&stp->st_locks);
nfs4_unhash_stid(&stp->st_stid);
@@ -1158,12 +1156,12 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
- struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
+ struct nfs4_client *clp = stp->st_stid.sc_client;
bool unhashed;
- spin_lock(&oo->oo_owner.so_client->cl_lock);
+ spin_lock(&clp->cl_lock);
unhashed = unhash_lock_stateid(stp);
- spin_unlock(&oo->oo_owner.so_client->cl_lock);
+ spin_unlock(&clp->cl_lock);
if (unhashed)
nfs4_put_stid(&stp->st_stid);
}
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index abadbc30e..767377e52 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -171,6 +171,8 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
len -= bytes;
}
+ if (!error)
+ error = vfs_fsync(new_file, 0);
fput(new_file);
out_fput:
fput(old_file);
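
Copy-up now fsync()s the new upper file before reporting success, so a crash immediately afterwards cannot leave a zero-length or truncated copy shadowing the intact lower file. A userspace sketch of the same copy-then-sync rule (file names are placeholders):

    #include <stdio.h>
    #include <unistd.h>
    #include <fcntl.h>

    /* Copy fd_in to fd_out, then fsync() so the data is durable before
     * the caller treats the copy as complete. */
    static int copy_and_sync(int fd_in, int fd_out)
    {
            char buf[4096];
            ssize_t n;

            while ((n = read(fd_in, buf, sizeof(buf))) > 0)
                    if (write(fd_out, buf, (size_t)n) != n)
                            return -1;
            if (n < 0)
                    return -1;
            return fsync(fd_out);   /* flush before declaring success */
    }

    int main(void)
    {
            int in = open("src.txt", O_RDONLY);
            int out = open("dst.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);

            if (in < 0 || out < 0)
                    return 1;
            if (copy_and_sync(in, out))
                    perror("copy");
            close(in);
            close(out);
            return 0;
    }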
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index c75625c1e..cf2bfeb1b 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -294,9 +294,6 @@ struct posix_acl *ovl_get_acl(struct inode *inode, int type)
if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !IS_POSIXACL(realinode))
return NULL;
- if (!realinode->i_op->get_acl)
- return NULL;
-
old_cred = ovl_override_creds(inode->i_sb);
acl = get_acl(realinode, type);
revert_creds(old_cred);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index e2a94a267..a78415d77 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -1026,6 +1026,21 @@ ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
posix_acl_release(acl);
+ /*
+ * Check if the sgid bit needs to be cleared (the actual setacl is done
+ * with the mounter's capabilities, so it won't do this for us).
+ */
+ if (unlikely(inode->i_mode & S_ISGID) &&
+ handler->flags == ACL_TYPE_ACCESS &&
+ !in_group_p(inode->i_gid) &&
+ !capable_wrt_inode_uidgid(inode, CAP_FSETID)) {
+ struct iattr iattr = { .ia_valid = ATTR_KILL_SGID };
+
+ err = ovl_setattr(dentry, &iattr);
+ if (err)
+ return err;
+ }
+
err = ovl_xattr_set(dentry, handler->name, value, size, flags);
if (!err)
ovl_copyattr(ovl_inode_real(inode, NULL), inode);
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 4b86d3a73..3b27145f9 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -350,7 +350,7 @@ static unsigned int vfs_dent_type(uint8_t type)
*/
static int ubifs_readdir(struct file *file, struct dir_context *ctx)
{
- int err;
+ int err = 0;
struct qstr nm;
union ubifs_key key;
struct ubifs_dent_node *dent;
@@ -452,14 +452,20 @@ out:
kfree(file->private_data);
file->private_data = NULL;
- if (err != -ENOENT) {
+ if (err != -ENOENT)
ubifs_err(c, "cannot find next direntry, error %d", err);
- return err;
- }
+ else
+ /*
+ * -ENOENT is a non-fatal error in this context; the TNC uses it
+ * to indicate that the cursor moved past the current directory
+ * and readdir() has to stop.
+ */
+ err = 0;
+
/* 2 is a special value indicating that there are no more direntries */
ctx->pos = 2;
- return 0;
+ return err;
}
/* Free saved readdir() state when the directory is closed */
diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
index 3cc3cf767..ac9a003dd 100644
--- a/fs/xfs/libxfs/xfs_dquot_buf.c
+++ b/fs/xfs/libxfs/xfs_dquot_buf.c
@@ -191,8 +191,7 @@ xfs_dquot_buf_verify_crc(
if (mp->m_quotainfo)
ndquots = mp->m_quotainfo->qi_dqperchunk;
else
- ndquots = xfs_calc_dquots_per_chunk(
- XFS_BB_TO_FSB(mp, bp->b_length));
+ ndquots = xfs_calc_dquots_per_chunk(bp->b_length);
for (i = 0; i < ndquots; i++, d++) {
if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index a68645abd..7575cfc3a 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -447,8 +447,8 @@ xfs_submit_ioend(
ioend->io_bio->bi_private = ioend;
ioend->io_bio->bi_end_io = xfs_end_bio;
- bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE, wbc_to_write_flags(wbc));
-
+ bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE,
+ (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0);
/*
* If we are failing the IO now, just mark the ioend with an
* error and finish it. This will run IO completion immediately
@@ -519,7 +519,8 @@ xfs_chain_bio(
bio_chain(ioend->io_bio, new);
bio_get(ioend->io_bio); /* for xfs_destroy_ioend */
- bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE, wbc_to_write_flags(wbc));
+ bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE,
+ (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0);
submit_bio(ioend->io_bio);
ioend->io_bio = new;
}
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index dc5f76d7f..c357f27d5 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -116,8 +116,6 @@ struct bdi_writeback {
struct list_head work_list;
struct delayed_work dwork; /* work item used for writeback */
- unsigned long dirty_sleep; /* last wait */
-
struct list_head bdi_node; /* anchored at bdi->wb_list */
#ifdef CONFIG_CGROUP_WRITEBACK
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 95fbfa1fe..436f43f87 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -155,7 +155,6 @@ enum rq_flag_bits {
__REQ_INTEGRITY, /* I/O includes block integrity payload */
__REQ_FUA, /* forced unit access */
__REQ_PREFLUSH, /* request for cache flush */
- __REQ_BG, /* background activity */
/* bio only flags */
__REQ_RAHEAD, /* read ahead, can fail anytime */
@@ -199,7 +198,7 @@ enum rq_flag_bits {
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
(REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \
- REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE | REQ_BG)
+ REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE)
#define REQ_CLONE_MASK REQ_COMMON_MASK
/* This mask is used for both bio and request merge checking */
@@ -224,7 +223,6 @@ enum rq_flag_bits {
#define REQ_COPY_USER (1ULL << __REQ_COPY_USER)
#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH)
#define REQ_FLUSH_SEQ (1ULL << __REQ_FLUSH_SEQ)
-#define REQ_BG (1ULL << __REQ_BG)
#define REQ_IO_STAT (1ULL << __REQ_IO_STAT)
#define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE)
#define REQ_PM (1ULL << __REQ_PM)
@@ -266,16 +264,4 @@ static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}
-#define BLK_RQ_STAT_BATCH 64
-
-struct blk_rq_stat {
- s64 mean;
- u64 min;
- u64 max;
- s32 nr_samples;
- s32 nr_batch;
- u64 batch;
- s64 time;
-};
-
#endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2624a0220..9de483535 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -24,7 +24,6 @@
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
-#include <linux/wbt.h>
struct module;
struct scsi_ioctl_command;
@@ -38,7 +37,6 @@ struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
-struct rq_wb;
#define BLKDEV_MIN_RQ 4
#ifdef CONFIG_PCK_INTERACTIVE
@@ -157,7 +155,6 @@ struct request {
struct gendisk *rq_disk;
struct hd_struct *part;
unsigned long start_time;
- struct wb_issue_stat wb_stat;
#ifdef CONFIG_BLK_CGROUP
struct request_list *rl; /* rl this rq is alloced from */
unsigned long long start_time_ns;
@@ -309,8 +306,6 @@ struct request_queue {
int nr_rqs[2]; /* # allocated [a]sync rqs */
int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
- struct rq_wb *rq_wb;
-
/*
* If blkcg is not used, @q->root_rl serves all requests. If blkcg
* is used, root blkg allocates from @q->root_rl and all other
@@ -336,8 +331,6 @@ struct request_queue {
struct blk_mq_ctx __percpu *queue_ctx;
unsigned int nr_queues;
- unsigned int queue_depth;
-
/* hw dispatch queues */
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
@@ -423,9 +416,6 @@ struct request_queue {
unsigned int nr_sorted;
unsigned int in_flight[2];
-
- struct blk_rq_stat rq_stats[2];
-
/*
* Number of active block driver functions for which blk_drain_queue()
* must wait. Must be incremented around functions that unlock the
@@ -697,14 +687,6 @@ static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
return false;
}
-static inline unsigned int blk_queue_depth(struct request_queue *q)
-{
- if (q->queue_depth)
- return q->queue_depth;
-
- return q->nr_requests;
-}
-
/*
* q->prep_rq_fn return values
*/
@@ -1021,7 +1003,6 @@ extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
-extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 6f180afdd..1b5999474 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -189,8 +189,6 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
* WRITE_FLUSH_FUA Combination of WRITE_FLUSH and FUA. The IO is preceded
* by a cache flush and data is guaranteed to be on
* non-volatile media on completion.
- * WRITE_BG Background write. This is for background activity like
- * the periodic flush and background threshold writeback
*
*/
#define RW_MASK REQ_OP_WRITE
@@ -204,7 +202,6 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define WRITE_FLUSH (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH)
#define WRITE_FUA (REQ_SYNC | REQ_NOIDLE | REQ_FUA)
#define WRITE_FLUSH_FUA (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH | REQ_FUA)
-#define WRITE_BG (REQ_NOIDLE | REQ_BG)
/*
* Attribute flags. These should be or-ed together to figure out what
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index f1bbae014..2c6c5114c 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -641,6 +641,7 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num)
#ifdef CONFIG_PWM_SYSFS
void pwmchip_sysfs_export(struct pwm_chip *chip);
void pwmchip_sysfs_unexport(struct pwm_chip *chip);
+void pwmchip_sysfs_unexport_children(struct pwm_chip *chip);
#else
static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
{
@@ -649,6 +650,10 @@ static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip)
{
}
+
+static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
+{
+}
#endif /* CONFIG_PWM_SYSFS */
#endif /* __LINUX_PWM_H */
diff --git a/include/linux/wbt.h b/include/linux/wbt.h
deleted file mode 100644
index 68ba75e3a..000000000
--- a/include/linux/wbt.h
+++ /dev/null
@@ -1,141 +0,0 @@
-#ifndef WB_THROTTLE_H
-#define WB_THROTTLE_H
-
-#include <linux/atomic.h>
-#include <linux/wait.h>
-#include <linux/timer.h>
-#include <linux/ktime.h>
-
-enum wbt_flags {
- WBT_TRACKED = 1, /* write, tracked for throttling */
- WBT_READ = 2, /* read */
- WBT_KSWAPD = 4, /* write, from kswapd */
-
- WBT_NR_BITS = 3, /* number of bits */
-};
-
-enum {
- /*
- * Set aside 3 bits for state, rest is a time stamp
- */
- ISSUE_STAT_SHIFT = 64 - WBT_NR_BITS,
- ISSUE_STAT_MASK = ~((1ULL << ISSUE_STAT_SHIFT) - 1),
- ISSUE_STAT_TIME_MASK = ~ISSUE_STAT_MASK,
-
- WBT_NUM_RWQ = 2,
-};
-
-struct wb_issue_stat {
- u64 time;
-};
-
-static inline void wbt_issue_stat_set_time(struct wb_issue_stat *stat)
-{
- stat->time = (stat->time & ISSUE_STAT_MASK) |
- (ktime_to_ns(ktime_get()) & ISSUE_STAT_TIME_MASK);
-}
-
-static inline u64 wbt_issue_stat_get_time(struct wb_issue_stat *stat)
-{
- return stat->time & ISSUE_STAT_TIME_MASK;
-}
-
-static inline void wbt_clear_state(struct wb_issue_stat *stat)
-{
- stat->time &= ISSUE_STAT_TIME_MASK;
-}
-
-static inline enum wbt_flags wbt_stat_to_mask(struct wb_issue_stat *stat)
-{
- return (stat->time & ISSUE_STAT_MASK) >> ISSUE_STAT_SHIFT;
-}
-
-static inline void wbt_track(struct wb_issue_stat *stat, enum wbt_flags wb_acct)
-{
- stat->time |= ((u64) wb_acct) << ISSUE_STAT_SHIFT;
-}
-
-static inline bool wbt_is_tracked(struct wb_issue_stat *stat)
-{
- return (stat->time >> ISSUE_STAT_SHIFT) & WBT_TRACKED;
-}
-
-static inline bool wbt_is_read(struct wb_issue_stat *stat)
-{
- return (stat->time >> ISSUE_STAT_SHIFT) & WBT_READ;
-}
-
-struct wb_stat_ops {
- void (*get)(void *, struct blk_rq_stat *);
- bool (*is_current)(struct blk_rq_stat *);
- void (*clear)(void *);
-};
-
-struct rq_wait {
- wait_queue_head_t wait;
- atomic_t inflight;
-};
-
-struct rq_wb {
- /*
- * Settings that govern how we throttle
- */
- unsigned int wb_background; /* background writeback */
- unsigned int wb_normal; /* normal writeback */
- unsigned int wb_max; /* max throughput writeback */
- int scale_step;
- bool scaled_max;
-
- /*
- * Number of consecutive periods where we don't have enough
- * information to make a firm scale up/down decision.
- */
- unsigned int unknown_cnt;
-
- u64 win_nsec; /* default window size */
- u64 cur_win_nsec; /* current window size */
-
- struct timer_list window_timer;
-
- s64 sync_issue;
- void *sync_cookie;
-
- unsigned int wc;
- unsigned int queue_depth;
-
- unsigned long last_issue; /* last non-throttled issue */
- unsigned long last_comp; /* last non-throttled comp */
- unsigned long min_lat_nsec;
- struct backing_dev_info *bdi;
- struct rq_wait rq_wait[WBT_NUM_RWQ];
-
- struct wb_stat_ops *stat_ops;
- void *ops_data;
-};
-
-static inline unsigned int wbt_inflight(struct rq_wb *rwb)
-{
- unsigned int i, ret = 0;
-
- for (i = 0; i < WBT_NUM_RWQ; i++)
- ret += atomic_read(&rwb->rq_wait[i].inflight);
-
- return ret;
-}
-
-struct backing_dev_info;
-
-void __wbt_done(struct rq_wb *, enum wbt_flags);
-void wbt_done(struct rq_wb *, struct wb_issue_stat *);
-enum wbt_flags wbt_wait(struct rq_wb *, unsigned int, spinlock_t *);
-struct rq_wb *wbt_init(struct backing_dev_info *, struct wb_stat_ops *, void *);
-void wbt_exit(struct rq_wb *);
-void wbt_update_limits(struct rq_wb *);
-void wbt_requeue(struct rq_wb *, struct wb_issue_stat *);
-void wbt_issue(struct rq_wb *, struct wb_issue_stat *);
-void wbt_disable(struct rq_wb *);
-
-void wbt_set_queue_depth(struct rq_wb *, unsigned int);
-void wbt_set_write_cache(struct rq_wb *, bool);
-
-#endif
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index e53abf2bf..fc1e16c25 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -100,16 +100,6 @@ struct writeback_control {
#endif
};
-static inline int wbc_to_write_flags(struct writeback_control *wbc)
-{
- if (wbc->sync_mode == WB_SYNC_ALL)
- return WRITE_SYNC;
- else if (wbc->for_kupdate || wbc->for_background)
- return WRITE_BG;
-
- return 0;
-}
-
/*
* A wb_domain represents a domain that wb's (bdi_writeback's) belong to
* and are measured against each other in. There always is one global
diff --git a/include/trace/events/wbt.h b/include/trace/events/wbt.h
deleted file mode 100644
index 926c7ee0e..000000000
--- a/include/trace/events/wbt.h
+++ /dev/null
@@ -1,153 +0,0 @@
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM wbt
-
-#if !defined(_TRACE_WBT_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_WBT_H
-
-#include <linux/tracepoint.h>
-#include <linux/wbt.h>
-
-/**
- * wbt_stat - trace stats for blk_wb
- * @stat: array of read/write stats
- */
-TRACE_EVENT(wbt_stat,
-
- TP_PROTO(struct backing_dev_info *bdi, struct blk_rq_stat *stat),
-
- TP_ARGS(bdi, stat),
-
- TP_STRUCT__entry(
- __array(char, name, 32)
- __field(s64, rmean)
- __field(u64, rmin)
- __field(u64, rmax)
- __field(s64, rnr_samples)
- __field(s64, rtime)
- __field(s64, wmean)
- __field(u64, wmin)
- __field(u64, wmax)
- __field(s64, wnr_samples)
- __field(s64, wtime)
- ),
-
- TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
- __entry->rmean = stat[0].mean;
- __entry->rmin = stat[0].min;
- __entry->rmax = stat[0].max;
- __entry->rnr_samples = stat[0].nr_samples;
- __entry->wmean = stat[1].mean;
- __entry->wmin = stat[1].min;
- __entry->wmax = stat[1].max;
- __entry->wnr_samples = stat[1].nr_samples;
- ),
-
- TP_printk("%s: rmean=%llu, rmin=%llu, rmax=%llu, rsamples=%llu, "
- "wmean=%llu, wmin=%llu, wmax=%llu, wsamples=%llu\n",
- __entry->name, __entry->rmean, __entry->rmin, __entry->rmax,
- __entry->rnr_samples, __entry->wmean, __entry->wmin,
- __entry->wmax, __entry->wnr_samples)
-);
-
-/**
- * wbt_lat - trace latency event
- * @lat: latency trigger
- */
-TRACE_EVENT(wbt_lat,
-
- TP_PROTO(struct backing_dev_info *bdi, unsigned long lat),
-
- TP_ARGS(bdi, lat),
-
- TP_STRUCT__entry(
- __array(char, name, 32)
- __field(unsigned long, lat)
- ),
-
- TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
- __entry->lat = div_u64(lat, 1000);
- ),
-
- TP_printk("%s: latency %lluus\n", __entry->name,
- (unsigned long long) __entry->lat)
-);
-
-/**
- * wbt_step - trace wb event step
- * @msg: context message
- * @step: the current scale step count
- * @window: the current monitoring window
- * @bg: the current background queue limit
- * @normal: the current normal writeback limit
- * @max: the current max throughput writeback limit
- */
-TRACE_EVENT(wbt_step,
-
- TP_PROTO(struct backing_dev_info *bdi, const char *msg,
- int step, unsigned long window, unsigned int bg,
- unsigned int normal, unsigned int max),
-
- TP_ARGS(bdi, msg, step, window, bg, normal, max),
-
- TP_STRUCT__entry(
- __array(char, name, 32)
- __field(const char *, msg)
- __field(int, step)
- __field(unsigned long, window)
- __field(unsigned int, bg)
- __field(unsigned int, normal)
- __field(unsigned int, max)
- ),
-
- TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
- __entry->msg = msg;
- __entry->step = step;
- __entry->window = div_u64(window, 1000);
- __entry->bg = bg;
- __entry->normal = normal;
- __entry->max = max;
- ),
-
- TP_printk("%s: %s: step=%d, window=%luus, background=%u, normal=%u, max=%u\n",
- __entry->name, __entry->msg, __entry->step, __entry->window,
- __entry->bg, __entry->normal, __entry->max)
-);
-
-/**
- * wbt_timer - trace wb timer event
- * @status: timer state status
- * @step: the current scale step count
- * @inflight: tracked writes inflight
- */
-TRACE_EVENT(wbt_timer,
-
- TP_PROTO(struct backing_dev_info *bdi, unsigned int status,
- int step, unsigned int inflight),
-
- TP_ARGS(bdi, status, step, inflight),
-
- TP_STRUCT__entry(
- __array(char, name, 32)
- __field(unsigned int, status)
- __field(int, step)
- __field(unsigned int, inflight)
- ),
-
- TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
- __entry->status = status;
- __entry->step = step;
- __entry->inflight = inflight;
- ),
-
- TP_printk("%s: status=%u, step=%d, inflight=%u\n", __entry->name,
- __entry->status, __entry->step, __entry->inflight)
-);
-
-#endif /* _TRACE_WBT_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 5deb0d188..899e5474e 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -397,6 +397,7 @@ header-y += string.h
header-y += suspend_ioctls.h
header-y += swab.h
header-y += synclink.h
+header-y += sync_file.h
header-y += sysctl.h
header-y += sysinfo.h
header-y += target_core_user.h
diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
index cf7a95286..7c4ddc734 100644
--- a/kernel/sched/MuQSS.c
+++ b/kernel/sched/MuQSS.c
@@ -760,6 +760,13 @@ static inline bool task_queued(struct task_struct *p)
static void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
static inline void resched_if_idle(struct rq *rq);
+/* Dodgy workaround till we figure out where the softirqs are going */
+static inline void do_pending_softirq(struct rq *rq, struct task_struct *next)
+{
+ if (unlikely(next == rq->idle && local_softirq_pending() && !in_interrupt()))
+ do_softirq_own_stack();
+}
+
static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
@@ -814,7 +821,11 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
raw_spin_unlock(&prev->pi_lock);
}
#endif
- raw_spin_unlock_irq(&rq->lock);
+ rq_unlock(rq);
+
+ do_pending_softirq(rq, current);
+
+ local_irq_enable();
}
static inline bool deadline_before(u64 deadline, u64 time)
@@ -2556,7 +2567,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
* past. prev == current is still correct but we need to recalculate this_rq
* because prev may have moved to another CPU.
*/
-static struct rq *finish_task_switch(struct task_struct *prev)
+static void finish_task_switch(struct task_struct *prev)
__releases(rq->lock)
{
struct rq *rq = this_rq();
@@ -2609,7 +2620,6 @@ static struct rq *finish_task_switch(struct task_struct *prev)
kprobe_flush_task(prev);
put_task_struct(prev);
}
- return rq;
}
/**
@@ -2617,10 +2627,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
* @prev: the thread we just switched away from.
*/
asmlinkage __visible void schedule_tail(struct task_struct *prev)
- __releases(rq->lock)
{
- struct rq *rq;
-
/*
* New tasks start with FORK_PREEMPT_COUNT, see there and
* finish_task_switch() for details.
@@ -2630,7 +2637,7 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
* PREEMPT_COUNT kernels).
*/
- rq = finish_task_switch(prev);
+ finish_task_switch(prev);
preempt_enable();
if (current->set_child_tid)
@@ -2640,7 +2647,7 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
/*
* context_switch - switch to the new MM and the new thread's register state.
*/
-static __always_inline struct rq *
+static __always_inline void
context_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next)
{
@@ -2680,7 +2687,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
switch_to(prev, next, prev);
barrier();
- return finish_task_switch(prev);
+ finish_task_switch(prev);
}
/*
@@ -3854,10 +3861,12 @@ static void __sched notrace __schedule(bool preempt)
++*switch_count;
trace_sched_switch(preempt, prev, next);
- rq = context_switch(rq, prev, next); /* unlocks the rq */
+ context_switch(rq, prev, next); /* unlocks the rq */
} else {
check_siblings(rq);
- rq_unlock_irq(rq);
+ rq_unlock(rq);
+ do_pending_softirq(rq, next);
+ local_irq_enable();
}
}
diff --git a/kernel/sched/MuQSS.h b/kernel/sched/MuQSS.h
index f9510d739..3565a7d8a 100644
--- a/kernel/sched/MuQSS.h
+++ b/kernel/sched/MuQSS.h
@@ -1,5 +1,6 @@
#include <linux/sched.h>
#include <linux/cpuidle.h>
+#include <linux/interrupt.h>
#include <linux/skip_list.h>
#include <linux/stop_machine.h>
#include "cpuacct.h"
@@ -325,4 +326,18 @@ static inline void cpufreq_trigger(u64 time, unsigned long util)
#define arch_scale_freq_invariant() (false)
#endif
+/*
+ * This should only be called when current == rq->idle. Dodgy workaround for
+ * when softirqs are pending and we are in the idle loop. Setting need_resched
+ * on current will kick us out of the idle loop so the softirqs are serviced
+ * on our next pass through schedule().
+ */
+static inline bool softirq_pending(int cpu)
+{
+ if (likely(!local_softirq_pending()))
+ return false;
+ set_tsk_need_resched(current);
+ return true;
+}
+
#endif /* MUQSS_SCHED_H */
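The softirq_pending() helper added above never runs the softirqs itself; it only flags a reschedule so the idle loop falls through to schedule(), where do_pending_softirq() picks the work up. A rough userspace sketch of that control flow; pending_work and need_resched here stand in for local_softirq_pending() and the task flag:

    #include <stdbool.h>
    #include <stdio.h>

    static bool need_resched;
    static int pending_work = 1; /* pretend one softirq is pending */

    /* Model of softirq_pending(): don't run the work here, just make
     * sure the idle loop exits so the scheduler services it. */
    static bool check_pending(void)
    {
        if (!pending_work)
            return false;
        need_resched = true;
        return true;
    }

    static void idle_loop(void)
    {
        while (!need_resched) {
            if (check_pending())
                break;      /* skip deep idle, go back to schedule() */
            /* ...enter low-power idle here... */
        }
        printf("left idle, scheduler will service pending work\n");
    }

    int main(void) { idle_loop(); return 0; }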
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 060b76d85..51264e6b1 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -208,6 +208,8 @@ static void cpu_idle_loop(void)
int cpu = smp_processor_id();
while (1) {
+ bool pending = false;
+
/*
* If the arch has a polling bit, we maintain an invariant:
*
@@ -219,7 +221,10 @@ static void cpu_idle_loop(void)
__current_set_polling();
quiet_vmstat();
- tick_nohz_idle_enter();
+ if (unlikely(softirq_pending(cpu)))
+ pending = true;
+ else
+ tick_nohz_idle_enter();
while (!need_resched()) {
check_pgt_cache();
@@ -259,7 +264,8 @@ static void cpu_idle_loop(void)
* not have had an IPI to fold the state for us.
*/
preempt_set_need_resched();
- tick_nohz_idle_exit();
+ if (!pending)
+ tick_nohz_idle_exit();
__current_clr_polling();
/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c64fc5114..cdefab6df 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1813,3 +1813,8 @@ static inline void cpufreq_trigger_update(u64 time) {}
#else /* arch_scale_freq_capacity */
#define arch_scale_freq_invariant() (false)
#endif
+
+static inline bool softirq_pending(int cpu)
+{
+ return false;
+}
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 32bf6f75a..96db64bde 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -878,7 +878,7 @@ static inline struct timer_base *get_timer_base(u32 tflags)
#ifdef CONFIG_NO_HZ_COMMON
static inline struct timer_base *
-__get_target_base(struct timer_base *base, unsigned tflags)
+get_target_base(struct timer_base *base, unsigned tflags)
{
#ifdef CONFIG_SMP
if ((tflags & TIMER_PINNED) || !base->migration_enabled)
@@ -891,25 +891,27 @@ __get_target_base(struct timer_base *base, unsigned tflags)
static inline void forward_timer_base(struct timer_base *base)
{
+ unsigned long jnow = READ_ONCE(jiffies);
+
/*
* We only forward the base when it's idle and we have a delta between
* base clock and jiffies.
*/
- if (!base->is_idle || (long) (jiffies - base->clk) < 2)
+ if (!base->is_idle || (long) (jnow - base->clk) < 2)
return;
/*
* If the next expiry value is > jiffies, then we fast forward to
* jiffies otherwise we forward to the next expiry value.
*/
- if (time_after(base->next_expiry, jiffies))
- base->clk = jiffies;
+ if (time_after(base->next_expiry, jnow))
+ base->clk = jnow;
else
base->clk = base->next_expiry;
}
#else
static inline struct timer_base *
-__get_target_base(struct timer_base *base, unsigned tflags)
+get_target_base(struct timer_base *base, unsigned tflags)
{
return get_timer_this_cpu_base(tflags);
}
@@ -917,14 +919,6 @@ __get_target_base(struct timer_base *base, unsigned tflags)
static inline void forward_timer_base(struct timer_base *base) { }
#endif
-static inline struct timer_base *
-get_target_base(struct timer_base *base, unsigned tflags)
-{
- struct timer_base *target = __get_target_base(base, tflags);
-
- forward_timer_base(target);
- return target;
-}
/*
* We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
@@ -943,7 +937,14 @@ static struct timer_base *lock_timer_base(struct timer_list *timer,
{
for (;;) {
struct timer_base *base;
- u32 tf = timer->flags;
+ u32 tf;
+
+ /*
+ * We need to use READ_ONCE() here, otherwise the compiler
+ * might re-read @tf between the check for TIMER_MIGRATING
+ * and spin_lock().
+ */
+ tf = READ_ONCE(timer->flags);
if (!(tf & TIMER_MIGRATING)) {
base = get_timer_base(tf);
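The READ_ONCE() added above guarantees the flags word is loaded exactly once, so the TIMER_MIGRATING test and the later use of tf see the same snapshot. A stripped-down illustration of the pattern; the volatile cast mimics what READ_ONCE() boils down to for a u32:

    #include <stdint.h>
    #include <stdio.h>

    #define TIMER_MIGRATING 0x1u

    /* Equivalent of READ_ONCE() for a u32: force a single load the
     * compiler cannot repeat or reorder away. */
    #define READ_ONCE_U32(x) (*(const volatile uint32_t *)&(x))

    struct timer { uint32_t flags; };

    static int try_lock_base(struct timer *t)
    {
        uint32_t tf = READ_ONCE_U32(t->flags);

        /* Without the single load, the compiler may re-read t->flags
         * after this check, and a concurrent updater could have set
         * TIMER_MIGRATING in between. */
        if (!(tf & TIMER_MIGRATING)) {
            /* ... look up and lock the base derived from tf ... */
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        struct timer t = { .flags = 0 };
        printf("%d\n", try_lock_base(&t)); /* prints 1 */
        return 0;
    }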
@@ -964,6 +965,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
unsigned long clk = 0, flags;
int ret = 0;
+ BUG_ON(!timer->function);
+
/*
* This is a common optimization triggered by the networking code - if
* the timer is re-modified to have the same timeout or ends up in the
@@ -972,13 +975,16 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
if (timer_pending(timer)) {
if (timer->expires == expires)
return 1;
+
/*
- * Take the current timer_jiffies of base, but without holding
- * the lock!
+ * We lock timer base and calculate the bucket index right
+ * here. If the timer ends up in the same bucket, then we
+ * just update the expiry time and avoid the whole
+ * dequeue/enqueue dance.
*/
- base = get_timer_base(timer->flags);
- clk = base->clk;
+ base = lock_timer_base(timer, &flags);
+ clk = base->clk;
idx = calc_wheel_index(expires, clk);
/*
@@ -988,14 +994,14 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
*/
if (idx == timer_get_idx(timer)) {
timer->expires = expires;
- return 1;
+ ret = 1;
+ goto out_unlock;
}
+ } else {
+ base = lock_timer_base(timer, &flags);
}
timer_stats_timer_set_start_info(timer);
- BUG_ON(!timer->function);
-
- base = lock_timer_base(timer, &flags);
ret = detach_if_pending(timer, base, false);
if (!ret && pending_only)
@@ -1025,12 +1031,16 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
}
}
+ /* Try to forward a stale timer base clock */
+ forward_timer_base(base);
+
timer->expires = expires;
/*
* If 'idx' was calculated above and the base time did not advance
- * between calculating 'idx' and taking the lock, only enqueue_timer()
- * and trigger_dyntick_cpu() is required. Otherwise we need to
- * (re)calculate the wheel index via internal_add_timer().
+ * between calculating 'idx' and possibly switching the base, only
+ * enqueue_timer() and trigger_dyntick_cpu() are required. Otherwise
+ * we need to (re)calculate the wheel index via
+ * internal_add_timer().
*/
if (idx != UINT_MAX && clk == base->clk) {
enqueue_timer(base, timer, idx);
@@ -1510,12 +1520,16 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
base->next_expiry = nextevt;
/*
- * We have a fresh next event. Check whether we can forward the base:
+ * We have a fresh next event. Check whether we can forward the
+ * base. We can only do that when @basej is past base->clk
+ * otherwise we might rewind base->clk.
*/
- if (time_after(nextevt, jiffies))
- base->clk = jiffies;
- else if (time_after(nextevt, base->clk))
- base->clk = nextevt;
+ if (time_after(basej, base->clk)) {
+ if (time_after(nextevt, basej))
+ base->clk = basej;
+ else if (time_after(nextevt, base->clk))
+ base->clk = nextevt;
+ }
if (time_before_eq(nextevt, basej)) {
expires = basem;
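The time_after(basej, base->clk) guard added above relies on the kernel's wraparound-safe jiffies comparison, which orders two counters by the sign of their difference rather than by raw magnitude. A self-contained demonstration, with unsigned long modeling jiffies:

    #include <stdio.h>

    /* Wraparound-safe ordering, as in the kernel's time_after(). */
    #define time_after(a, b)  ((long)((b) - (a)) < 0)

    int main(void)
    {
        unsigned long basej = (unsigned long)-2; /* just before wrap */
        unsigned long clk = basej + 5;           /* wraps past zero */

        /* A plain 'clk > basej' would be false after the wrap; the
         * signed-difference form still orders them correctly. */
        printf("time_after(clk, basej) = %d\n", time_after(clk, basej));
        return 0;
    }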
diff --git a/lib/Kconfig b/lib/Kconfig
index c585e4c40..d79909dc0 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -550,7 +550,4 @@ config STACKDEPOT
bool
select STACKTRACE
-config WBT
- bool
-
endmenu
diff --git a/lib/Makefile b/lib/Makefile
index 5bd016bd3..b63a82375 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -177,7 +177,6 @@ obj-$(CONFIG_SG_SPLIT) += sg_split.o
obj-$(CONFIG_SG_POOL) += sg_pool.o
obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
obj-$(CONFIG_IRQ_POLL) += irq_poll.o
-obj-$(CONFIG_WBT) += wbt.o
obj-$(CONFIG_STACKDEPOT) += stackdepot.o
KASAN_SANITIZE_stackdepot.o := n
diff --git a/lib/wbt.c b/lib/wbt.c
deleted file mode 100644
index 257c7b099..000000000
--- a/lib/wbt.c
+++ /dev/null
@@ -1,703 +0,0 @@
-/*
- * buffered writeback throttling. loosely based on CoDel. We can't drop
- * packets for IO scheduling, so the logic is something like this:
- *
- * - Monitor latencies in a defined window of time.
- * - If the minimum latency in the above window exceeds some target, increment
- * scaling step and scale down queue depth by a factor of 2x. The monitoring
- * window is then shrunk to 100 / sqrt(scaling step + 1).
- * - For any window where we don't have solid data on what the latencies
- * look like, retain status quo.
- * - If latencies look good, decrement scaling step.
- * - If we're only doing writes, allow the scaling step to go negative. This
- * will temporarily boost write performance, snapping back to a stable
- * scaling step of 0 if reads show up or the heavy writers finish. Unlike
- * positive scaling steps where we shrink the monitoring window, a negative
- * scaling step retains the default step==0 window size.
- *
- * Copyright (C) 2016 Jens Axboe
- *
- */
-#include <linux/kernel.h>
-#include <linux/blk_types.h>
-#include <linux/slab.h>
-#include <linux/backing-dev.h>
-#include <linux/wbt.h>
-#include <linux/swap.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/wbt.h>
-
-enum {
- /*
- * Default setting, we'll scale up (to 75% of QD max) or down (min 1)
- * from here depending on device stats
- */
- RWB_DEF_DEPTH = 16,
-
- /*
- * 100msec window
- */
- RWB_WINDOW_NSEC = 100 * 1000 * 1000ULL,
-
- /*
- * Disregard stats, if we don't meet this minimum
- */
- RWB_MIN_WRITE_SAMPLES = 3,
-
- /*
- * If we have this number of consecutive windows with not enough
- * information to scale up or down, scale up.
- */
- RWB_UNKNOWN_BUMP = 5,
-};
-
-static inline bool rwb_enabled(struct rq_wb *rwb)
-{
- return rwb && rwb->wb_normal != 0;
-}
-
-/*
- * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
- * false if 'v' + 1 would be bigger than 'below'.
- */
-static bool atomic_inc_below(atomic_t *v, int below)
-{
- int cur = atomic_read(v);
-
- for (;;) {
- int old;
-
- if (cur >= below)
- return false;
- old = atomic_cmpxchg(v, cur, cur + 1);
- if (old == cur)
- break;
- cur = old;
- }
-
- return true;
-}
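atomic_inc_below() in the removed code above is a classic compare-and-swap loop: increment only if the result stays under the cap, retrying when another CPU raced us. The same idea in portable C11, as an illustrative sketch rather than kernel code:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Increment *v only if it is currently below 'below'. */
    static bool inc_below(atomic_int *v, int below)
    {
        int cur = atomic_load(v);

        for (;;) {
            if (cur >= below)
                return false;
            /* On failure, 'cur' is reloaded with the current value
             * and the limit is re-checked on the next iteration. */
            if (atomic_compare_exchange_weak(v, &cur, cur + 1))
                return true;
        }
    }

    int main(void)
    {
        atomic_int inflight = 7;

        printf("%d\n", inc_below(&inflight, 8)); /* 1: 7 -> 8 */
        printf("%d\n", inc_below(&inflight, 8)); /* 0: already at cap */
        return 0;
    }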
-
-static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
-{
- if (rwb_enabled(rwb)) {
- const unsigned long cur = jiffies;
-
- if (cur != *var)
- *var = cur;
- }
-}
-
-/*
- * If a task was rate throttled in balance_dirty_pages() within the last
- * second or so, use that to indicate a higher cleaning rate.
- */
-static bool wb_recent_wait(struct rq_wb *rwb)
-{
- struct bdi_writeback *wb = &rwb->bdi->wb;
-
- return time_before(jiffies, wb->dirty_sleep + HZ);
-}
-
-static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb, bool is_kswapd)
-{
- return &rwb->rq_wait[is_kswapd];
-}
-
-static void rwb_wake_all(struct rq_wb *rwb)
-{
- int i;
-
- for (i = 0; i < WBT_NUM_RWQ; i++) {
- struct rq_wait *rqw = &rwb->rq_wait[i];
-
- if (waitqueue_active(&rqw->wait))
- wake_up_all(&rqw->wait);
- }
-}
-
-void __wbt_done(struct rq_wb *rwb, enum wbt_flags wb_acct)
-{
- struct rq_wait *rqw;
- int inflight, limit;
-
- if (!(wb_acct & WBT_TRACKED))
- return;
-
- rqw = get_rq_wait(rwb, wb_acct & WBT_KSWAPD);
- inflight = atomic_dec_return(&rqw->inflight);
-
- /*
- * wbt got disabled with IO in flight. Wake up any potential
- * waiters, we don't have to do more than that.
- */
- if (unlikely(!rwb_enabled(rwb))) {
- rwb_wake_all(rwb);
- return;
- }
-
- /*
- * If the device does write back caching, drop further down
- * before we wake people up.
- */
- if (rwb->wc && !wb_recent_wait(rwb))
- limit = 0;
- else
- limit = rwb->wb_normal;
-
- /*
- * Don't wake anyone up if we are above the normal limit.
- */
- if (inflight && inflight >= limit)
- return;
-
- if (waitqueue_active(&rqw->wait)) {
- int diff = limit - inflight;
-
- if (!inflight || diff >= rwb->wb_background / 2)
- wake_up(&rqw->wait);
- }
-}
-
-/*
- * Called on completion of a request. Note that it's also called when
- * a request is merged, when the request gets freed.
- */
-void wbt_done(struct rq_wb *rwb, struct wb_issue_stat *stat)
-{
- if (!rwb)
- return;
-
- if (!wbt_is_tracked(stat)) {
- if (rwb->sync_cookie == stat) {
- rwb->sync_issue = 0;
- rwb->sync_cookie = NULL;
- }
-
- if (wbt_is_read(stat))
- wb_timestamp(rwb, &rwb->last_comp);
- wbt_clear_state(stat);
- } else {
- WARN_ON_ONCE(stat == rwb->sync_cookie);
- __wbt_done(rwb, wbt_stat_to_mask(stat));
- wbt_clear_state(stat);
- }
-}
-
-/*
- * Return true, if we can't increase the depth further by scaling
- */
-static bool calc_wb_limits(struct rq_wb *rwb)
-{
- unsigned int depth;
- bool ret = false;
-
- if (!rwb->min_lat_nsec) {
- rwb->wb_max = rwb->wb_normal = rwb->wb_background = 0;
- return false;
- }
-
- /*
- * For QD=1 devices, this is a special case. It's important for those
- * to have one request ready when one completes, so force a depth of
- * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
- * since the device can't have more than that in flight. If we're
- * scaling down, then keep a setting of 1/1/1.
- */
- if (rwb->queue_depth == 1) {
- if (rwb->scale_step > 0)
- rwb->wb_max = rwb->wb_normal = 1;
- else {
- rwb->wb_max = rwb->wb_normal = 2;
- ret = true;
- }
- rwb->wb_background = 1;
- } else {
- /*
- * scale_step == 0 is our default state. If we have suffered
- * latency spikes, step will be > 0, and we shrink the
- * allowed write depths. If step is < 0, we're only doing
- * writes, and we allow a temporarily higher depth to
- * increase performance.
- */
- depth = min_t(unsigned int, RWB_DEF_DEPTH, rwb->queue_depth);
- if (rwb->scale_step > 0)
- depth = 1 + ((depth - 1) >> min(31, rwb->scale_step));
- else if (rwb->scale_step < 0) {
- unsigned int maxd = 3 * rwb->queue_depth / 4;
-
- depth = 1 + ((depth - 1) << -rwb->scale_step);
- if (depth > maxd) {
- depth = maxd;
- ret = true;
- }
- }
-
- /*
- * Set our max/normal/bg queue depths based on how far
- * we have scaled down (->scale_step).
- */
- rwb->wb_max = depth;
- rwb->wb_normal = (rwb->wb_max + 1) / 2;
- rwb->wb_background = (rwb->wb_max + 3) / 4;
- }
-
- return ret;
-}
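A quick worked version of the depth math above for the scaled-down cases (scale_step >= 0), assuming a device queue depth of 32 so the starting depth is min(16, 32) = 16:

    #include <stdio.h>

    int main(void)
    {
        unsigned int depth = 16; /* min(RWB_DEF_DEPTH, queue_depth) */
        int step;

        for (step = 0; step <= 4; step++) {
            unsigned int d = 1 + ((depth - 1) >> step);
            unsigned int max = d;
            unsigned int normal = (max + 1) / 2;
            unsigned int bg = (max + 3) / 4;

            printf("step=%d max=%u normal=%u background=%u\n",
                   step, max, normal, bg);
        }
        return 0;
    }

Each scaling step roughly halves the allowed write depth (16, 8, 4, 2, 1), with the normal and background limits derived as half and a quarter of the max.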
-
-static inline bool stat_sample_valid(struct blk_rq_stat *stat)
-{
- /*
- * We need at least one read sample, and a minimum of
- * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
- * that it's writes impacting us, and not just some sole read on
- * a device that is in a lower power state.
- */
- return stat[0].nr_samples >= 1 &&
- stat[1].nr_samples >= RWB_MIN_WRITE_SAMPLES;
-}
-
-static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
-{
- u64 now, issue = ACCESS_ONCE(rwb->sync_issue);
-
- if (!issue || !rwb->sync_cookie)
- return 0;
-
- now = ktime_to_ns(ktime_get());
- return now - issue;
-}
-
-enum {
- LAT_OK = 1,
- LAT_UNKNOWN,
- LAT_UNKNOWN_WRITES,
- LAT_EXCEEDED,
-};
-
-static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
-{
- u64 thislat;
-
- /*
- * If our stored sync issue exceeds the window size, or it
- * exceeds our min target AND we haven't logged any entries,
- * flag the latency as exceeded. wbt works off completion latencies,
- * but for a flooded device, a single sync IO can take a long time
- * to complete after being issued. If this time exceeds our
- * monitoring window AND we didn't see any other completions in that
- * window, then count that sync IO as a violation of the latency.
- */
- thislat = rwb_sync_issue_lat(rwb);
- if (thislat > rwb->cur_win_nsec ||
- (thislat > rwb->min_lat_nsec && !stat[0].nr_samples)) {
- trace_wbt_lat(rwb->bdi, thislat);
- return LAT_EXCEEDED;
- }
-
- /*
- * No read/write mix, if stat isn't valid
- */
- if (!stat_sample_valid(stat)) {
- /*
- * If we had writes in this stat window and the window is
- * current, we're only doing writes. If a task recently
- * waited or still has writes in flights, consider us doing
- * just writes as well.
- */
- if ((stat[1].nr_samples && rwb->stat_ops->is_current(stat)) ||
- wb_recent_wait(rwb) || wbt_inflight(rwb))
- return LAT_UNKNOWN_WRITES;
- return LAT_UNKNOWN;
- }
-
- /*
- * If the 'min' latency exceeds our target, step down.
- */
- if (stat[0].min > rwb->min_lat_nsec) {
- trace_wbt_lat(rwb->bdi, stat[0].min);
- trace_wbt_stat(rwb->bdi, stat);
- return LAT_EXCEEDED;
- }
-
- if (rwb->scale_step)
- trace_wbt_stat(rwb->bdi, stat);
-
- return LAT_OK;
-}
-
-static int latency_exceeded(struct rq_wb *rwb)
-{
- struct blk_rq_stat stat[2];
-
- rwb->stat_ops->get(rwb->ops_data, stat);
- return __latency_exceeded(rwb, stat);
-}
-
-static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
-{
- trace_wbt_step(rwb->bdi, msg, rwb->scale_step, rwb->cur_win_nsec,
- rwb->wb_background, rwb->wb_normal, rwb->wb_max);
-}
-
-static void scale_up(struct rq_wb *rwb)
-{
- /*
- * Hit max in previous round, stop here
- */
- if (rwb->scaled_max)
- return;
-
- rwb->scale_step--;
- rwb->unknown_cnt = 0;
- rwb->stat_ops->clear(rwb->ops_data);
-
- rwb->scaled_max = calc_wb_limits(rwb);
-
- rwb_wake_all(rwb);
-
- rwb_trace_step(rwb, "step up");
-}
-
-/*
- * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
- * had a latency violation.
- */
-static void scale_down(struct rq_wb *rwb, bool hard_throttle)
-{
- /*
- * Stop scaling down when we've hit the limit. This also prevents
- * ->scale_step from going to crazy values, if the device can't
- * keep up.
- */
- if (rwb->wb_max == 1)
- return;
-
- if (rwb->scale_step < 0 && hard_throttle)
- rwb->scale_step = 0;
- else
- rwb->scale_step++;
-
- rwb->scaled_max = false;
- rwb->unknown_cnt = 0;
- rwb->stat_ops->clear(rwb->ops_data);
- calc_wb_limits(rwb);
- rwb_trace_step(rwb, "step down");
-}
-
-static void rwb_arm_timer(struct rq_wb *rwb)
-{
- unsigned long expires;
-
- if (rwb->scale_step > 0) {
- /*
- * We should speed this up, using some variant of a fast
- * integer inverse square root calculation. Since we only do
- * this for every window expiration, it's not a huge deal,
- * though.
- */
- rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
- int_sqrt((rwb->scale_step + 1) << 8));
- } else {
- /*
- * For step < 0, we don't want to increase/decrease the
- * window size.
- */
- rwb->cur_win_nsec = rwb->win_nsec;
- }
-
- expires = jiffies + nsecs_to_jiffies(rwb->cur_win_nsec);
- mod_timer(&rwb->window_timer, expires);
-}
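The shifted divide above is a fixed-point way of computing win_nsec / sqrt(scale_step + 1): the numerator is scaled by 16 and the square-root argument by 256, so the factors of 16 cancel. A small check of the arithmetic (int_sqrt truncates, so the fixed-point result is only approximate):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long long win = 100ULL * 1000 * 1000; /* 100ms in ns */
        int step;

        for (step = 1; step <= 4; step++) {
            /* (win << 4) / int_sqrt((step + 1) << 8) */
            unsigned long long fixed = (win << 4) /
                (unsigned long long)sqrt((double)((step + 1) << 8));
            double exact = (double)win / sqrt(step + 1.0);

            printf("step=%d fixed=%llu exact=%.0f\n", step, fixed, exact);
        }
        return 0;
    }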
-
-static void wb_timer_fn(unsigned long data)
-{
- struct rq_wb *rwb = (struct rq_wb *) data;
- unsigned int inflight = wbt_inflight(rwb);
- int status;
-
- status = latency_exceeded(rwb);
-
- trace_wbt_timer(rwb->bdi, status, rwb->scale_step, inflight);
-
- /*
- * If we exceeded the latency target, step down. If we did not,
- * step one level up. If we don't know enough to say either exceeded
- * or ok, then don't do anything.
- */
- switch (status) {
- case LAT_EXCEEDED:
- scale_down(rwb, true);
- break;
- case LAT_OK:
- scale_up(rwb);
- break;
- case LAT_UNKNOWN_WRITES:
- scale_up(rwb);
- break;
- case LAT_UNKNOWN:
- if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
- break;
- /*
- * We get here for two reasons:
- *
- * 1) We previously scaled down (reduced depth), and we currently
- * don't have a valid read/write sample. For that case,
- * slowly return to center state (step == 0).
- * 2) We started at the center step, and don't have a valid
- * read/write sample, but we do have writes going on.
- * Allow step to go negative, to increase write perf.
- */
- if (rwb->scale_step > 0)
- scale_up(rwb);
- else if (rwb->scale_step < 0)
- scale_down(rwb, false);
- break;
- default:
- break;
- }
-
- /*
- * Re-arm timer, if we have IO in flight
- */
- if (rwb->scale_step || inflight)
- rwb_arm_timer(rwb);
-}
-
-void wbt_update_limits(struct rq_wb *rwb)
-{
- rwb->scale_step = 0;
- rwb->scaled_max = false;
- calc_wb_limits(rwb);
-
- rwb_wake_all(rwb);
-}
-
-static bool close_io(struct rq_wb *rwb)
-{
- const unsigned long now = jiffies;
-
- return time_before(now, rwb->last_issue + HZ / 10) ||
- time_before(now, rwb->last_comp + HZ / 10);
-}
-
-#define REQ_HIPRIO (REQ_SYNC | REQ_META | REQ_PRIO)
-
-static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
-{
- unsigned int limit;
-
- /*
- * At this point we know it's a buffered write. If this is
- * kswapd trying to free memory, or REQ_SYNC is set, then
- * it's WB_SYNC_ALL writeback, and we'll use the max limit for
- * that. If the write is marked as a background write, then use
- * the idle limit, or go to normal if we haven't had competing
- * IO for a bit.
- */
- if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
- limit = rwb->wb_max;
- else if ((rw & REQ_BG) || close_io(rwb)) {
- /*
- * If less than 100ms since we completed unrelated IO,
- * limit us to half the depth for background writeback.
- */
- limit = rwb->wb_background;
- } else
- limit = rwb->wb_normal;
-
- return limit;
-}
-
-static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
- unsigned long rw)
-{
- /*
- * inc it here even if disabled, since we'll dec it at completion.
- * this only happens if the task was sleeping in __wbt_wait(),
- * and someone turned it off at the same time.
- */
- if (!rwb_enabled(rwb)) {
- atomic_inc(&rqw->inflight);
- return true;
- }
-
- return atomic_inc_below(&rqw->inflight, get_limit(rwb, rw));
-}
-
-/*
- * Block if we will exceed our limit, or if we are currently waiting for
- * the timer to kick off queuing again.
- */
-static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
-{
- struct rq_wait *rqw = get_rq_wait(rwb, current_is_kswapd());
- DEFINE_WAIT(wait);
-
- if (may_queue(rwb, rqw, rw))
- return;
-
- do {
- prepare_to_wait_exclusive(&rqw->wait, &wait,
- TASK_UNINTERRUPTIBLE);
-
- if (may_queue(rwb, rqw, rw))
- break;
-
- if (lock)
- spin_unlock_irq(lock);
-
- io_schedule();
-
- if (lock)
- spin_lock_irq(lock);
- } while (1);
-
- finish_wait(&rqw->wait, &wait);
-}
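__wbt_wait() above is an exclusive-wakeup sleep until the inflight count fits under the limit again. A userspace analogue using a mutex and condition variable; this sketches the throttling idea, not the kernel waitqueue mechanics:

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t more = PTHREAD_COND_INITIALIZER;
    static int inflight;

    /* Block until we may account one more request under 'limit'. */
    void throttle_enter(int limit)
    {
        pthread_mutex_lock(&lock);
        while (inflight >= limit)
            pthread_cond_wait(&more, &lock);
        inflight++;
        pthread_mutex_unlock(&lock);
    }

    /* Completion side: drop the count and wake one waiter, like
     * __wbt_done() waking the queue when inflight falls below the
     * limit. */
    void throttle_exit(void)
    {
        pthread_mutex_lock(&lock);
        inflight--;
        pthread_cond_signal(&more);
        pthread_mutex_unlock(&lock);
    }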
-
-static inline bool wbt_should_throttle(struct rq_wb *rwb, unsigned int rw)
-{
- const int op = rw >> BIO_OP_SHIFT;
-
- /*
- * If not a WRITE (or a discard), do nothing
- */
- if (!(op == REQ_OP_WRITE || op == REQ_OP_DISCARD))
- return false;
-
- /*
- * Don't throttle WRITE_ODIRECT
- */
- if ((rw & (REQ_SYNC | REQ_NOIDLE)) == REQ_SYNC)
- return false;
-
- return true;
-}
-
-/*
- * Returns true if the IO request should be accounted, false if not.
- * May sleep, if we have exceeded the writeback limits. Caller can pass
- * in an irq held spinlock, if it holds one when calling this function.
- * If we do sleep, we'll release and re-grab it.
- */
-unsigned int wbt_wait(struct rq_wb *rwb, unsigned int rw, spinlock_t *lock)
-{
- unsigned int ret = 0;
-
- if (!rwb_enabled(rwb))
- return 0;
-
- if ((rw >> BIO_OP_SHIFT) == REQ_OP_READ)
- ret = WBT_READ;
-
- if (!wbt_should_throttle(rwb, rw)) {
- if (ret & WBT_READ)
- wb_timestamp(rwb, &rwb->last_issue);
- return ret;
- }
-
- __wbt_wait(rwb, rw, lock);
-
- if (!timer_pending(&rwb->window_timer))
- rwb_arm_timer(rwb);
-
- if (current_is_kswapd())
- ret |= WBT_KSWAPD;
-
- return ret | WBT_TRACKED;
-}
-
-void wbt_issue(struct rq_wb *rwb, struct wb_issue_stat *stat)
-{
- if (!rwb_enabled(rwb))
- return;
-
- wbt_issue_stat_set_time(stat);
-
- /*
- * Track the sync issue time, so we can react more quickly if a sync
- * IO takes a long time to complete.
- * Note that this is just a hint. 'stat' can go away when the
- * request completes, so it's important we never dereference it. We
- * only use the address to compare with, which is why we store the
- * sync_issue time locally.
- */
- if (wbt_is_read(stat) && !rwb->sync_issue) {
- rwb->sync_cookie = stat;
- rwb->sync_issue = wbt_issue_stat_get_time(stat);
- }
-}
-
-void wbt_requeue(struct rq_wb *rwb, struct wb_issue_stat *stat)
-{
- if (!rwb_enabled(rwb))
- return;
- if (stat == rwb->sync_cookie) {
- rwb->sync_issue = 0;
- rwb->sync_cookie = NULL;
- }
-}
-
-void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
-{
- if (rwb) {
- rwb->queue_depth = depth;
- wbt_update_limits(rwb);
- }
-}
-
-void wbt_set_write_cache(struct rq_wb *rwb, bool write_cache_on)
-{
- if (rwb)
- rwb->wc = write_cache_on;
-}
-
-void wbt_disable(struct rq_wb *rwb)
-{
- if (rwb) {
- del_timer_sync(&rwb->window_timer);
- rwb->win_nsec = rwb->min_lat_nsec = 0;
- wbt_update_limits(rwb);
- }
-}
-EXPORT_SYMBOL_GPL(wbt_disable);
-
-struct rq_wb *wbt_init(struct backing_dev_info *bdi, struct wb_stat_ops *ops,
- void *ops_data)
-{
- struct rq_wb *rwb;
- int i;
-
- if (!ops->get || !ops->is_current || !ops->clear)
- return ERR_PTR(-EINVAL);
-
- rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
- if (!rwb)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < WBT_NUM_RWQ; i++) {
- atomic_set(&rwb->rq_wait[i].inflight, 0);
- init_waitqueue_head(&rwb->rq_wait[i].wait);
- }
-
- setup_timer(&rwb->window_timer, wb_timer_fn, (unsigned long) rwb);
- rwb->wc = 1;
- rwb->queue_depth = RWB_DEF_DEPTH;
- rwb->last_comp = rwb->last_issue = jiffies;
- rwb->bdi = bdi;
- rwb->win_nsec = RWB_WINDOW_NSEC;
- rwb->stat_ops = ops;
- rwb->ops_data = ops_data;
- wbt_update_limits(rwb);
- return rwb;
-}
-
-void wbt_exit(struct rq_wb *rwb)
-{
- if (rwb) {
- del_timer_sync(&rwb->window_timer);
- kfree(rwb);
- }
-}
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 3bfed5ab2..8fde443f3 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -310,7 +310,6 @@ static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
spin_lock_init(&wb->work_lock);
INIT_LIST_HEAD(&wb->work_list);
INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
- wb->dirty_sleep = jiffies;
wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
if (!wb->congested)
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 1d05cb9d3..234676e31 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -554,6 +554,8 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
err = memcg_init_list_lru(lru, memcg_aware);
if (err) {
kfree(lru->node);
+ /* Do this so a list_lru_destroy() doesn't crash: */
+ lru->node = NULL;
goto out;
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4be518d4e..dddead146 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1947,6 +1947,15 @@ retry:
current->flags & PF_EXITING))
goto force;
+ /*
+ * Prevent unbounded recursion when reclaim operations need to
+ * allocate memory. This might exceed the limits temporarily,
+ * but we prefer facilitating memory reclaim and getting back
+ * under the limit over triggering OOM kills in these cases.
+ */
+ if (unlikely(current->flags & PF_MEMALLOC))
+ goto force;
+
if (unlikely(task_in_memcg_oom(current)))
goto nomem;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 56be15224..1aeb51d24 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1786,7 +1786,6 @@ pause:
pause,
start_time);
__set_current_state(TASK_KILLABLE);
- wb->dirty_sleep = now;
io_schedule_timeout(pause);
current->dirty_paused_when = now + pause;
diff --git a/mm/slab.c b/mm/slab.c
index b67271024..525a91198 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -964,7 +964,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
* guaranteed to be valid until irq is re-enabled, because it will be
* freed after synchronize_sched().
*/
- if (force_change)
+ if (old_shared && force_change)
synchronize_sched();
fail:
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0fe8b7113..ba0fad78e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3048,7 +3048,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
sc.gfp_mask,
sc.reclaim_idx);
+ current->flags |= PF_MEMALLOC;
nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+ current->flags &= ~PF_MEMALLOC;
trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
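Together with the PF_MEMALLOC check added in mm/memcontrol.c above, this forms a re-entrancy guard: reclaim marks the task, and any nested charge attempt sees the mark and forces the allocation through instead of recursing into reclaim again. The shape of the pattern, modeled with a thread-local flag:

    #include <stdbool.h>

    static _Thread_local bool in_reclaim; /* stands in for PF_MEMALLOC */

    void reclaim(void);

    /* Charge path: if we are already inside reclaim, don't recurse
     * into it again - let the allocation through ("goto force"). */
    bool try_charge(void)
    {
        if (in_reclaim)
            return true;
        reclaim();
        return true;
    }

    void reclaim(void)
    {
        in_reclaim = true;
        /* ...shrink caches; allocations made here may call
         * try_charge(), which now short-circuits... */
        in_reclaim = false;
    }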
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 9dce3b157..59a960349 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2253,16 +2253,22 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
if (!(status->rx_flags & IEEE80211_RX_AMSDU))
return RX_CONTINUE;
- if (ieee80211_has_a4(hdr->frame_control) &&
- rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
- !rx->sdata->u.vlan.sta)
- return RX_DROP_UNUSABLE;
+ if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
+ switch (rx->sdata->vif.type) {
+ case NL80211_IFTYPE_AP_VLAN:
+ if (!rx->sdata->u.vlan.sta)
+ return RX_DROP_UNUSABLE;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (!rx->sdata->u.mgd.use_4addr)
+ return RX_DROP_UNUSABLE;
+ break;
+ default:
+ return RX_DROP_UNUSABLE;
+ }
+ }
- if (is_multicast_ether_addr(hdr->addr1) &&
- ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
- rx->sdata->u.vlan.sta) ||
- (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
- rx->sdata->u.mgd.use_4addr)))
+ if (is_multicast_ether_addr(hdr->addr1))
return RX_DROP_UNUSABLE;
skb->dev = dev;
diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c
index 018eed7e1..8668a5c18 100644
--- a/net/netfilter/xt_NFLOG.c
+++ b/net/netfilter/xt_NFLOG.c
@@ -32,6 +32,7 @@ nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
li.u.ulog.copy_len = info->len;
li.u.ulog.group = info->group;
li.u.ulog.qthreshold = info->threshold;
+ li.u.ulog.flags = 0;
if (info->flags & XT_NFLOG_F_COPY_LEN)
li.u.ulog.flags |= NF_LOG_F_COPY_LEN;
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index f826e8739..d942c7c2b 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -41,7 +41,7 @@ config BIG_KEYS
bool "Large payload keys"
depends on KEYS
depends on TMPFS
- select CRYPTO
+ depends on (CRYPTO_ANSI_CPRNG = y || CRYPTO_DRBG = y)
select CRYPTO_AES
select CRYPTO_ECB
select CRYPTO_RNG
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index c0b3030b5..835c1ab30 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -9,6 +9,7 @@
* 2 of the Licence, or (at your option) any later version.
*/
+#define pr_fmt(fmt) "big_key: "fmt
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/file.h>
@@ -341,44 +342,48 @@ error:
*/
static int __init big_key_init(void)
{
- return register_key_type(&key_type_big_key);
-}
-
-/*
- * Initialize big_key crypto and RNG algorithms
- */
-static int __init big_key_crypto_init(void)
-{
- int ret = -EINVAL;
+ struct crypto_skcipher *cipher;
+ struct crypto_rng *rng;
+ int ret;
- /* init RNG */
- big_key_rng = crypto_alloc_rng(big_key_rng_name, 0, 0);
- if (IS_ERR(big_key_rng)) {
- big_key_rng = NULL;
- return -EFAULT;
+ rng = crypto_alloc_rng(big_key_rng_name, 0, 0);
+ if (IS_ERR(rng)) {
+ pr_err("Can't alloc rng: %ld\n", PTR_ERR(rng));
+ return PTR_ERR(rng);
}
+ big_key_rng = rng;
+
/* seed RNG */
- ret = crypto_rng_reset(big_key_rng, NULL, crypto_rng_seedsize(big_key_rng));
- if (ret)
- goto error;
+ ret = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
+ if (ret) {
+ pr_err("Can't reset rng: %d\n", ret);
+ goto error_rng;
+ }
/* init block cipher */
- big_key_skcipher = crypto_alloc_skcipher(big_key_alg_name,
- 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(big_key_skcipher)) {
- big_key_skcipher = NULL;
- ret = -EFAULT;
- goto error;
+ cipher = crypto_alloc_skcipher(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(cipher)) {
+ ret = PTR_ERR(cipher);
+ pr_err("Can't alloc crypto: %d\n", ret);
+ goto error_rng;
+ }
+
+ big_key_skcipher = cipher;
+
+ ret = register_key_type(&key_type_big_key);
+ if (ret < 0) {
+ pr_err("Can't register type: %d\n", ret);
+ goto error_cipher;
}
return 0;
-error:
+error_cipher:
+ crypto_free_skcipher(big_key_skcipher);
+error_rng:
crypto_free_rng(big_key_rng);
- big_key_rng = NULL;
return ret;
}
-device_initcall(big_key_init);
-late_initcall(big_key_crypto_init);
+late_initcall(big_key_init);
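The reworked init above follows the kernel's usual goto-unwind style: each successfully acquired resource gets a label that releases it, and failures jump to the label covering everything acquired so far. A skeleton of the pattern in plain C, with malloc() standing in for the crypto allocations:

    #include <stdlib.h>

    static int register_type(void) { return 0; } /* placeholder */

    int init_big_key_like(void)
    {
        void *rng, *cipher;
        int ret;

        rng = malloc(16);       /* stands in for crypto_alloc_rng() */
        if (!rng)
            return -1;

        cipher = malloc(16);    /* stands in for crypto_alloc_skcipher() */
        if (!cipher) {
            ret = -1;
            goto error_rng;     /* only the rng exists so far */
        }

        ret = register_type(); /* stands in for register_key_type() */
        if (ret < 0)
            goto error_cipher; /* unwind both allocations */

        return 0;

    error_cipher:
        free(cipher);
    error_rng:
        free(rng);
        return ret;
    }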
diff --git a/security/keys/proc.c b/security/keys/proc.c
index f0611a636..b9f531c9e 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -181,7 +181,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
struct timespec now;
unsigned long timo;
key_ref_t key_ref, skey_ref;
- char xbuf[12];
+ char xbuf[16];
int rc;
struct keyring_search_context ctx = {
diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
index dcc102813..37d9cfbc2 100644
--- a/sound/core/seq/seq_timer.c
+++ b/sound/core/seq/seq_timer.c
@@ -448,8 +448,8 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
ktime_get_ts64(&tm);
tm = timespec64_sub(tm, tmr->last_update);
- cur_time.tv_nsec = tm.tv_nsec;
- cur_time.tv_sec = tm.tv_sec;
+ cur_time.tv_nsec += tm.tv_nsec;
+ cur_time.tv_sec += tm.tv_sec;
snd_seq_sanity_real_time(&cur_time);
}
spin_unlock_irqrestore(&tmr->lock, flags);
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 160c7f713..487fcbf94 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -340,8 +340,7 @@ enum {
/* quirks for Nvidia */
#define AZX_DCAPS_PRESET_NVIDIA \
- (AZX_DCAPS_NO_MSI | /*AZX_DCAPS_ALIGN_BUFSIZE |*/ \
- AZX_DCAPS_NO_64BIT | AZX_DCAPS_CORBRP_SELF_CLEAR |\
+ (AZX_DCAPS_NO_MSI | AZX_DCAPS_CORBRP_SELF_CLEAR |\
AZX_DCAPS_SNOOP_TYPE(NVIDIA))
#define AZX_DCAPS_PRESET_CTHDA \
@@ -1699,6 +1698,10 @@ static int azx_first_init(struct azx *chip)
}
}
+ /* NVidia hardware normally only supports up to 40 bits of DMA */
+ if (chip->pci->vendor == PCI_VENDOR_ID_NVIDIA)
+ dma_bits = 40;
+
/* disable 64bit DMA address on some devices */
if (chip->driver_caps & AZX_DCAPS_NO_64BIT) {
dev_dbg(card->dev, "Disabling 64bit DMA\n");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index bd481ac23..26e866f65 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5809,8 +5809,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
#define ALC295_STANDARD_PINS \
{0x12, 0xb7a60130}, \
{0x14, 0x90170110}, \
- {0x17, 0x21014020}, \
- {0x18, 0x21a19030}, \
{0x21, 0x04211020}
#define ALC298_STANDARD_PINS \
@@ -5857,11 +5855,19 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x1b, 0x02011020},
{0x21, 0x0221101f}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x14, 0x90170110},
+ {0x1b, 0x01011020},
+ {0x21, 0x0221101f}),
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x14, 0x90170130},
{0x1b, 0x01014020},
{0x21, 0x0221103f}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x14, 0x90170130},
+ {0x1b, 0x01011020},
+ {0x21, 0x0221103f}),
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x14, 0x90170130},
{0x1b, 0x02011020},
{0x21, 0x0221103f}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
@@ -6037,7 +6043,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
ALC292_STANDARD_PINS,
{0x13, 0x90a60140}),
SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
- ALC295_STANDARD_PINS),
+ ALC295_STANDARD_PINS,
+ {0x17, 0x21014020},
+ {0x18, 0x21a19030}),
+ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC295_STANDARD_PINS,
+ {0x17, 0x21014040},
+ {0x18, 0x21a19050}),
SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC298_STANDARD_PINS,
{0x17, 0x90170110}),
@@ -6611,6 +6623,7 @@ enum {
ALC891_FIXUP_HEADSET_MODE,
ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
ALC662_FIXUP_ACER_VERITON,
+ ALC892_FIXUP_ASROCK_MOBO,
};
static const struct hda_fixup alc662_fixups[] = {
@@ -6887,6 +6900,16 @@ static const struct hda_fixup alc662_fixups[] = {
{ }
}
},
+ [ALC892_FIXUP_ASROCK_MOBO] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x15, 0x40f000f0 }, /* disabled */
+ { 0x16, 0x40f000f0 }, /* disabled */
+ { 0x18, 0x01014011 }, /* LO */
+ { 0x1a, 0x01014012 }, /* LO */
+ { }
+ }
+ },
};
static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -6924,6 +6947,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
+ SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO),
SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68),
SND_PCI_QUIRK(0x1b0a, 0x01b8, "ACER Veriton", ALC662_FIXUP_ACER_VERITON),
SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index c60a776e8..8a59d4782 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2907,6 +2907,23 @@ AU0828_DEVICE(0x2040, 0x7260, "Hauppauge", "HVR-950Q"),
AU0828_DEVICE(0x2040, 0x7213, "Hauppauge", "HVR-950Q"),
AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+/* Syntek STK1160 */
+{
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+ USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+ .idVendor = 0x05e1,
+ .idProduct = 0x0408,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Syntek",
+ .product_name = "STK1160",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_AUDIO_ALIGN_TRANSFER
+ }
+},
+
/* Digidesign Mbox */
{
/* Thanks to Clemens Ladisch <clemens@ladisch.de> */