-rw-r--r--  Documentation/ABI/testing/sysfs-class-cxl | 7
-rw-r--r--  Documentation/kernel-parameters.txt | 9
-rw-r--r--  Documentation/scheduler/sched-BFS.txt | 216
-rw-r--r--  Documentation/scheduler/sched-MuQSS.txt | 289
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arc/kernel/signal.c | 8
-rw-r--r--  arch/arm/boot/dts/arm-realview-eb.dtsi | 23
-rw-r--r--  arch/arm/boot/dts/bcm958625hr.dts | 3
-rw-r--r--  arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/sun9i-a80.dtsi | 3
-rw-r--r--  arch/arm/crypto/ghash-ce-glue.c | 24
-rw-r--r--  arch/arm/mach-pxa/corgi_pm.c | 13
-rw-r--r--  arch/arm/mach-pxa/pxa_cplds_irqs.c | 24
-rw-r--r--  arch/arm/mach-pxa/sharpsl_pm.c | 2
-rw-r--r--  arch/arm/mach-pxa/sharpsl_pm.h | 2
-rw-r--r--  arch/arm/mach-pxa/spitz_pm.c | 9
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 11
-rw-r--r--  arch/arm64/include/asm/module.h | 5
-rw-r--r--  arch/arm64/include/asm/percpu.h | 120
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 8
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c | 36
-rw-r--r--  arch/arm64/kernel/head.S | 3
-rw-r--r--  arch/arm64/kernel/traps.c | 27
-rw-r--r--  arch/arm64/kvm/hyp/entry.S | 2
-rw-r--r--  arch/metag/include/asm/atomic.h | 3
-rw-r--r--  arch/mips/include/asm/ptrace.h | 2
-rw-r--r--  arch/mips/vdso/Makefile | 2
-rw-r--r--  arch/parisc/include/asm/pgtable.h | 6
-rw-r--r--  arch/parisc/kernel/setup.c | 8
-rw-r--r--  arch/parisc/kernel/time.c | 6
-rw-r--r--  arch/parisc/kernel/vmlinux.lds.S | 7
-rw-r--r--  arch/powerpc/Kconfig | 2
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h | 2
-rw-r--r--  arch/powerpc/kernel/eeh_driver.c | 8
-rw-r--r--  arch/powerpc/kernel/nvram_64.c | 6
-rw-r--r--  arch/powerpc/kernel/process.c | 22
-rw-r--r--  arch/powerpc/kernel/vdso64/datapage.S | 2
-rw-r--r--  arch/powerpc/kernel/vdso64/gettimeofday.S | 2
-rw-r--r--  arch/powerpc/lib/copyuser_64.S | 2
-rw-r--r--  arch/powerpc/mm/copro_fault.c | 2
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 2
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 7
-rw-r--r--  arch/powerpc/platforms/powernv/eeh-powernv.c | 9
-rw-r--r--  arch/powerpc/platforms/powernv/pci.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 4
-rw-r--r--  arch/powerpc/sysdev/cpm1.c | 2
-rw-r--r--  arch/powerpc/sysdev/cpm2.c | 4
-rw-r--r--  arch/powerpc/sysdev/cpm_common.c | 15
-rw-r--r--  arch/powerpc/xmon/spr_access.S | 4
-rw-r--r--  arch/s390/kvm/intercept.c | 9
-rw-r--r--  arch/x86/Kconfig | 4
-rw-r--r--  arch/x86/configs/i386_defconfig | 2
-rw-r--r--  arch/x86/configs/x86_64_defconfig | 2
-rw-r--r--  arch/x86/kernel/e820.c | 2
-rw-r--r--  arch/x86/kernel/early-quirks.c | 9
-rw-r--r--  arch/x86/kernel/smpboot.c | 16
-rw-r--r--  arch/x86/kvm/ioapic.c | 2
-rw-r--r--  arch/x86/platform/uv/bios_uv.c | 10
-rw-r--r--  block/bfq-iosched.c | 3
-rw-r--r--  block/blk-cgroup.c | 4
-rw-r--r--  crypto/gcm.c | 2
-rw-r--r--  drivers/base/platform.c | 4
-rw-r--r--  drivers/char/hw_random/omap-rng.c | 4
-rw-r--r--  drivers/clk/bcm/clk-bcm2835.c | 23
-rw-r--r--  drivers/clk/clk-divider.c | 2
-rw-r--r--  drivers/clk/clk-qoriq.c | 6
-rw-r--r--  drivers/clk/clk.c | 14
-rw-r--r--  drivers/clk/imx/clk-imx35.c | 2
-rw-r--r--  drivers/clk/imx/clk-imx6q.c | 46
-rw-r--r--  drivers/clk/qcom/Kconfig | 2
-rw-r--r--  drivers/clk/qcom/gcc-msm8996.c | 4
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 19
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 10
-rw-r--r--  drivers/crypto/ccp/ccp-dmaengine.c | 2
-rw-r--r--  drivers/crypto/marvell/cesa.c | 1
-rw-r--r--  drivers/crypto/marvell/hash.c | 15
-rw-r--r--  drivers/dma/ipu/ipu_irq.c | 9
-rw-r--r--  drivers/gpio/gpio-mpc8xxx.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 48
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/psm.c | 3
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 17
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 36
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 216
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 72
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 43
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 32
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 453
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/r600_dpm.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/sislands_smc.h | 1
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.h | 19
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c | 13
-rw-r--r--  drivers/gpu/drm/vc4/vc4_render_cl.c | 21
-rw-r--r--  drivers/gpu/drm/vc4/vc4_validate.c | 17
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 76
-rw-r--r--  drivers/hid/hid-ids.h | 3
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 1
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.c | 3
-rw-r--r--  drivers/iio/dac/ad5755.c | 2
-rw-r--r--  drivers/iio/light/us5182d.c | 2
-rw-r--r--  drivers/infiniband/core/verbs.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/qp.c | 14
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 1
-rw-r--r--  drivers/infiniband/hw/qib/qib.h | 1
-rw-r--r--  drivers/infiniband/hw/qib/qib_qp.c | 13
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.c | 2
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c | 9
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 8
-rw-r--r--  drivers/input/mouse/elantech.c | 25
-rw-r--r--  drivers/input/serio/i8042-io.h | 2
-rw-r--r--  drivers/input/serio/i8042-ip22io.h | 2
-rw-r--r--  drivers/input/serio/i8042-ppcio.h | 2
-rw-r--r--  drivers/input/serio/i8042-sparcio.h | 2
-rw-r--r--  drivers/input/serio/i8042-unicore32io.h | 2
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 96
-rw-r--r--  drivers/input/serio/i8042.c | 55
-rw-r--r--  drivers/irqchip/irq-eznps.c | 4
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 2
-rw-r--r--  drivers/md/dm-crypt.c | 24
-rw-r--r--  drivers/md/dm-mpath.c | 6
-rw-r--r--  drivers/md/dm-rq.c | 19
-rw-r--r--  drivers/md/dm.c | 10
-rw-r--r--  drivers/media/dvb-frontends/mb86a20s.c | 104
-rw-r--r--  drivers/media/usb/cx231xx/cx231xx-avcore.c | 5
-rw-r--r--  drivers/media/usb/cx231xx/cx231xx-cards.c | 2
-rw-r--r--  drivers/media/usb/cx231xx/cx231xx-core.c | 27
-rw-r--r--  drivers/memstick/host/rtsx_usb_ms.c | 6
-rw-r--r--  drivers/misc/cxl/api.c | 9
-rw-r--r--  drivers/misc/cxl/context.c | 3
-rw-r--r--  drivers/misc/cxl/cxl.h | 24
-rw-r--r--  drivers/misc/cxl/file.c | 11
-rw-r--r--  drivers/misc/cxl/guest.c | 3
-rw-r--r--  drivers/misc/cxl/main.c | 42
-rw-r--r--  drivers/misc/cxl/pci.c | 2
-rw-r--r--  drivers/misc/cxl/sysfs.c | 27
-rw-r--r--  drivers/misc/mei/amthif.c | 2
-rw-r--r--  drivers/misc/mei/bus.c | 2
-rw-r--r--  drivers/misc/mei/hw-me-regs.h | 3
-rw-r--r--  drivers/misc/mei/main.c | 2
-rw-r--r--  drivers/misc/mei/pci-me.c | 3
-rw-r--r--  drivers/mmc/card/block.c | 5
-rw-r--r--  drivers/mmc/card/queue.h | 2
-rw-r--r--  drivers/mmc/core/mmc.c | 10
-rw-r--r--  drivers/mmc/host/rtsx_usb_sdmmc.c | 7
-rw-r--r--  drivers/mmc/host/sdhci.c | 2
-rw-r--r--  drivers/mtd/ubi/wl.c | 21
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.c | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 57
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_rx.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 22
-rw-r--r--  drivers/net/wireless/ath/ath10k/txrx.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.h | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 15
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 12
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 28
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 6
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/join.c | 3
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/sta_event.c | 6
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00usb.c | 4
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/regd.c | 4
-rw-r--r--  drivers/nvdimm/bus.c | 2
-rw-r--r--  drivers/nvdimm/core.c | 73
-rw-r--r--  drivers/pci/host/pci-aardvark.c | 8
-rw-r--r--  drivers/pci/host/pci-host-common.c | 8
-rw-r--r--  drivers/pci/host/pci-tegra.c | 9
-rw-r--r--  drivers/pci/host/pci-versatile.c | 8
-rw-r--r--  drivers/pci/host/pcie-designware.c | 21
-rw-r--r--  drivers/pci/host/pcie-rcar.c | 9
-rw-r--r--  drivers/pci/quirks.c | 1
-rw-r--r--  drivers/pinctrl/intel/pinctrl-baytrail.c | 3
-rw-r--r--  drivers/pinctrl/intel/pinctrl-intel.c | 25
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-msm.c | 2
-rw-r--r--  drivers/power/bq24257_charger.c | 12
-rw-r--r--  drivers/regulator/tps65910-regulator.c | 6
-rw-r--r--  drivers/s390/char/con3270.c | 11
-rw-r--r--  drivers/s390/cio/chsc.c | 20
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 162
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h | 14
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 12
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 8
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 22
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h | 4
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 8
-rw-r--r--  drivers/scsi/cxlflash/main.c | 18
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 2
-rw-r--r--  drivers/scsi/scsi_scan.c | 2
-rw-r--r--  drivers/soc/fsl/qe/gpio.c | 3
-rw-r--r--  drivers/soc/fsl/qe/qe_common.c | 8
-rw-r--r--  drivers/spi/spi-fsl-dspi.c | 1
-rw-r--r--  drivers/staging/android/ion/Kconfig | 1
-rw-r--r--  drivers/staging/ks7010/ks_hostif.c | 12
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_cmd.c | 4
-rw-r--r--  drivers/staging/sm750fb/ddk750_mode.c | 2
-rw-r--r--  drivers/target/target_core_transport.c | 27
-rw-r--r--  drivers/target/target_core_xcopy.c | 34
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c | 2
-rw-r--r--  drivers/uio/uio_dmem_genirq.c | 2
-rw-r--r--  drivers/video/fbdev/efifb.c | 6
-rw-r--r--  drivers/watchdog/mt7621_wdt.c | 1
-rw-r--r--  drivers/watchdog/rt2880_wdt.c | 1
-rw-r--r--  fs/9p/acl.c | 40
-rw-r--r--  fs/btrfs/acl.c | 6
-rw-r--r--  fs/ceph/acl.c | 6
-rw-r--r--  fs/ceph/file.c | 3
-rw-r--r--  fs/cifs/cifs_debug.c | 1
-rw-r--r--  fs/cifs/cifsfs.c | 3
-rw-r--r--  fs/cifs/cifsglob.h | 30
-rw-r--r--  fs/cifs/cifssmb.c | 4
-rw-r--r--  fs/cifs/connect.c | 16
-rw-r--r--  fs/cifs/file.c | 66
-rw-r--r--  fs/cifs/misc.c | 15
-rw-r--r--  fs/cifs/readdir.c | 6
-rw-r--r--  fs/cifs/smb2glob.h | 10
-rw-r--r--  fs/cifs/smb2inode.c | 6
-rw-r--r--  fs/cifs/smb2misc.c | 16
-rw-r--r--  fs/cifs/smb2ops.c | 12
-rw-r--r--  fs/cifs/smb2pdu.c | 25
-rw-r--r--  fs/cifs/smb2pdu.h | 2
-rw-r--r--  fs/crypto/crypto.c | 15
-rw-r--r--  fs/crypto/policy.c | 4
-rw-r--r--  fs/ext2/acl.c | 12
-rw-r--r--  fs/ext4/acl.c | 12
-rw-r--r--  fs/ext4/sysfs.c | 4
-rw-r--r--  fs/f2fs/acl.c | 6
-rw-r--r--  fs/gfs2/acl.c | 12
-rw-r--r--  fs/hfsplus/posix_acl.c | 4
-rw-r--r--  fs/isofs/inode.c | 8
-rw-r--r--  fs/jbd2/transaction.c | 3
-rw-r--r--  fs/jffs2/acl.c | 9
-rw-r--r--  fs/jfs/acl.c | 6
-rw-r--r--  fs/nfs/blocklayout/blocklayout.c | 3
-rw-r--r--  fs/nfs/delegation.c | 16
-rw-r--r--  fs/nfs/dir.c | 16
-rw-r--r--  fs/nfs/nfs42proc.c | 1
-rw-r--r--  fs/nfs/nfs4state.c | 3
-rw-r--r--  fs/nfsd/nfssvc.c | 18
-rw-r--r--  fs/ocfs2/acl.c | 10
-rw-r--r--  fs/orangefs/acl.c | 15
-rw-r--r--  fs/overlayfs/copy_up.c | 12
-rw-r--r--  fs/overlayfs/dir.c | 5
-rw-r--r--  fs/posix_acl.c | 31
-rw-r--r--  fs/pstore/ram.c | 17
-rw-r--r--  fs/pstore/ram_core.c | 49
-rw-r--r--  fs/reiserfs/xattr_acl.c | 8
-rw-r--r--  fs/super.c | 6
-rw-r--r--  fs/ubifs/xattr.c | 2
-rw-r--r--  fs/xfs/xfs_acl.c | 13
-rw-r--r--  include/drm/drmP.h | 3
-rw-r--r--  include/dt-bindings/clock/imx6qdl-clock.h | 4
-rw-r--r--  include/linux/cpufreq.h | 104
-rw-r--r--  include/linux/devfreq-event.h | 5
-rw-r--r--  include/linux/hugetlb.h | 6
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h | 2
-rw-r--r--  include/linux/libnvdimm.h | 2
-rw-r--r--  include/linux/posix_acl.h | 1
-rw-r--r--  include/target/target_core_base.h | 1
-rw-r--r--  init/Kconfig | 7
-rw-r--r--  kernel/Kconfig.hz | 1
-rw-r--r--  kernel/irq/generic-chip.c | 21
-rw-r--r--  kernel/sched/Makefile | 4
-rw-r--r--  kernel/sched/MuQSS.c | 799
-rw-r--r--  kernel/sched/MuQSS.h | 57
-rw-r--r--  kernel/sched/cputime.c | 7
-rw-r--r--  kernel/sched/fair.c | 38
-rw-r--r--  kernel/time/Kconfig | 2
-rw-r--r--  kernel/time/clockevents.c | 5
-rw-r--r--  lib/Kconfig.debug | 2
-rw-r--r--  mm/hugetlb.c | 34
-rw-r--r--  mm/memory_hotplug.c | 4
-rw-r--r--  net/sunrpc/xprtsock.c | 11
-rw-r--r--  sound/pci/hda/dell_wmi_helper.c | 2
-rw-r--r--  sound/pci/hda/thinkpad_helper.c | 2
-rw-r--r--  sound/soc/intel/boards/bxt_da7219_max98357a.c | 12
-rw-r--r--  sound/soc/intel/boards/bxt_rt298.c | 14
-rw-r--r--  sound/soc/soc-dapm.c | 6
-rw-r--r--  sound/soc/soc-topology.c | 1
-rw-r--r--  tools/perf/perf-sys.h | 1
-rw-r--r--  tools/perf/ui/browsers/hists.c | 7
-rw-r--r--  tools/perf/ui/stdio/hist.c | 14
-rw-r--r--  tools/perf/util/data-convert-bt.c | 2
-rw-r--r--  tools/perf/util/intel-pt-decoder/intel-pt-decoder.c | 38
-rw-r--r--  tools/perf/util/intel-pt.c | 15
-rw-r--r--  tools/perf/util/symbol-elf.c | 3
-rw-r--r--  tools/perf/util/symbol.c | 5
-rw-r--r--  tools/spi/spidev_test.c | 2
301 files changed, 3488 insertions(+), 2178 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-class-cxl b/Documentation/ABI/testing/sysfs-class-cxl
index 4ba0a2a61..640f65e79 100644
--- a/Documentation/ABI/testing/sysfs-class-cxl
+++ b/Documentation/ABI/testing/sysfs-class-cxl
@@ -220,8 +220,11 @@ What: /sys/class/cxl/<card>/reset
Date: October 2014
Contact: linuxppc-dev@lists.ozlabs.org
Description: write only
- Writing 1 will issue a PERST to card which may cause the card
- to reload the FPGA depending on load_image_on_perst.
+ Writing 1 will issue a PERST to card provided there are no
+ contexts active on any one of the card AFUs. This may cause
+ the card to reload the FPGA depending on load_image_on_perst.
+ Writing -1 will do a force PERST irrespective of any active
+ contexts on the card AFUs.
Users: https://github.com/ibm-capi/libcxl
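As a usage illustration of the reset semantics above, here is a minimal userspace sketch: write "1" for a normal PERST (refused while contexts are active on the AFUs) or "-1" to force one regardless. The card path and the helper name are placeholders, not part of the ABI text.

/* Illustrative only: write a reset request to the cxl sysfs attribute. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int cxl_reset(const char *attr_path, const char *val)
{
	int fd = open(attr_path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));	/* "1" or "-1" */
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	if (cxl_reset("/sys/class/cxl/card0/reset", "1"))
		perror("cxl reset");
	return 0;
}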
What: /sys/class/cxl/<card>/perst_reloads_same_image (not in a guest)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index a4f4d693e..46726d489 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1457,7 +1457,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
i8042.nopnp [HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX
controllers
i8042.notimeout [HW] Ignore timeout condition signalled by controller
- i8042.reset [HW] Reset the controller during init and cleanup
+ i8042.reset [HW] Reset the controller during init, cleanup and
+ suspend-to-ram transitions, only during s2r
+ transitions, or never reset
+ Format: { 1 | Y | y | 0 | N | n }
+ 1, Y, y: always reset controller
+ 0, N, n: don't ever reset controller
+ Default: only on s2r transitions on x86; most other
+ architectures force reset to be always executed
i8042.unlock [HW] Unlock (ignore) the keylock
i8042.kbdreset [HW] Reset device connected to KBD port
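The Format line above describes a tristate. The following is a minimal userspace sketch of that parsing logic, assuming a hypothetical parse_reset_param() helper; the in-kernel driver wires this up through its module parameter handling rather than a function like this:

/* Illustrative sketch of the i8042.reset tristate: 1/Y/y always resets,
 * 0/N/n never resets, and an absent value takes the platform default
 * (reset only on suspend-to-ram transitions on x86). Not kernel code.
 */
#include <stdio.h>

enum i8042_reset_mode {
	I8042_RESET_DEFAULT,	/* only on s2r transitions (x86) */
	I8042_RESET_NEVER,	/* 0, N, n */
	I8042_RESET_ALWAYS,	/* 1, Y, y */
};

static enum i8042_reset_mode parse_reset_param(const char *val)
{
	if (!val)
		return I8042_RESET_DEFAULT;
	switch (val[0]) {
	case '1': case 'Y': case 'y':
		return I8042_RESET_ALWAYS;
	case '0': case 'N': case 'n':
		return I8042_RESET_NEVER;
	default:
		return I8042_RESET_DEFAULT;
	}
}

int main(void)
{
	const char *samples[] = { "1", "n", NULL };

	for (int i = 0; i < 3; i++)
		printf("i8042.reset=%s -> mode %d\n",
		       samples[i] ? samples[i] : "(unset)",
		       parse_reset_param(samples[i]));
	return 0;
}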
diff --git a/Documentation/scheduler/sched-BFS.txt b/Documentation/scheduler/sched-BFS.txt
index 6470f30b0..c0282002a 100644
--- a/Documentation/scheduler/sched-BFS.txt
+++ b/Documentation/scheduler/sched-BFS.txt
@@ -13,14 +13,14 @@ one workload cause massive detriment to another.
Design summary.
-BFS is best described as a single runqueue, O(log n) insertion, O(1) lookup,
-earliest effective virtual deadline first design, loosely based on EEVDF
-(earliest eligible virtual deadline first) and my previous Staircase Deadline
-scheduler. Each component shall be described in order to understand the
-significance of, and reasoning for it. The codebase when the first stable
-version was released was approximately 9000 lines less code than the existing
-mainline linux kernel scheduler (in 2.6.31). This does not even take into
-account the removal of documentation and the cgroups code that is not used.
+BFS is best described as a single runqueue, O(n) lookup, earliest effective
+virtual deadline first design, loosely based on EEVDF (earliest eligible virtual
+deadline first) and my previous Staircase Deadline scheduler. Each component
+shall be described in order to understand the significance of, and reasoning for
+it. The codebase when the first stable version was released was approximately
+9000 lines less code than the existing mainline linux kernel scheduler (in
+2.6.31). This does not even take into account the removal of documentation and
+the cgroups code that is not used.
Design reasoning.
@@ -62,13 +62,12 @@ Design details.
Task insertion.
-BFS inserts tasks into each relevant queue as an O(log n) insertion into a
-customised skip list (as described by William Pugh). At the time of insertion,
-*every* running queue is checked to see if the newly queued task can run on any
-idle queue, or preempt the lowest running task on the system. This is how the
-cross-CPU scheduling of BFS achieves significantly lower latency per extra CPU
-the system has. In this case the lookup is, in the worst case scenario, O(k)
-where k is the number of online CPUs on the system.
+BFS inserts tasks into each relevant queue as an O(1) insertion into a double
+linked list. On insertion, *every* running queue is checked to see if the newly
+queued task can run on any idle queue, or preempt the lowest running task on the
+system. This is how the cross-CPU scheduling of BFS achieves significantly lower
+latency per extra CPU the system has. In this case the lookup is, in the worst
+case scenario, O(n) where n is the number of CPUs on the system.
Data protection.
@@ -93,7 +92,7 @@ the virtual deadline mechanism is explained.
Virtual deadline.
The key to achieving low latency, scheduling fairness, and "nice level"
-distribution in BFS is entirely in the virtual deadline mechanism. The related
+distribution in BFS is entirely in the virtual deadline mechanism. The one
tunable in BFS is the rr_interval, or "round robin interval". This is the
maximum time two SCHED_OTHER (or SCHED_NORMAL, the common scheduling policy)
tasks of the same nice level will be running for, or looking at it the other
@@ -118,7 +117,7 @@ higher priority than a currently running task on any cpu by virtue of the fact
that it has an earlier virtual deadline than the currently running task. The
earlier deadline is the key to which task is next chosen for the first and
second cases. Once a task is descheduled, it is put back on the queue, and an
-O(1) lookup of all queued-but-not-running tasks is done to determine which has
+O(n) lookup of all queued-but-not-running tasks is done to determine which has
the earliest deadline and that task is chosen to receive CPU next.
The CPU proportion of different nice tasks works out to be approximately the
@@ -135,40 +134,26 @@ Task lookup.
BFS has 103 priority queues. 100 of these are dedicated to the static priority
of realtime tasks, and the remaining 3 are, in order of best to worst priority,
-SCHED_ISO (isochronous), SCHED_NORMAL/SCHED_BATCH, and SCHED_IDLEPRIO (idle
-priority scheduling).
-
-When a task of these priorities is queued, it is added to the skiplist with a
-different sorting value according to the type of task. For realtime tasks and
-isochronous tasks, it is their static priority. For SCHED_NORMAL and
-SCHED_BATCH tasks it is their virtual deadline value. For SCHED_IDLEPRIO tasks
-it is their virtual deadline value offset by an impossibly large value to ensure
-they never go before normal tasks. When isochronous or idleprio tasks do not
-meet the conditions that allow them to run with their special scheduling they
-are queued as per the remainder of the SCHED_NORMAL tasks.
-
-Lookup is performed by selecting the very first entry in the "level 0" skiplist
-as it will always be the lowest priority task having been sorted while being
-entered into the skiplist. This is usually an O(1) operation, however if there
-are tasks with limited affinity set and they are not able to run on the current
-CPU, the next in the list is checked and so on.
-
-Thus, the lookup for the common case is O(1) and O(n) in the worst case when
-the system has nothing but selectively affined tasks that can never run on the
-current CPU.
-
-
-Task removal.
-
-Removal of tasks in the skip list is an O(k) operation where 0 <= k < 16,
-corresponding with the "levels" in the skip list. 16 was chosen as the upper
-limit in the skiplist as it guarantees O(log n) insertion for up to 64k
-currently active tasks and most systems do not usually allow more than 32k
-tasks, and 16 levels makes the skiplist lookup components fit in 2 cachelines.
-The skiplist level chosen when inserting a task is pseudo-random but a minor
-optimisation is used to limit the max level based on the absolute number of
-queued tasks since high levels afford no advantage at low numbers of queued
-tasks yet increase overhead.
+SCHED_ISO (isochronous), SCHED_NORMAL, and SCHED_IDLEPRIO (idle priority
+scheduling). When a task of these priorities is queued, a bitmap of running
+priorities is set showing which of these priorities has tasks waiting for CPU
+time. When a CPU is made to reschedule, the lookup for the next task to get
+CPU time is performed in the following way:
+
+First the bitmap is checked to see what static priority tasks are queued. If
+any realtime priorities are found, the corresponding queue is checked and the
+first task listed there is taken (provided CPU affinity is suitable) and lookup
+is complete. If the priority corresponds to a SCHED_ISO task, they are also
+taken in FIFO order (as they behave like SCHED_RR). If the priority corresponds
+to either SCHED_NORMAL or SCHED_IDLEPRIO, then the lookup becomes O(n). At this
+stage, every task in the runlist that corresponds to that priority is checked
+to see which has the earliest set deadline, and (provided it has suitable CPU
+affinity) it is taken off the runqueue and given the CPU. If a task has an
+expired deadline, it is taken and the rest of the lookup aborted (as they are
+chosen in FIFO order).
+
+Thus, the lookup is O(n) in the worst case only, where n is as described
+earlier, as tasks may be chosen before the whole task list is looked over.
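As a concrete sketch of the lookup just described: consult the bitmap of queued priorities, take realtime and ISO tasks in FIFO order, and fall back to the O(n) earliest-deadline scan for SCHED_NORMAL/SCHED_IDLEPRIO. All names here are hypothetical, and the CPU affinity checks and expired-deadline early exit are omitted; this is not the BFS source.

#include <stddef.h>

#define PRIO_LEVELS 103		/* 100 RT + ISO + NORMAL + IDLEPRIO */
#define NORMAL_PRIO 101

struct task {
	unsigned long long deadline;	/* virtual deadline in niffies */
	struct task *next;		/* FIFO order within a priority */
};

struct runlist {
	unsigned long long prio_bitmap[(PRIO_LEVELS + 63) / 64];
	struct task *queue[PRIO_LEVELS];
};

static int first_set_prio(const struct runlist *rl)
{
	for (int w = 0; w < (PRIO_LEVELS + 63) / 64; w++)
		if (rl->prio_bitmap[w])
			return w * 64 + __builtin_ctzll(rl->prio_bitmap[w]);
	return -1;
}

static struct task *lookup_next(struct runlist *rl)
{
	int prio = first_set_prio(rl);

	if (prio < 0)
		return NULL;		/* nothing queued: go idle */
	if (prio < NORMAL_PRIO)
		return rl->queue[prio];	/* RT and ISO: FIFO head, O(1) */

	/* SCHED_NORMAL/IDLEPRIO: O(n) scan for the earliest deadline */
	struct task *best = rl->queue[prio];
	for (struct task *t = best; t; t = t->next)
		if (t->deadline < best->deadline)
			best = t;
	return best;
}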
Scalability.
@@ -191,17 +176,30 @@ when it has been deemed their overhead is so marginal that they're worth adding.
The first is the local copy of the running process' data to the CPU it's running
on to allow that data to be updated lockless where possible. Then there is
deference paid to the last CPU a task was running on, by trying that CPU first
-when looking for an idle CPU to use the next time it's scheduled.
-
-The real cost of migrating a task from one CPU to another is entirely dependant
-on the cache footprint of the task, how cache intensive the task is, how long
-it's been running on that CPU to take up the bulk of its cache, how big the CPU
-cache is, how fast and how layered the CPU cache is, how fast a context switch
-is... and so on. In other words, it's close to random in the real world where we
-do more than just one sole workload. The only thing we can be sure of is that
-it's not free. So BFS uses the principle that an idle CPU is a wasted CPU and
-utilising idle CPUs is more important than cache locality, and cache locality
-only plays a part after that.
+when looking for an idle CPU to use the next time it's scheduled. Finally there
+is the notion of cache locality beyond the last running CPU. The sched_domains
+information is used to determine the relative virtual "cache distance" that
+other CPUs have from the last CPU a task was running on. CPUs with shared
+caches, such as SMT siblings, or multicore CPUs with shared caches, are treated
+as cache local. CPUs without shared caches are treated as not cache local, and
+CPUs on different NUMA nodes are treated as very distant. This "relative cache
+distance" is used by modifying the virtual deadline value when doing lookups.
+Effectively, the deadline is unaltered between "cache local" CPUs, doubled for
+"cache distant" CPUs, and quadrupled for "very distant" CPUs. The reasoning
+behind the doubling of deadlines is as follows. The real cost of migrating a
+task from one CPU to another is entirely dependent on the cache footprint of
+the task, how cache intensive the task is, how long it's been running on that
+CPU to take up the bulk of its cache, how big the CPU cache is, how fast and
+how layered the CPU cache is, how fast a context switch is... and so on. In
+other words, it's close to random in the real world where we do more than just
+one sole workload. The only thing we can be sure of is that it's not free. So
+BFS uses the principle that an idle CPU is a wasted CPU and utilising idle CPUs
+is more important than cache locality, and cache locality only plays a part
+after that. Doubling the effective deadline is based on the premise that the
+"cache local" CPUs will tend to work on the same tasks up to double the number
+of cache local CPUs, and once the workload is beyond that amount, it is likely
+that none of the tasks are cache warm anywhere anyway. The quadrupling for NUMA
+is a value I pulled out of my arse.
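The scaling described above condenses to a one-line sketch (illustrative only, not the BFS source): the deadline is compared as-is between cache-local CPUs, doubled when cache distant, and quadrupled across NUMA nodes.

#include <stdio.h>

enum cache_distance { CACHE_LOCAL = 0, CACHE_DISTANT = 1, NUMA_DISTANT = 2 };

/* Left shift by the distance: multiply by 1, 2 or 4. */
static unsigned long long scaled_deadline(unsigned long long deadline,
					  enum cache_distance d)
{
	return deadline << d;
}

int main(void)
{
	unsigned long long vd = 6000000ULL;	/* a deadline offset in niffies */

	printf("local %llu, distant %llu, NUMA %llu\n",
	       scaled_deadline(vd, CACHE_LOCAL),
	       scaled_deadline(vd, CACHE_DISTANT),
	       scaled_deadline(vd, NUMA_DISTANT));
	return 0;
}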
When choosing an idle CPU for a waking task, the cache locality is determined
according to where the task last ran and then idle CPUs are ranked from best
@@ -209,26 +207,31 @@ to worst to choose the most suitable idle CPU based on cache locality, NUMA
node locality and hyperthread sibling business. They are chosen in the
following preference (if idle):
- * Same thread, idle or busy cache, idle or busy threads
- * Other core, same cache, idle or busy cache, idle threads.
- * Same node, other CPU, idle cache, idle threads.
- * Same node, other CPU, busy cache, idle threads.
- * Other core, same cache, busy threads.
- * Same node, other CPU, busy threads.
- * Other node, other CPU, idle cache, idle threads.
- * Other node, other CPU, busy cache, idle threads.
- * Other node, other CPU, busy threads.
+* Same core, idle or busy cache, idle threads
+* Other core, same cache, idle or busy cache, idle threads.
+* Same node, other CPU, idle cache, idle threads.
+* Same node, other CPU, busy cache, idle threads.
+* Same core, busy threads.
+* Other core, same cache, busy threads.
+* Same node, other CPU, busy threads.
+* Other node, other CPU, idle cache, idle threads.
+* Other node, other CPU, busy cache, idle threads.
+* Other node, other CPU, busy threads.
This shows the SMT or "hyperthread" awareness in the design as well which will
choose a real idle core first before a logical SMT sibling which already has
-tasks on the physical CPU. Early benchmarking of BFS suggested scalability
-dropped off at the 16 CPU mark. However this benchmarking was performed on an
-earlier design that was far less scalable than the current one so it's hard to
-know how scalable it is in terms of number of CPUs (due to the global
-runqueue). Note that in terms of scalability, the number of _logical_ CPUs
-matters, not the number of _physical_ CPUs. Thus, a dual (2x) quad core (4X)
-hyperthreaded (2X) machine is effectively a 16X. Newer benchmark results are
-very promising indeed. Benchmark contributions are most welcome.
+tasks on the physical CPU.
+
+Early benchmarking of BFS suggested scalability dropped off at the 16 CPU mark.
+However this benchmarking was performed on an earlier design that was far less
+scalable than the current one so it's hard to know how scalable it is in terms
+of both CPUs (due to the global runqueue) and heavily loaded machines (due to
+O(n) lookup) at this stage. Note that in terms of scalability, the number of
+_logical_ CPUs matters, not the number of _physical_ CPUs. Thus, a dual (2x)
+quad core (4X) hyperthreaded (2X) machine is effectively a 16X. Newer benchmark
+results are very promising indeed, without needing to tweak any knobs, features
+or options. Benchmark contributions are most welcome.
+
Features
@@ -241,43 +244,30 @@ and iso_cpu tunables, and the SCHED_ISO and SCHED_IDLEPRIO policies. In addition
to this, BFS also uses sub-tick accounting. What BFS does _not_ now feature is
support for CGROUPS. The average user should neither need to know what these
are, nor should they need to be using them to have good desktop behaviour.
-Rudimentary support for the CPU controller CGROUP in the form of filesystem
-stubs for the expected CGROUP structure to allow applications that demand their
-presence to work but they do not have any functionality.
-There are two "scheduler" tunables, the round robin interval and the
-interactive flag. These can be accessed in
+rr_interval
+
+There is only one "scheduler" tunable, the round robin interval. This can be
+accessed in
/proc/sys/kernel/rr_interval
- /proc/sys/kernel/interactive
-
-rr_interval value
-
-The value is in milliseconds, and the default value is set to 6ms. Valid values
-are from 1 to 1000. Decreasing the value will decrease latencies at the cost of
-decreasing throughput, while increasing it will improve throughput, but at the
-cost of worsening latencies. The accuracy of the rr interval is limited by HZ
-resolution of the kernel configuration. Thus, the worst case latencies are
-usually slightly higher than this actual value. BFS uses "dithering" to try and
-minimise the effect the Hz limitation has. The default value of 6 is not an
-arbitrary one. It is based on the fact that humans can detect jitter at
-approximately 7ms, so aiming for much lower latencies is pointless under most
-circumstances. It is worth noting this fact when comparing the latency
-performance of BFS to other schedulers. Worst case latencies being higher than
-7ms are far worse than average latencies not being in the microsecond range.
-Experimentation has shown that rr intervals being increased up to 300 can
-improve throughput but beyond that, scheduling noise from elsewhere prevents
-further demonstrable throughput.
-
-interactive flag
-
-This is a simple boolean that can be set to 1 or 0, set to 1 by default. This
-sacrifices some of the interactive performance by giving tasks a degree of
-soft affinity for logical CPUs when it will lead to improved throughput, but
-enabling it also sacrifices the completely deterministic nature with respect
-to latency that BFS otherwise normally provides, and subsequently leads to
-slightly higher latencies and a noticeably less interactive system.
+The value is in milliseconds, and the default value is set to 6 on a
+uniprocessor machine, and automatically set to a progressively higher value on
+multiprocessor machines. The reasoning behind increasing the value on more CPUs
+is that the effective latency is decreased by virtue of there being more CPUs on
+BFS (for reasons explained above), and increasing the value allows for less
+cache contention and more throughput. Valid values are from 1 to 1000.
+Decreasing the value will decrease latencies at the cost of decreasing
+throughput, while increasing it will improve throughput, but at the cost of
+worsening latencies. The accuracy of the rr interval is limited by HZ resolution
+of the kernel configuration. Thus, the worst case latencies are usually slightly
+higher than this actual value. The default value of 6 is not an arbitrary one.
+It is based on the fact that humans can detect jitter at approximately 7ms, so
+aiming for much lower latencies is pointless under most circumstances. It is
+worth noting this fact when comparing the latency performance of BFS to other
+schedulers. Worst case latencies being higher than 7ms are far worse than
+average latencies not being in the microsecond range.
Isochronous scheduling.
@@ -358,4 +348,4 @@ of total wall clock time taken and total work done, rather than the reported
"cpu usage".
-Con Kolivas <kernel@kolivas.org> Tue, 5 Apr 2011
+Con Kolivas <kernel@kolivas.org> Fri Aug 27 2010
diff --git a/Documentation/scheduler/sched-MuQSS.txt b/Documentation/scheduler/sched-MuQSS.txt
index 2521d1ad0..bbd6980a0 100644
--- a/Documentation/scheduler/sched-MuQSS.txt
+++ b/Documentation/scheduler/sched-MuQSS.txt
@@ -1,9 +1,10 @@
MuQSS - The Multiple Queue Skiplist Scheduler by Con Kolivas.
-See sched-BFS.txt for basic design; MuQSS is a per-cpu runqueue variant with
+MuQSS is a per-cpu runqueue variant of the original BFS scheduler with
one 8 level skiplist per runqueue, and fine grained locking for much more
scalability.
+
Goals.
The goal of the Multiple Queue Skiplist Scheduler, referred to as MuQSS from
@@ -19,11 +20,11 @@ scalable to many CPUs and processes.
Design summary.
MuQSS is best described as per-cpu multiple runqueue, O(log n) insertion, O(1)
-lookup, earliest effective virtual deadline first design, loosely based on EEVDF
-(earliest eligible virtual deadline first) and my previous Staircase Deadline
-scheduler, and evolved from the single runqueue O(n) BFS scheduler. Each
-component shall be described in order to understand the significance of, and
-reasoning for it.
+lookup, earliest effective virtual deadline first tickless design, loosely based
+on EEVDF (earliest eligible virtual deadline first) and my previous Staircase
+Deadline scheduler, and evolved from the single runqueue O(n) BFS scheduler.
+Each component shall be described in order to understand the significance of,
+and reasoning for it.
Design reasoning.
@@ -66,13 +67,279 @@ next task scheduling decision and task wakeup CPU choice to allow balancing to
happen by virtue of its choices.
-Design:
+Design details.
+
+Custom skip list implementation:
+
+To avoid the overhead of building up and tearing down skip list structures,
+the variant used by MuQSS has a number of optimisations making it specific for
+its use case in the scheduler. It uses static arrays of 8 'levels' instead of
+building up and tearing down structures dynamically. This makes each runqueue
+only scale O(log N) up to 256 tasks. However as there is one runqueue per CPU
+it means that it scales O(log N) up to 256 x number of logical CPUs which is
+far beyond the realistic task limits each CPU could handle. By being 8 levels
+it also makes the array exactly one cacheline in size. Additionally, each
+skip list node is bidirectional making insertion and removal amortised O(1),
+being O(k) where k is 1-8. Uniquely, we are only ever interested in the very
+first entry in each list at all times with MuQSS, so there is never a need to
+do a search and thus look up is always O(1).
+
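+A minimal sketch of the fixed-depth structure just described (hypothetical
+layout, not the MuQSS source): 8 static levels so the next pointers fit one
+cacheline, bidirectional links for O(k) removal with k <= 8, and the next
+task always read from the first entry at level 0, assuming a circular
+sentinel initialisation.
+
+	#include <stddef.h>
+
+	#define SKIPLIST_LEVELS 8  /* O(log n) insert up to ~256 tasks/queue */
+
+	struct skiplist_node {
+		struct skiplist_node *next[SKIPLIST_LEVELS];
+		struct skiplist_node *prev[SKIPLIST_LEVELS];
+		unsigned long long key;	/* static prio, then virtual deadline */
+	};
+
+	struct skiplist {
+		struct skiplist_node head;	/* circular sentinel */
+	};
+
+	/* Lookup is always the first real node at level 0: O(1). */
+	static struct skiplist_node *skiplist_first(struct skiplist *sl)
+	{
+		struct skiplist_node *n = sl->head.next[0];
+
+		return n == &sl->head ? NULL : n;
+	}
+
+	/* Removal relinks prev/next at each level the node occupies. */
+	static void skiplist_del(struct skiplist_node *node)
+	{
+		for (int i = 0; i < SKIPLIST_LEVELS; i++) {
+			if (!node->next[i])
+				break;
+			node->next[i]->prev[i] = node->prev[i];
+			node->prev[i]->next[i] = node->next[i];
+			node->next[i] = node->prev[i] = NULL;
+		}
+	}
+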
+Task insertion:
+
+MuQSS inserts tasks into a per CPU runqueue as an O(log N) insertion into
+a custom skip list as described above (based on the original design by William
+Pugh). Insertion is ordered in such a way that there is never a need to do a
+search by ordering tasks according to static priority primarily, and then
+virtual deadline at the time of insertion.
+
+Niffies:
+
+Niffies are a monotonic forward moving timer not unlike the "jiffies" but are
+of nanosecond resolution. Niffies are calculated per-runqueue from the high
+resolution TSC timers, and in order to maintain fairness are synchronised
+between CPUs whenever both runqueues are locked concurrently.
+
+Virtual deadline:
+
+The key to achieving low latency, scheduling fairness, and "nice level"
+distribution in MuQSS is entirely in the virtual deadline mechanism. The one
+tunable in MuQSS is the rr_interval, or "round robin interval". This is the
+maximum time two SCHED_OTHER (or SCHED_NORMAL, the common scheduling policy)
+tasks of the same nice level will be running for, or looking at it the other
+way around, the longest duration two tasks of the same nice level will be
+delayed for. When a task requests cpu time, it is given a quota (time_slice)
+equal to the rr_interval and a virtual deadline. The virtual deadline is
+offset from the current time in niffies by this equation:
+
+ niffies + (prio_ratio * rr_interval)
+
+The prio_ratio is determined as a ratio compared to the baseline of nice -20
+and increases by 10% per nice level. The deadline is a virtual one only in that
+no guarantee is placed that a task will actually be scheduled by this time, but
+it is used to compare which task should go next. There are three components to
+how a task is next chosen. First is time_slice expiration. If a task runs out
+of its time_slice, it is descheduled, the time_slice is refilled, and the
+deadline reset to that formula above. Second is sleep, where a task no longer
+is requesting CPU for whatever reason. The time_slice and deadline are _not_
+adjusted in this case and are just carried over for when the task is next
+scheduled. Third is preemption, and that is when a newly waking task is deemed
+higher priority than a currently running task on any cpu by virtue of the fact
+that it has an earlier virtual deadline than the currently running task. The
+earlier deadline is the key to which task is next chosen for the first and
+second cases.
+
+The CPU proportion of different nice tasks works out to be approximately the
+
+ (prio_ratio difference)^2
+
+The reason it is squared is that a task's deadline does not change while it is
+running unless it runs out of time_slice. Thus, even if the time actually
+passes the deadline of another task that is queued, it will not get CPU time
+unless the current running task deschedules, and the time "base" (niffies) is
+constantly moving.
+
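+The formula above can be sketched directly in code. This is an illustrative
+userspace rendering, with constants that are assumptions rather than the
+MuQSS source: prio_ratio starts at the nice -20 baseline and grows 10% per
+nice level, and the deadline is offset from the current niffies count by
+prio_ratio * rr_interval.
+
+	#include <stdio.h>
+
+	static unsigned int prio_ratio(int nice)
+	{
+		unsigned int ratio = 100;	/* baseline at nice -20 */
+
+		for (int i = -20; i < nice; i++)
+			ratio = ratio * 110 / 100;  /* +10% per nice level */
+		return ratio;
+	}
+
+	static unsigned long long
+	virtual_deadline(unsigned long long niffies_now,
+			 unsigned long long rr_interval_ns, int nice)
+	{
+		return niffies_now + prio_ratio(nice) * rr_interval_ns / 100;
+	}
+
+	int main(void)
+	{
+		unsigned long long rr = 6000000ULL;	/* 6ms in ns */
+
+		for (int nice = -20; nice <= 19; nice += 13)
+			printf("nice %3d -> deadline offset %llu ns\n",
+			       nice, virtual_deadline(0, rr, nice));
+		return 0;
+	}
+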
+Task lookup:
+
+As tasks are already pre-ordered according to anticipated scheduling order in
+the skip lists, lookup for the next suitable task per-runqueue is always a
+matter of simply selecting the first task in the 0th level skip list entry.
+In order to maintain optimal latency and fairness across CPUs, MuQSS does a
+novel examination of every other runqueue in cache locality order, choosing the
+best task across all runqueues. This provides near-determinism of how long any
+task across the entire system may wait before receiving CPU time. The other
+runqueues are first examined locklessly and then trylocked to minimise the
+potential lock contention if they are likely to have a suitable better task.
+Each other runqueue lock is only held for as long as it takes to examine the
+entry for suitability. In "interactive" mode, the default setting, MuQSS will
+look for the best deadline task across all CPUs, while in !interactive mode,
+it will only select a better deadline task from another CPU if it is more
+heavily laden than the current one.
+
+Lookup is therefore O(k) where k is number of CPUs.
+
+
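+A sketch of that O(k) selection (hypothetical structures; memory ordering,
+the interactive/!interactive distinction and the actual dequeue are elided):
+walk the other runqueues in cache-locality order, peek at each queue head
+locklessly, and only trylock a queue whose head looks better than the best
+seen so far, skipping rather than waiting on contended locks.
+
+	#include <pthread.h>
+
+	struct rq {
+		pthread_mutex_t lock;
+		unsigned long long head_deadline; /* first queued deadline */
+	};
+
+	struct rq *pick_best_rq(struct rq **cache_order, int nr_rqs)
+	{
+		struct rq *best = NULL;
+		unsigned long long best_deadline = ~0ULL;
+
+		for (int i = 0; i < nr_rqs; i++) {
+			struct rq *rq = cache_order[i];
+
+			/* lockless peek first to avoid needless contention */
+			if (rq->head_deadline >= best_deadline)
+				continue;
+			if (pthread_mutex_trylock(&rq->lock))
+				continue;	/* busy: skip, don't wait */
+			if (rq->head_deadline < best_deadline) {
+				best_deadline = rq->head_deadline;
+				best = rq;
+			}
+			pthread_mutex_unlock(&rq->lock);
+		}
+		return best;
+	}
+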
+Latency.
+
+Through the use of virtual deadlines to govern the scheduling order of normal
+tasks, queue-to-activation latency per runqueue is guaranteed to be bound by
+the rr_interval tunable which is set to 6ms by default. This means that the
+longest a CPU bound task will wait for more CPU is proportional to the number
+of running tasks and in the common case of 0-2 running tasks per CPU, will be
+under the 7ms threshold for human perception of jitter. Additionally, as newly
+woken tasks will have an early deadline from their previous runtime, the very
+tasks that are usually latency sensitive will have the shortest interval for
+activation, usually preempting any existing CPU bound tasks.
+
+Tickless expiry:
+
+A feature of MuQSS is that it is not tied to the resolution of the chosen tick
+rate in Hz, instead depending entirely on the high resolution timers where
+possible for sub-millisecond accuracy on timeouts regardless of the underlying
+tick rate. This allows MuQSS to be run with the low overhead of low Hz rates
+such as 100 by default, benefiting from the improved throughput and lower
+power usage it provides. Another advantage of this approach is that in
+combination with the Full No HZ option, which disables ticks on running task
+CPUs instead of just idle CPUs, the tick can be disabled at all times
+regardless of how many tasks are running instead of being limited to just one
+running task. Note that this option is NOT recommended for regular desktop
+users.
+
+
+Scalability and balancing.
+
+Unlike traditional approaches where balancing is a combination of CPU selection
+at task wakeup and intermittent balancing based on a vast array of rules set
+according to architecture, busyness calculations and special case management,
+MuQSS indirectly balances on the fly at task wakeup and next task selection.
+During initialisation, MuQSS creates a cache coherency ordered list of CPUs for
+each logical CPU and uses this to aid task/CPU selection when CPUs are busy.
+Additionally it selects any idle CPUs, if they are available, at any time over
+busy CPUs according to the following preference:
+
+ * Same thread, idle or busy cache, idle or busy threads
+ * Other core, same cache, idle or busy cache, idle threads.
+ * Same node, other CPU, idle cache, idle threads.
+ * Same node, other CPU, busy cache, idle threads.
+ * Other core, same cache, busy threads.
+ * Same node, other CPU, busy threads.
+ * Other node, other CPU, idle cache, idle threads.
+ * Other node, other CPU, busy cache, idle threads.
+ * Other node, other CPU, busy threads.
+
+Mux is therefore SMT, MC and Numa aware without the need for extra
+intermittent balancing to maintain CPUs busy and make the most of cache
+coherency.
+
+
+Features
+
+As the initial prime target audience for MuQSS was the average desktop user, it
+was designed to not need tweaking, tuning or have features set to obtain benefit
+from it. Thus the number of knobs and features has been kept to an absolute
+minimum and should not require extra user input for the vast majority of cases.
+There are 3 optional tunables, and 2 extra scheduling policies. The rr_interval,
+interactive, and iso_cpu tunables, and the SCHED_ISO and SCHED_IDLEPRIO
+policies. In addition to this, MuQSS also uses sub-tick accounting. What MuQSS
+does _not_ now feature is support for CGROUPS. The average user should neither
+need to know what these are, nor should they need to be using them to have good
+desktop behaviour. However since some applications refuse to work without
+cgroups, one can enable them with MuQSS as a stub and the filesystem will be
+created which will allow the applications to work.
+
+rr_interval:
+
+ /proc/sys/kernel/rr_interval
+
+The value is in milliseconds, and the default value is set to 6. Valid values
+are from 1 to 1000. Decreasing the value will decrease latencies at the cost of
+decreasing throughput, while increasing it will improve throughput, but at the
+cost of worsening latencies. It is based on the fact that humans can detect
+jitter at approximately 7ms, so aiming for much lower latencies is pointless
+under most circumstances. It is worth noting this fact when comparing the
+latency performance of MuQSS to other schedulers. Worst case latencies being
+higher than 7ms are far worse than average latencies not being in the
+microsecond range.
+
+interactive:
+
+ /proc/sys/kernel/interactive
+
+The value is a simple boolean of 1 for on and 0 for off and is set to on by
+default. Disabling this will disable the near-determinism of MuQSS when
+selecting the next task by not examining all CPUs for the earliest deadline
+task, or which CPU to wake to, instead prioritising CPU balancing for improved
+throughput. Latency will still be bound by rr_interval, but on a per-CPU basis
+instead of across the whole system.
+
+Isochronous scheduling:
+
+Isochronous scheduling is a unique scheduling policy designed to provide
+near-real-time performance to unprivileged (ie non-root) users without the
+ability to starve the machine indefinitely. Isochronous tasks (which means
+"same time") are set using, for example, the schedtool application like so:
+
+ schedtool -I -e amarok
+
+This will start the audio application "amarok" as SCHED_ISO. How SCHED_ISO works
+is that it has a priority level between true realtime tasks and SCHED_NORMAL
+which would allow them to preempt all normal tasks, in a SCHED_RR fashion (ie,
+if multiple SCHED_ISO tasks are running, they purely round robin at rr_interval
+rate). However if ISO tasks run for more than a tunable finite amount of time,
+they are then demoted back to SCHED_NORMAL scheduling. This finite amount of
+time is the percentage of CPU available per CPU, configurable as a percentage in
+the following "resource handling" tunable (as opposed to a scheduler tunable):
+
+iso_cpu:
+
+ /proc/sys/kernel/iso_cpu
+
+and is set to 70% by default. It is calculated over a rolling 5 second average.
+Because it is the total CPU available, it means that on a multi CPU machine, it
+is possible to have an ISO task running as realtime scheduling indefinitely on
+just one CPU, as the other CPUs will be available. Setting this to 100 is the
+equivalent of giving all users SCHED_RR access and setting it to 0 removes the
+ability to run any pseudo-realtime tasks.
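+The demotion check described above reduces to a simple comparison, sketched
+here with illustrative names and a simplified single-bucket window rather
+than whatever decay scheme the scheduler actually uses:
+
+	#define ISO_PERIOD_NS 5000000000ULL	/* rolling 5 second window */
+
+	struct iso_state {
+		unsigned long long iso_runtime_ns; /* ISO CPU used in window */
+		unsigned int iso_cpu_pct;	/* /proc/sys/kernel/iso_cpu */
+	};
+
+	/* Demote to SCHED_NORMAL once ISO usage exceeds its share. */
+	static int iso_should_demote(const struct iso_state *s)
+	{
+		return s->iso_runtime_ns * 100 >
+		       ISO_PERIOD_NS * s->iso_cpu_pct;
+	}
+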
+
+A feature of MuQSS is that it detects when an application tries to obtain a
+realtime policy (SCHED_RR or SCHED_FIFO) and the caller does not have the
+appropriate privileges to use those policies. When it detects this, it will
+give the task SCHED_ISO policy instead. Thus it is transparent to the user.
+
+
+Idleprio scheduling:
+
+Idleprio scheduling is a scheduling policy designed to give out CPU to a task
+_only_ when the CPU would be otherwise idle. The idea behind this is to allow
+ultra low priority tasks to be run in the background that have virtually no
+effect on the foreground tasks. This is ideally suited to distributed computing
+clients (like setiathome, folding, mprime etc) but can also be used to start a
+video encode or so on without any slowdown of other tasks. To prevent this policy
+from grabbing shared resources and holding them indefinitely, if it detects a
+state where the task is waiting on I/O, the machine is about to suspend to ram
+and so on, it will transiently schedule them as SCHED_NORMAL. Once a task has
+been scheduled as IDLEPRIO, it cannot be put back to SCHED_NORMAL without
+superuser privileges since it is effectively a lower scheduling policy. Tasks
+can be set to start as SCHED_IDLEPRIO with the schedtool command like so:
+
+schedtool -D -e ./mprime
+
+Subtick accounting:
-MuQSS is an 8 level skip list per runqueue variant of BFS.
+It is surprisingly difficult to get accurate CPU accounting, and in many cases,
+the accounting is done by simply determining what is happening at the precise
+moment a timer tick fires off. This becomes increasingly inaccurate as the timer
+tick frequency (HZ) is lowered. It is possible to create an application which
+uses almost 100% CPU, yet by being descheduled at the right time, records zero
+CPU usage. While the main problem with this is that there are possible security
+implications, it is also difficult to determine how much CPU a task really does
+use. Mux uses sub-tick accounting from the TSC clock to determine real CPU
+usage. Thus, the amount of CPU reported as being used by MuQSS will more
+accurately represent how much CPU the task itself is using (as is shown for
+example by the 'time' application), so the reported values may be quite
+different to other schedulers. When comparing throughput of MuQSS to other
+designs, it is important to compare the actual completed work in terms of total
+wall clock time taken and total work done, rather than the reported "cpu usage".
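+The difference from tick sampling can be sketched as follows (userspace
+illustration with CLOCK_MONOTONIC standing in for the TSC; hypothetical
+names, not the MuQSS accounting code): rather than crediting a whole tick
+to whichever task is running when the tick fires, each task is charged the
+measured nanoseconds between its switch-in and switch-out.
+
+	#include <time.h>
+
+	struct accounted_task {
+		unsigned long long ns_used;	/* real CPU consumed */
+		unsigned long long last_ran;	/* clock at last switch-in */
+	};
+
+	static unsigned long long now_ns(void)
+	{
+		struct timespec ts;
+
+		clock_gettime(CLOCK_MONOTONIC, &ts);
+		return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
+	}
+
+	static void account_switch_in(struct accounted_task *t)
+	{
+		t->last_ran = now_ns();
+	}
+
+	static void account_switch_out(struct accounted_task *t)
+	{
+		t->ns_used += now_ns() - t->last_ran; /* exact, not sampled */
+	}
+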
-See sched-BFS.txt for some of the shared design details.
+Symmetric MultiThreading (SMT) aware nice:
-Documentation yet to be completed.
+SMT, a.k.a. hyperthreading, is a very common feature on modern CPUs. While the
+logical CPU count rises by adding thread units to each CPU core, allowing more
+than one task to be run simultaneously on the same core, the disadvantage of it
+is that the CPU power is shared between the tasks, not summating to the power
+of two CPUs. The practical upshot of this is that two tasks running on
+separate threads of the same core run significantly slower than if they had one
+core each to run on. While smart CPU selection allows each task to have a core
+to itself whenever available (as is done on MuQSS), it cannot offset the
+slowdown that occurs when the cores are all loaded and only a thread is left.
+Most of the time this is harmless as the CPU is effectively overloaded at this
+point and the extra thread is of benefit. However when running a niced task in
+the presence of an un-niced task (say nice 19 v nice 0), the nice task gets
+precisely the same amount of CPU power as the unniced one. MuQSS has an
+optional configuration feature known as SMT-NICE which selectively idles the
+secondary niced thread for a period proportional to the nice difference,
+allowing CPU distribution according to nice level to be maintained, at the
+expense of a small amount of extra overhead. If this is configured in on a
+machine without SMT threads, the overhead is minimal.
-Con Kolivas <kernel@kolivas.org> Sun, 2nd October 2016
+Con Kolivas <kernel@kolivas.org> Sat, 29th October 2016
diff --git a/Makefile b/Makefile
index 202eb9484..082d6bc4d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 8
-SUBLEVEL = 4
+SUBLEVEL = 6
EXTRAVERSION = -gnu
NAME = Psychotic Stoned Sheep
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 6cb3736b6..d347bbc08 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -107,13 +107,13 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
struct user_regs_struct uregs;
err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
- if (!err)
- set_current_blocked(&set);
-
err |= __copy_from_user(&uregs.scratch,
&(sf->uc.uc_mcontext.regs.scratch),
sizeof(sf->uc.uc_mcontext.regs.scratch));
+ if (err)
+ return err;
+ set_current_blocked(&set);
regs->bta = uregs.scratch.bta;
regs->lp_start = uregs.scratch.lp_start;
regs->lp_end = uregs.scratch.lp_end;
@@ -138,7 +138,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
regs->r0 = uregs.scratch.r0;
regs->sp = uregs.scratch.sp;
- return err;
+ return 0;
}
static inline int is_do_ss_needed(unsigned int magic)
diff --git a/arch/arm/boot/dts/arm-realview-eb.dtsi b/arch/arm/boot/dts/arm-realview-eb.dtsi
index 1c6a04021..e2e959959 100644
--- a/arch/arm/boot/dts/arm-realview-eb.dtsi
+++ b/arch/arm/boot/dts/arm-realview-eb.dtsi
@@ -51,14 +51,6 @@
regulator-boot-on;
};
- veth: fixedregulator@0 {
- compatible = "regulator-fixed";
- regulator-name = "veth";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-boot-on;
- };
-
xtal24mhz: xtal24mhz@24M {
#clock-cells = <0>;
compatible = "fixed-clock";
@@ -134,16 +126,15 @@
bank-width = <4>;
};
- /* SMSC 9118 ethernet with PHY and EEPROM */
+ /* SMSC LAN91C111 ethernet with PHY and EEPROM */
ethernet: ethernet@4e000000 {
- compatible = "smsc,lan9118", "smsc,lan9115";
+ compatible = "smsc,lan91c111";
reg = <0x4e000000 0x10000>;
- phy-mode = "mii";
- reg-io-width = <4>;
- smsc,irq-active-high;
- smsc,irq-push-pull;
- vdd33a-supply = <&veth>;
- vddvario-supply = <&veth>;
+ /*
+ * This means the adapter can be accessed with 8, 16 or
+ * 32 bit reads/writes.
+ */
+ reg-io-width = <7>;
};
usb: usb@4f000000 {
diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
index 03b8bbeb6..652418aa2 100644
--- a/arch/arm/boot/dts/bcm958625hr.dts
+++ b/arch/arm/boot/dts/bcm958625hr.dts
@@ -47,7 +47,8 @@
};
memory {
- reg = <0x60000000 0x20000000>;
+ device_type = "memory";
+ reg = <0x60000000 0x80000000>;
};
};
diff --git a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
index ca86da682..854117dc0 100644
--- a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
@@ -119,7 +119,7 @@
pinctrl-names = "default";
pinctrl-0 = <&mcspi1_pins>;
- lcd0: display {
+ lcd0: display@1 {
compatible = "lgphilips,lb035q02";
label = "lcd35";
diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi
index f68b3242b..3f528a379 100644
--- a/arch/arm/boot/dts/sun9i-a80.dtsi
+++ b/arch/arm/boot/dts/sun9i-a80.dtsi
@@ -899,8 +899,7 @@
resets = <&apbs_rst 0>;
gpio-controller;
interrupt-controller;
- #address-cells = <1>;
- #size-cells = <0>;
+ #interrupt-cells = <3>;
#gpio-cells = <3>;
r_ir_pins: r_ir {
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index 1568cb5cd..b88364aa1 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -220,6 +220,27 @@ static int ghash_async_digest(struct ahash_request *req)
}
}
+static int ghash_async_import(struct ahash_request *req, const void *in)
+{
+ struct ahash_request *cryptd_req = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+
+ desc->tfm = cryptd_ahash_child(ctx->cryptd_tfm);
+ desc->flags = req->base.flags;
+
+ return crypto_shash_import(desc, in);
+}
+
+static int ghash_async_export(struct ahash_request *req, void *out)
+{
+ struct ahash_request *cryptd_req = ahash_request_ctx(req);
+ struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+
+ return crypto_shash_export(desc, out);
+}
+
static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
@@ -268,7 +289,10 @@ static struct ahash_alg ghash_async_alg = {
.final = ghash_async_final,
.setkey = ghash_async_setkey,
.digest = ghash_async_digest,
+ .import = ghash_async_import,
+ .export = ghash_async_export,
.halg.digestsize = GHASH_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct ghash_desc_ctx),
.halg.base = {
.cra_name = "ghash",
.cra_driver_name = "ghash-ce",
diff --git a/arch/arm/mach-pxa/corgi_pm.c b/arch/arm/mach-pxa/corgi_pm.c
index d9206811b..c71c483f4 100644
--- a/arch/arm/mach-pxa/corgi_pm.c
+++ b/arch/arm/mach-pxa/corgi_pm.c
@@ -131,16 +131,11 @@ static int corgi_should_wakeup(unsigned int resume_on_alarm)
return is_resume;
}
-static unsigned long corgi_charger_wakeup(void)
+static bool corgi_charger_wakeup(void)
{
- unsigned long ret;
-
- ret = (!gpio_get_value(CORGI_GPIO_AC_IN) << GPIO_bit(CORGI_GPIO_AC_IN))
- | (!gpio_get_value(CORGI_GPIO_KEY_INT)
- << GPIO_bit(CORGI_GPIO_KEY_INT))
- | (!gpio_get_value(CORGI_GPIO_WAKEUP)
- << GPIO_bit(CORGI_GPIO_WAKEUP));
- return ret;
+ return !gpio_get_value(CORGI_GPIO_AC_IN) ||
+ !gpio_get_value(CORGI_GPIO_KEY_INT) ||
+ !gpio_get_value(CORGI_GPIO_WAKEUP);
}
unsigned long corgipm_read_devdata(int type)
diff --git a/arch/arm/mach-pxa/pxa_cplds_irqs.c b/arch/arm/mach-pxa/pxa_cplds_irqs.c
index 2385052b0..e362f865f 100644
--- a/arch/arm/mach-pxa/pxa_cplds_irqs.c
+++ b/arch/arm/mach-pxa/pxa_cplds_irqs.c
@@ -41,30 +41,35 @@ static irqreturn_t cplds_irq_handler(int in_irq, void *d)
unsigned long pending;
unsigned int bit;
- pending = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask;
- for_each_set_bit(bit, &pending, CPLDS_NB_IRQ)
- generic_handle_irq(irq_find_mapping(fpga->irqdomain, bit));
+ do {
+ pending = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask;
+ for_each_set_bit(bit, &pending, CPLDS_NB_IRQ) {
+ generic_handle_irq(irq_find_mapping(fpga->irqdomain,
+ bit));
+ }
+ } while (pending);
return IRQ_HANDLED;
}
-static void cplds_irq_mask_ack(struct irq_data *d)
+static void cplds_irq_mask(struct irq_data *d)
{
struct cplds *fpga = irq_data_get_irq_chip_data(d);
unsigned int cplds_irq = irqd_to_hwirq(d);
- unsigned int set, bit = BIT(cplds_irq);
+ unsigned int bit = BIT(cplds_irq);
fpga->irq_mask &= ~bit;
writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
- set = readl(fpga->base + FPGA_IRQ_SET_CLR);
- writel(set & ~bit, fpga->base + FPGA_IRQ_SET_CLR);
}
static void cplds_irq_unmask(struct irq_data *d)
{
struct cplds *fpga = irq_data_get_irq_chip_data(d);
unsigned int cplds_irq = irqd_to_hwirq(d);
- unsigned int bit = BIT(cplds_irq);
+ unsigned int set, bit = BIT(cplds_irq);
+
+ set = readl(fpga->base + FPGA_IRQ_SET_CLR);
+ writel(set & ~bit, fpga->base + FPGA_IRQ_SET_CLR);
fpga->irq_mask |= bit;
writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
@@ -72,7 +77,8 @@ static void cplds_irq_unmask(struct irq_data *d)
static struct irq_chip cplds_irq_chip = {
.name = "pxa_cplds",
- .irq_mask_ack = cplds_irq_mask_ack,
+ .irq_ack = cplds_irq_mask,
+ .irq_mask = cplds_irq_mask,
.irq_unmask = cplds_irq_unmask,
.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
};
diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
index b80eab999..249b7bd5f 100644
--- a/arch/arm/mach-pxa/sharpsl_pm.c
+++ b/arch/arm/mach-pxa/sharpsl_pm.c
@@ -744,7 +744,7 @@ static int sharpsl_off_charge_battery(void)
time = RCNR;
while (1) {
/* Check if any wakeup event had occurred */
- if (sharpsl_pm.machinfo->charger_wakeup() != 0)
+ if (sharpsl_pm.machinfo->charger_wakeup())
return 0;
/* Check for timeout */
if ((RCNR - time) > SHARPSL_WAIT_CO_TIME)
diff --git a/arch/arm/mach-pxa/sharpsl_pm.h b/arch/arm/mach-pxa/sharpsl_pm.h
index 905be6755..fa75b6df8 100644
--- a/arch/arm/mach-pxa/sharpsl_pm.h
+++ b/arch/arm/mach-pxa/sharpsl_pm.h
@@ -34,7 +34,7 @@ struct sharpsl_charger_machinfo {
#define SHARPSL_STATUS_LOCK 5
#define SHARPSL_STATUS_CHRGFULL 6
#define SHARPSL_STATUS_FATAL 7
- unsigned long (*charger_wakeup)(void);
+ bool (*charger_wakeup)(void);
int (*should_wakeup)(unsigned int resume_on_alarm);
void (*backlight_limit)(int);
int (*backlight_get_status) (void);
diff --git a/arch/arm/mach-pxa/spitz_pm.c b/arch/arm/mach-pxa/spitz_pm.c
index ea9f9034c..4e64a1402 100644
--- a/arch/arm/mach-pxa/spitz_pm.c
+++ b/arch/arm/mach-pxa/spitz_pm.c
@@ -165,13 +165,10 @@ static int spitz_should_wakeup(unsigned int resume_on_alarm)
return is_resume;
}
-static unsigned long spitz_charger_wakeup(void)
+static bool spitz_charger_wakeup(void)
{
- unsigned long ret;
- ret = ((!gpio_get_value(SPITZ_GPIO_KEY_INT)
- << GPIO_bit(SPITZ_GPIO_KEY_INT))
- | gpio_get_value(SPITZ_GPIO_SYNC));
- return ret;
+ return !gpio_get_value(SPITZ_GPIO_KEY_INT) ||
+ gpio_get_value(SPITZ_GPIO_SYNC);
}
unsigned long spitzpm_read_devdata(int type)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 4cdeae3b1..948a9a8a9 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -167,11 +167,6 @@ static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}
-static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
-{
- return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR);
-}
-
static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
@@ -192,6 +187,12 @@ static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}
+static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+{
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
+ kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
+}
+
static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index e12af6754..06ff7fd9e 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -17,6 +17,7 @@
#define __ASM_MODULE_H
#include <asm-generic/module.h>
+#include <asm/memory.h>
#define MODULE_ARCH_VERMAGIC "aarch64"
@@ -32,6 +33,10 @@ u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
Elf64_Sym *sym);
#ifdef CONFIG_RANDOMIZE_BASE
+#ifdef CONFIG_MODVERSIONS
+#define ARCH_RELOCATES_KCRCTAB
+#define reloc_start (kimage_vaddr - KIMAGE_VADDR)
+#endif
extern u64 module_alloc_base;
#else
#define module_alloc_base ((u64)_etext - MODULES_VSIZE)
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 2fee2f592..5394c8405 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -44,48 +44,44 @@ static inline unsigned long __percpu_##op(void *ptr, \
\
switch (size) { \
case 1: \
- do { \
- asm ("//__per_cpu_" #op "_1\n" \
- "ldxrb %w[ret], %[ptr]\n" \
+ asm ("//__per_cpu_" #op "_1\n" \
+ "1: ldxrb %w[ret], %[ptr]\n" \
#asm_op " %w[ret], %w[ret], %w[val]\n" \
- "stxrb %w[loop], %w[ret], %[ptr]\n" \
- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
- [ptr] "+Q"(*(u8 *)ptr) \
- : [val] "Ir" (val)); \
- } while (loop); \
+ " stxrb %w[loop], %w[ret], %[ptr]\n" \
+ " cbnz %w[loop], 1b" \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u8 *)ptr) \
+ : [val] "Ir" (val)); \
break; \
case 2: \
- do { \
- asm ("//__per_cpu_" #op "_2\n" \
- "ldxrh %w[ret], %[ptr]\n" \
+ asm ("//__per_cpu_" #op "_2\n" \
+ "1: ldxrh %w[ret], %[ptr]\n" \
#asm_op " %w[ret], %w[ret], %w[val]\n" \
- "stxrh %w[loop], %w[ret], %[ptr]\n" \
- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
- [ptr] "+Q"(*(u16 *)ptr) \
- : [val] "Ir" (val)); \
- } while (loop); \
+ " stxrh %w[loop], %w[ret], %[ptr]\n" \
+ " cbnz %w[loop], 1b" \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u16 *)ptr) \
+ : [val] "Ir" (val)); \
break; \
case 4: \
- do { \
- asm ("//__per_cpu_" #op "_4\n" \
- "ldxr %w[ret], %[ptr]\n" \
+ asm ("//__per_cpu_" #op "_4\n" \
+ "1: ldxr %w[ret], %[ptr]\n" \
#asm_op " %w[ret], %w[ret], %w[val]\n" \
- "stxr %w[loop], %w[ret], %[ptr]\n" \
- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
- [ptr] "+Q"(*(u32 *)ptr) \
- : [val] "Ir" (val)); \
- } while (loop); \
+ " stxr %w[loop], %w[ret], %[ptr]\n" \
+ " cbnz %w[loop], 1b" \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u32 *)ptr) \
+ : [val] "Ir" (val)); \
break; \
case 8: \
- do { \
- asm ("//__per_cpu_" #op "_8\n" \
- "ldxr %[ret], %[ptr]\n" \
+ asm ("//__per_cpu_" #op "_8\n" \
+ "1: ldxr %[ret], %[ptr]\n" \
#asm_op " %[ret], %[ret], %[val]\n" \
- "stxr %w[loop], %[ret], %[ptr]\n" \
- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
- [ptr] "+Q"(*(u64 *)ptr) \
- : [val] "Ir" (val)); \
- } while (loop); \
+ " stxr %w[loop], %[ret], %[ptr]\n" \
+ " cbnz %w[loop], 1b" \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u64 *)ptr) \
+ : [val] "Ir" (val)); \
break; \
default: \
BUILD_BUG(); \
@@ -150,44 +146,40 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
switch (size) {
case 1:
- do {
- asm ("//__percpu_xchg_1\n"
- "ldxrb %w[ret], %[ptr]\n"
- "stxrb %w[loop], %w[val], %[ptr]\n"
- : [loop] "=&r"(loop), [ret] "=&r"(ret),
- [ptr] "+Q"(*(u8 *)ptr)
- : [val] "r" (val));
- } while (loop);
+ asm ("//__percpu_xchg_1\n"
+ "1: ldxrb %w[ret], %[ptr]\n"
+ " stxrb %w[loop], %w[val], %[ptr]\n"
+ " cbnz %w[loop], 1b"
+ : [loop] "=&r"(loop), [ret] "=&r"(ret),
+ [ptr] "+Q"(*(u8 *)ptr)
+ : [val] "r" (val));
break;
case 2:
- do {
- asm ("//__percpu_xchg_2\n"
- "ldxrh %w[ret], %[ptr]\n"
- "stxrh %w[loop], %w[val], %[ptr]\n"
- : [loop] "=&r"(loop), [ret] "=&r"(ret),
- [ptr] "+Q"(*(u16 *)ptr)
- : [val] "r" (val));
- } while (loop);
+ asm ("//__percpu_xchg_2\n"
+ "1: ldxrh %w[ret], %[ptr]\n"
+ " stxrh %w[loop], %w[val], %[ptr]\n"
+ " cbnz %w[loop], 1b"
+ : [loop] "=&r"(loop), [ret] "=&r"(ret),
+ [ptr] "+Q"(*(u16 *)ptr)
+ : [val] "r" (val));
break;
case 4:
- do {
- asm ("//__percpu_xchg_4\n"
- "ldxr %w[ret], %[ptr]\n"
- "stxr %w[loop], %w[val], %[ptr]\n"
- : [loop] "=&r"(loop), [ret] "=&r"(ret),
- [ptr] "+Q"(*(u32 *)ptr)
- : [val] "r" (val));
- } while (loop);
+ asm ("//__percpu_xchg_4\n"
+ "1: ldxr %w[ret], %[ptr]\n"
+ " stxr %w[loop], %w[val], %[ptr]\n"
+ " cbnz %w[loop], 1b"
+ : [loop] "=&r"(loop), [ret] "=&r"(ret),
+ [ptr] "+Q"(*(u32 *)ptr)
+ : [val] "r" (val));
break;
case 8:
- do {
- asm ("//__percpu_xchg_8\n"
- "ldxr %[ret], %[ptr]\n"
- "stxr %w[loop], %[val], %[ptr]\n"
- : [loop] "=&r"(loop), [ret] "=&r"(ret),
- [ptr] "+Q"(*(u64 *)ptr)
- : [val] "r" (val));
- } while (loop);
+ asm ("//__percpu_xchg_8\n"
+ "1: ldxr %[ret], %[ptr]\n"
+ " stxr %w[loop], %[val], %[ptr]\n"
+ " cbnz %w[loop], 1b"
+ : [loop] "=&r"(loop), [ret] "=&r"(ret),
+ [ptr] "+Q"(*(u64 *)ptr)
+ : [val] "r" (val));
break;
default:
BUILD_BUG();
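The point of this hunk is subtle: with the retry loop written in C, the compiler is free to emit loads and stores between the exclusive load (ldxr) and the exclusive store (stxr), and any such access can clear the exclusive monitor, so the stxr never succeeds and the loop livelocks. Folding the branch into the asm block via cbnz guarantees nothing executes between the pair. A stripped-down sketch of the fixed 4-byte case, outside the macro machinery and assuming AArch64:

static inline unsigned int percpu_add_4(unsigned int *ptr, unsigned int val)
{
        unsigned long loop;
        unsigned int ret;

        asm ("1:        ldxr    %w[ret], %[ptr]\n"      /* exclusive load */
             "  add     %w[ret], %w[ret], %w[val]\n"
             "  stxr    %w[loop], %w[ret], %[ptr]\n"    /* fails if monitor cleared */
             "  cbnz    %w[loop], 1b"                   /* retry inside the asm */
             : [loop] "=&r" (loop), [ret] "=&r" (ret), [ptr] "+Q" (*ptr)
             : [val] "Ir" (val));

        return ret;
}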
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index c47257c91..db849839e 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -21,6 +21,7 @@
/*
* User space memory access functions
*/
+#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <linux/thread_info.h>
@@ -102,6 +103,13 @@ static inline void set_fs(mm_segment_t fs)
flag; \
})
+/*
+ * When dealing with data aborts or instruction traps we may end up with
+ * a tagged userland pointer. Clear the tag to get a sane pointer to pass
+ * on to access_ok(), for instance.
+ */
+#define untagged_addr(addr) sign_extend64(addr, 55)
+
#define access_ok(type, addr, size) __range_ok(addr, size)
#define user_addr_max get_fs
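sign_extend64(addr, 55) works as a tag-stripper because of the arm64 addressing split: user (TTBR0) addresses have bit 55 clear and kernel (TTBR1) addresses have it set, so extending bit 55 upward zeroes a user pointer's top-byte tag while leaving kernel pointers all-ones on top. A worked example in plain userspace C, reimplementing sign_extend64 for illustration:

#include <stdint.h>
#include <stdio.h>

static int64_t sign_extend64(uint64_t value, int index)
{
        int shift = 63 - index;
        return (int64_t)(value << shift) >> shift;
}

int main(void)
{
        uint64_t tagged = 0x5a00007fdeadbeefULL; /* tag 0x5a in bits 63:56 */

        /* Bit 55 is clear, so the extension clears bits 63:56. */
        printf("%#llx\n", (unsigned long long)sign_extend64(tagged, 55));
        /* prints 0x7fdeadbeef */
        return 0;
}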
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 42ffdb54e..b0988bb1b 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -280,35 +280,43 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table)
/*
* Error-checking SWP macros implemented using ldxr{b}/stxr{b}
*/
-#define __user_swpX_asm(data, addr, res, temp, B) \
+
+/* Arbitrary constant to ensure forward-progress of the LL/SC loop */
+#define __SWP_LL_SC_LOOPS 4
+
+#define __user_swpX_asm(data, addr, res, temp, temp2, B) \
__asm__ __volatile__( \
+ " mov %w3, %w7\n" \
ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \
- "0: ldxr"B" %w2, [%3]\n" \
- "1: stxr"B" %w0, %w1, [%3]\n" \
+ "0: ldxr"B" %w2, [%4]\n" \
+ "1: stxr"B" %w0, %w1, [%4]\n" \
" cbz %w0, 2f\n" \
- " mov %w0, %w4\n" \
+ " sub %w3, %w3, #1\n" \
+ " cbnz %w3, 0b\n" \
+ " mov %w0, %w5\n" \
" b 3f\n" \
"2:\n" \
" mov %w1, %w2\n" \
"3:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
- "4: mov %w0, %w5\n" \
+ "4: mov %w0, %w6\n" \
" b 3b\n" \
" .popsection" \
_ASM_EXTABLE(0b, 4b) \
_ASM_EXTABLE(1b, 4b) \
ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \
- : "=&r" (res), "+r" (data), "=&r" (temp) \
- : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \
+ : "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \
+ : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT), \
+ "i" (__SWP_LL_SC_LOOPS) \
: "memory")
-#define __user_swp_asm(data, addr, res, temp) \
- __user_swpX_asm(data, addr, res, temp, "")
-#define __user_swpb_asm(data, addr, res, temp) \
- __user_swpX_asm(data, addr, res, temp, "b")
+#define __user_swp_asm(data, addr, res, temp, temp2) \
+ __user_swpX_asm(data, addr, res, temp, temp2, "")
+#define __user_swpb_asm(data, addr, res, temp, temp2) \
+ __user_swpX_asm(data, addr, res, temp, temp2, "b")
/*
* Bit 22 of the instruction encoding distinguishes between
@@ -328,12 +336,12 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
}
while (1) {
- unsigned long temp;
+ unsigned long temp, temp2;
if (type == TYPE_SWPB)
- __user_swpb_asm(*data, address, res, temp);
+ __user_swpb_asm(*data, address, res, temp, temp2);
else
- __user_swp_asm(*data, address, res, temp);
+ __user_swp_asm(*data, address, res, temp, temp2);
if (likely(res != -EAGAIN) || signal_pending(current))
break;
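The rewritten macro bounds the in-asm LL/SC retry at __SWP_LL_SC_LOOPS iterations and reports -EAGAIN on exhaustion, so forward progress moves out to C, where the task can notice pending signals instead of spinning in kernel space indefinitely. The resulting control flow, as an illustrative sketch (try_swp_once() and have_pending_signal() are hypothetical stand-ins):

#include <errno.h>

#define SWP_LL_SC_LOOPS 4 /* arbitrary bound, as in the patch */

extern int try_swp_once(unsigned int *mem, unsigned int *data); /* hypothetical */
extern int have_pending_signal(void);                           /* hypothetical */

static int emulate_swp(unsigned int *mem, unsigned int *data)
{
        for (;;) {
                int res = -EAGAIN, attempts = SWP_LL_SC_LOOPS;

                while (attempts--)
                        if (try_swp_once(mem, data) == 0) {
                                res = 0;
                                break;
                        }

                /* Bounded retries exhausted: bail out if a signal is
                 * pending, otherwise go around and try again. */
                if (res != -EAGAIN || have_pending_signal())
                        return res;
        }
}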
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 3e7b050e9..4d19508c5 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -578,8 +578,9 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
b.lt 4f // Skip if no PMU present
mrs x0, pmcr_el0 // Disable debug access traps
ubfx x0, x0, #11, #5 // to EL2 and allow access to
- msr mdcr_el2, x0 // all PMU counters from EL1
4:
+ csel x0, xzr, x0, lt // all PMU counters from EL1
+ msr mdcr_el2, x0 // (if they exist)
/* Stage-2 translation */
msr vttbr_el2, xzr
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index df0675084..771a01a7f 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -434,18 +434,21 @@ void cpu_enable_cache_maint_trap(void *__unused)
}
#define __user_cache_maint(insn, address, res) \
- asm volatile ( \
- "1: " insn ", %1\n" \
- " mov %w0, #0\n" \
- "2:\n" \
- " .pushsection .fixup,\"ax\"\n" \
- " .align 2\n" \
- "3: mov %w0, %w2\n" \
- " b 2b\n" \
- " .popsection\n" \
- _ASM_EXTABLE(1b, 3b) \
- : "=r" (res) \
- : "r" (address), "i" (-EFAULT) )
+ if (untagged_addr(address) >= user_addr_max()) \
+ res = -EFAULT; \
+ else \
+ asm volatile ( \
+ "1: " insn ", %1\n" \
+ " mov %w0, #0\n" \
+ "2:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "3: mov %w0, %w2\n" \
+ " b 2b\n" \
+ " .popsection\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r" (res) \
+ : "r" (address), "i" (-EFAULT) )
asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
{
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index ce9e5e5f2..eaf08d3ab 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -98,6 +98,8 @@ ENTRY(__guest_exit)
// x4-x29,lr: vcpu regs
// vcpu x0-x3 on the stack
+ ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
+
add x2, x0, #VCPU_CONTEXT
stp x4, x5, [x2, #CPU_XREG_OFFSET(4)]
diff --git a/arch/metag/include/asm/atomic.h b/arch/metag/include/asm/atomic.h
index 470e365f0..8ff0a7086 100644
--- a/arch/metag/include/asm/atomic.h
+++ b/arch/metag/include/asm/atomic.h
@@ -39,11 +39,10 @@
#define atomic_dec(v) atomic_sub(1, (v))
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
#endif
-#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
-
#include <asm-generic/atomic64.h>
#endif /* __ASM_METAG_ATOMIC_H */
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index f6fc6aac5..b6578611d 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -152,7 +152,7 @@ static inline int is_syscall_success(struct pt_regs *regs)
static inline long regs_return_value(struct pt_regs *regs)
{
- if (is_syscall_success(regs))
+ if (is_syscall_success(regs) || !user_mode(regs))
return regs->regs[2];
else
return -regs->regs[2];
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index 3b4538ec0..de9e8836d 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -82,7 +82,7 @@ obj-vdso := $(obj-vdso-y:%.o=$(obj)/%.o)
$(obj-vdso): KBUILD_CFLAGS := $(cflags-vdso) $(native-abi)
$(obj-vdso): KBUILD_AFLAGS := $(aflags-vdso) $(native-abi)
-$(obj)/vdso.lds: KBUILD_CPPFLAGS := $(native-abi)
+$(obj)/vdso.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) $(native-abi)
$(obj)/vdso.so.dbg.raw: $(obj)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold)
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 291cee28c..c2c43f714 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -83,10 +83,10 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
/* This is the size of the initially mapped kernel memory */
-#ifdef CONFIG_64BIT
-#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
+#if defined(CONFIG_64BIT)
+#define KERNEL_INITIAL_ORDER 26 /* 1<<26 = 64MB */
#else
-#define KERNEL_INITIAL_ORDER 24 /* 1<<24 = 16MB */
+#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
#endif
#define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index f7ea626e2..81d6f6391 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -38,6 +38,7 @@
#include <linux/export.h>
#include <asm/processor.h>
+#include <asm/sections.h>
#include <asm/pdc.h>
#include <asm/led.h>
#include <asm/machdep.h> /* for pa7300lc_init() proto */
@@ -140,6 +141,13 @@ void __init setup_arch(char **cmdline_p)
#endif
printk(KERN_CONT ".\n");
+ /*
+ * Check if initial kernel page mappings are sufficient.
+ * panic early if not, else we may access kernel functions
+ * and variables which can't be reached.
+ */
+ if (__pa((unsigned long) &_end) >= KERNEL_INITIAL_SIZE)
+ panic("KERNEL_INITIAL_ORDER too small!");
pdc_console_init();
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 4b0b963d5..9b63b876a 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -226,12 +226,6 @@ void __init start_cpu_itimer(void)
unsigned int cpu = smp_processor_id();
unsigned long next_tick = mfctl(16) + clocktick;
-#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
- /* With multiple 64bit CPUs online, the cr16's are not syncronized. */
- if (cpu != 0)
- clear_sched_clock_stable();
-#endif
-
mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */
per_cpu(cpu_data, cpu).it_value = next_tick;
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index f3ead0b6c..75304af9f 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -89,8 +89,9 @@ SECTIONS
/* Start of data section */
_sdata = .;
- RO_DATA_SECTION(8)
-
+ /* Architecturally we need to keep __gp below 0x1000000 and thus
+ * in front of RO_DATA_SECTION() which stores lots of tracepoint
+ * and ftrace symbols. */
#ifdef CONFIG_64BIT
. = ALIGN(16);
/* Linkage tables */
@@ -105,6 +106,8 @@ SECTIONS
}
#endif
+ RO_DATA_SECTION(8)
+
/* unwind info */
.PARISC.unwind : {
__start___unwind = .;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 927d2ab2c..792cb1768 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -637,7 +637,7 @@ config FORCE_MAX_ZONEORDER
int "Maximum zone order"
range 8 9 if PPC64 && PPC_64K_PAGES
default "9" if PPC64 && PPC_64K_PAGES
- range 9 13 if PPC64 && !PPC_64K_PAGES
+ range 13 13 if PPC64 && !PPC_64K_PAGES
default "13" if PPC64 && !PPC_64K_PAGES
range 9 64 if PPC32 && PPC_16K_PAGES
default "9" if PPC32 && PPC_16K_PAGES
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 263bf39ce..9bd84ba06 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -6,6 +6,8 @@
*/
#define _PAGE_BIT_SWAP_TYPE 0
+#define _PAGE_RO 0
+
#define _PAGE_EXEC 0x00001 /* execute permission */
#define _PAGE_WRITE 0x00002 /* write access allowed */
#define _PAGE_READ 0x00004 /* read access allowed */
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 5f36e8a70..29aa8d1ce 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -994,6 +994,14 @@ static void eeh_handle_special_event(void)
/* Notify all devices to be down */
eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
bus = eeh_pe_bus_get(phb_pe);
+ if (!bus) {
+ pr_err("%s: Cannot find PCI bus for "
+ "PHB#%d-PE#%x\n",
+ __func__,
+ pe->phb->global_number,
+ pe->addr);
+ break;
+ }
eeh_pe_dev_traverse(pe,
eeh_report_failure, NULL);
pci_hp_remove_devices(bus);
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 64174bf95..05a0a913e 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -956,7 +956,7 @@ int __init nvram_remove_partition(const char *name, int sig,
/* Make partition a free partition */
part->header.signature = NVRAM_SIG_FREE;
- strncpy(part->header.name, "wwwwwwwwwwww", 12);
+ memset(part->header.name, 'w', 12);
part->header.checksum = nvram_checksum(&part->header);
rc = nvram_write_header(part);
if (rc <= 0) {
@@ -974,8 +974,8 @@ int __init nvram_remove_partition(const char *name, int sig,
}
if (prev) {
prev->header.length += part->header.length;
- prev->header.checksum = nvram_checksum(&part->header);
- rc = nvram_write_header(part);
+ prev->header.checksum = nvram_checksum(&prev->header);
+ rc = nvram_write_header(prev);
if (rc <= 0) {
printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc);
return rc;
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9ee2623e0..ad37aa175 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -88,7 +88,13 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
set_thread_flag(TIF_RESTORE_TM);
}
}
+
+static inline bool msr_tm_active(unsigned long msr)
+{
+ return MSR_TM_ACTIVE(msr);
+}
#else
+static inline bool msr_tm_active(unsigned long msr) { return false; }
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
@@ -208,7 +214,7 @@ void enable_kernel_fp(void)
EXPORT_SYMBOL(enable_kernel_fp);
static int restore_fp(struct task_struct *tsk) {
- if (tsk->thread.load_fp) {
+ if (tsk->thread.load_fp || msr_tm_active(tsk->thread.regs->msr)) {
load_fp_state(&current->thread.fp_state);
current->thread.load_fp++;
return 1;
@@ -278,7 +284,8 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
static int restore_altivec(struct task_struct *tsk)
{
- if (cpu_has_feature(CPU_FTR_ALTIVEC) && tsk->thread.load_vec) {
+ if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
+ (tsk->thread.load_vec || msr_tm_active(tsk->thread.regs->msr))) {
load_vr_state(&tsk->thread.vr_state);
tsk->thread.used_vr = 1;
tsk->thread.load_vec++;
@@ -438,6 +445,7 @@ void giveup_all(struct task_struct *tsk)
return;
msr_check_and_set(msr_all_available);
+ check_if_tm_restore_required(tsk);
#ifdef CONFIG_PPC_FPU
if (usermsr & MSR_FP)
@@ -464,7 +472,8 @@ void restore_math(struct pt_regs *regs)
{
unsigned long msr;
- if (!current->thread.load_fp && !loadvec(current->thread))
+ if (!msr_tm_active(regs->msr) &&
+ !current->thread.load_fp && !loadvec(current->thread))
return;
msr = regs->msr;
@@ -983,6 +992,13 @@ void restore_tm_state(struct pt_regs *regs)
msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
+ /* Ensure that restore_math() will restore */
+ if (msr_diff & MSR_FP)
+ current->thread.load_fp = 1;
+#ifdef CONFIG_ALTIVEC
+ if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
+ current->thread.load_vec = 1;
+#endif
restore_math(regs);
regs->msr |= msr_diff;
diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S
index 184a6ba7f..abf17feff 100644
--- a/arch/powerpc/kernel/vdso64/datapage.S
+++ b/arch/powerpc/kernel/vdso64/datapage.S
@@ -59,7 +59,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map)
bl V_LOCAL_FUNC(__get_datapage)
mtlr r12
addi r3,r3,CFG_SYSCALL_MAP64
- cmpli cr0,r4,0
+ cmpldi cr0,r4,0
crclr cr0*4+so
beqlr
li r0,NR_syscalls
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
index a76b4af37..382021324 100644
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -145,7 +145,7 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
bne cr0,99f
li r3,0
- cmpli cr0,r4,0
+ cmpldi cr0,r4,0
crclr cr0*4+so
beqlr
lis r5,CLOCK_REALTIME_RES@h
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index f09899e35..7b22624f3 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -359,6 +359,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
addi r3,r3,8
171:
177:
+179:
addi r3,r3,8
370:
372:
@@ -373,7 +374,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
173:
174:
175:
-179:
181:
184:
186:
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index bb0354222..362954f98 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -106,6 +106,8 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
switch (REGION_ID(ea)) {
case USER_REGION_ID:
pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
+ if (mm == NULL)
+ return 1;
psize = get_slice_psize(mm, ea);
ssize = user_segment_size(ea);
vsid = get_vsid(mm->context.id, ea, ssize);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 0821556e1..28923b2e2 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -526,7 +526,7 @@ static bool might_have_hea(void)
*/
#ifdef CONFIG_IBMEBUS
return !cpu_has_feature(CPU_FTR_ARCH_207S) &&
- !firmware_has_feature(FW_FEATURE_SPLPAR);
+ firmware_has_feature(FW_FEATURE_SPLPAR);
#else
return false;
#endif
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 7372ee13e..a5d3ecdab 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -1019,8 +1019,15 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
pte = READ_ONCE(*ptep);
mask = _PAGE_PRESENT | _PAGE_READ;
+
+ /*
+ * On some CPUs like the 8xx, _PAGE_RW hence _PAGE_WRITE is defined
+ * as 0 and _PAGE_RO has to be set when a page is not writable
+ */
if (write)
mask |= _PAGE_WRITE;
+ else
+ mask |= _PAGE_RO;
if ((pte_val(pte) & mask) != mask)
return 0;
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 86544ea85..ba17fdd87 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -1091,6 +1091,11 @@ static int pnv_eeh_reset(struct eeh_pe *pe, int option)
}
bus = eeh_pe_bus_get(pe);
+ if (!bus) {
+ pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
+ __func__, pe->phb->global_number, pe->addr);
+ return -EIO;
+ }
if (pe->type & EEH_PE_VF)
return pnv_eeh_reset_vf_pe(pe, option);
@@ -1306,7 +1311,7 @@ static void pnv_eeh_get_and_dump_hub_diag(struct pci_controller *hose)
return;
}
- switch (data->type) {
+ switch (be16_to_cpu(data->type)) {
case OPAL_P7IOC_DIAG_TYPE_RGC:
pr_info("P7IOC diag-data for RGC\n\n");
pnv_eeh_dump_hub_diag_common(data);
@@ -1538,7 +1543,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
/* Try best to clear it */
opal_pci_eeh_freeze_clear(phb->opal_id,
- frozen_pe_no,
+ be64_to_cpu(frozen_pe_no),
OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
ret = EEH_NEXT_ERR_NONE;
} else if ((*pe)->state & EEH_PE_ISOLATED ||
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index a21d831c1..0fe352005 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -309,8 +309,8 @@ static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
be64_to_cpu(data->dma1ErrorLog1));
for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
- if ((data->pestA[i] >> 63) == 0 &&
- (data->pestB[i] >> 63) == 0)
+ if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
+ (be64_to_cpu(data->pestB[i]) >> 63) == 0)
continue;
pr_info("PE[%3d] A/B: %016llx %016llx\n",
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 86707e678..aa35245d8 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -393,7 +393,7 @@ static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
unsigned long *vpn, int count,
int psize, int ssize)
{
- unsigned long param[8];
+ unsigned long param[PLPAR_HCALL9_BUFSIZE];
int i = 0, pix = 0, rc;
unsigned long flags = 0;
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
@@ -522,7 +522,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
unsigned long flags = 0;
struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
- unsigned long param[9];
+ unsigned long param[PLPAR_HCALL9_BUFSIZE];
unsigned long hash, index, shift, hidx, slot;
real_pte_t pte;
int psize, ssize;
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index 81d49476c..82e8e2b6a 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -233,8 +233,6 @@ void __init cpm_reset(void)
else
out_be32(&siu_conf->sc_sdcr, 1);
immr_unmap(siu_conf);
-
- cpm_muram_init();
}
static DEFINE_SPINLOCK(cmd_lock);
diff --git a/arch/powerpc/sysdev/cpm2.c b/arch/powerpc/sysdev/cpm2.c
index 8dc1e24f3..f78ff8416 100644
--- a/arch/powerpc/sysdev/cpm2.c
+++ b/arch/powerpc/sysdev/cpm2.c
@@ -66,10 +66,6 @@ void __init cpm2_reset(void)
cpm2_immr = ioremap(get_immrbase(), CPM_MAP_SIZE);
#endif
- /* Reclaim the DP memory for our use.
- */
- cpm_muram_init();
-
/* Tell everyone where the comm processor resides.
*/
cpmp = &cpm2_immr->im_cpm;
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index 947f42007..51bf749a4 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -37,6 +37,21 @@
#include <linux/of_gpio.h>
#endif
+static int __init cpm_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,cpm1");
+ if (!np)
+ np = of_find_compatible_node(NULL, NULL, "fsl,cpm2");
+ if (!np)
+ return -ENODEV;
+ cpm_muram_init();
+ of_node_put(np);
+ return 0;
+}
+subsys_initcall(cpm_init);
+
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
static u32 __iomem *cpm_udbg_txdesc;
static u8 __iomem *cpm_udbg_txbuf;
diff --git a/arch/powerpc/xmon/spr_access.S b/arch/powerpc/xmon/spr_access.S
index 84ad74213..7d8b0e8ed 100644
--- a/arch/powerpc/xmon/spr_access.S
+++ b/arch/powerpc/xmon/spr_access.S
@@ -2,12 +2,12 @@
/* unsigned long xmon_mfspr(sprn, default_value) */
_GLOBAL(xmon_mfspr)
- ld r5, .Lmfspr_table@got(r2)
+ PPC_LL r5, .Lmfspr_table@got(r2)
b xmon_mxspr
/* void xmon_mtspr(sprn, new_value) */
_GLOBAL(xmon_mtspr)
- ld r5, .Lmtspr_table@got(r2)
+ PPC_LL r5, .Lmtspr_table@got(r2)
b xmon_mxspr
/*
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index dfd0ca263..9746b780a 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -118,8 +118,13 @@ static int handle_validity(struct kvm_vcpu *vcpu)
vcpu->stat.exit_validity++;
trace_kvm_s390_intercept_validity(vcpu, viwhy);
- WARN_ONCE(true, "kvm: unhandled validity intercept 0x%x\n", viwhy);
- return -EOPNOTSUPP;
+ KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy,
+ current->pid, vcpu->kvm);
+
+ /* do not warn on invalid runtime instrumentation mode */
+ WARN_ONCE(viwhy != 0x44, "kvm: unhandled validity intercept 0x%x\n",
+ viwhy);
+ return -EINVAL;
}
static int handle_instruction(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 1fc4c59b9..7767a5fd9 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2052,7 +2052,7 @@ config HOTPLUG_CPU
config BOOTPARAM_HOTPLUG_CPU0
bool "Set default setting of cpu0_hotpluggable"
default n
- depends on HOTPLUG_CPU && !SCHED_MUQSS
+ depends on HOTPLUG_CPU
---help---
Set whether default state of cpu0_hotpluggable is on or off.
@@ -2081,7 +2081,7 @@ config BOOTPARAM_HOTPLUG_CPU0
config DEBUG_HOTPLUG_CPU0
def_bool n
prompt "Debug CPU0 hotplug"
- depends on HOTPLUG_CPU && !SCHED_MUQSS
+ depends on HOTPLUG_CPU
---help---
Enabling this option offlines CPU0 (if CPU0 can be offlined) as
soon as possible and boots up userspace with CPU0 offlined. User
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 5fa6ee2c2..824c48d6a 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -54,7 +54,7 @@ CONFIG_HIGHPTE=y
CONFIG_X86_CHECK_BIOS_CORRUPTION=y
# CONFIG_MTRR_SANITIZER is not set
CONFIG_EFI=y
-CONFIG_HZ_1000=y
+CONFIG_HZ_100=y
CONFIG_KEXEC=y
CONFIG_CRASH_DUMP=y
# CONFIG_COMPAT_VDSO is not set
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index d28bdabcc..b8c4f668c 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -52,7 +52,7 @@ CONFIG_NUMA=y
CONFIG_X86_CHECK_BIOS_CORRUPTION=y
# CONFIG_MTRR_SANITIZER is not set
CONFIG_EFI=y
-CONFIG_HZ_1000=y
+CONFIG_HZ_100=y
CONFIG_KEXEC=y
CONFIG_CRASH_DUMP=y
# CONFIG_COMPAT_VDSO is not set
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 8a90f1517..625eb698c 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -348,7 +348,7 @@ int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
* continue building up new bios map based on this
* information
*/
- if (current_type != last_type) {
+ if (current_type != last_type || current_type == E820_PRAM) {
if (last_type != 0) {
new_bios[new_bios_entry].size =
change_point[chgidx]->addr - last_addr;
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index de7501edb..8b8852bc2 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -317,16 +317,11 @@ static phys_addr_t __init i85x_stolen_base(int num, int slot, int func,
static phys_addr_t __init i865_stolen_base(int num, int slot, int func,
size_t stolen_size)
{
- u16 toud;
+ u16 toud = 0;
- /*
- * FIXME is the graphics stolen memory region
- * always at TOUD? Ie. is it always the last
- * one to be allocated by the BIOS?
- */
toud = read_pci_config_16(0, 0, 0, I865_TOUD);
- return (phys_addr_t)toud << 16;
+ return (phys_addr_t)(toud << 16) + i845_tseg_size();
}
static phys_addr_t __init gen3_stolen_base(int num, int slot, int func,
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 82b17373b..9e152cdab 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1408,15 +1408,17 @@ __init void prefill_possible_map(void)
/* No boot processor was found in mptable or ACPI MADT */
if (!num_processors) {
- int apicid = boot_cpu_physical_apicid;
- int cpu = hard_smp_processor_id();
+ if (boot_cpu_has(X86_FEATURE_APIC)) {
+ int apicid = boot_cpu_physical_apicid;
+ int cpu = hard_smp_processor_id();
- pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);
+ pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);
- /* Make sure boot cpu is enumerated */
- if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
- apic->apic_id_valid(apicid))
- generic_processor_info(apicid, boot_cpu_apic_version);
+ /* Make sure boot cpu is enumerated */
+ if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
+ apic->apic_id_valid(apicid))
+ generic_processor_info(apicid, boot_cpu_apic_version);
+ }
if (!num_processors)
num_processors = 1;
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index c7220ba94..1a22de70f 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -594,7 +594,7 @@ static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
ioapic->irr = 0;
ioapic->irr_delivered = 0;
ioapic->id = 0;
- memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS);
+ memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
rtc_irq_eoi_tracking_reset(ioapic);
}
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
index 23f2f3e41..58e152b3b 100644
--- a/arch/x86/platform/uv/bios_uv.c
+++ b/arch/x86/platform/uv/bios_uv.c
@@ -40,7 +40,15 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
*/
return BIOS_STATUS_UNIMPLEMENTED;
- ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
+ /*
+ * If EFI_OLD_MEMMAP is set, we need to fall back to using our old EFI
+ * callback method, which uses efi_call() directly, with the kernel page tables:
+ */
+ if (unlikely(test_bit(EFI_OLD_MEMMAP, &efi.flags)))
+ ret = efi_call((void *)__va(tab->function), (u64)which, a1, a2, a3, a4, a5);
+ else
+ ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
+
return ret;
}
EXPORT_SYMBOL_GPL(uv_bios_call);
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index eef6ff49c..ecb949ec6 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -4264,8 +4264,7 @@ static void bfq_insert_request(struct request_queue *q, struct request *rq)
bfq_add_request(rq);
- rq->fifo_time = ktime_get_ns() +
- jiffies_to_nsecs(bfqd->bfq_fifo_expire[rq_is_sync(rq)]);
+ rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
list_add_tail(&rq->queuelist, &bfqq->fifo);
bfq_rq_enqueued(bfqd, bfqq, rq);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index dd38e5ced..b08ccbb93 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1340,10 +1340,8 @@ int blkcg_policy_register(struct blkcg_policy *pol)
struct blkcg_policy_data *cpd;
cpd = pol->cpd_alloc_fn(GFP_KERNEL);
- if (!cpd) {
- mutex_unlock(&blkcg_pol_mutex);
+ if (!cpd)
goto err_free_cpds;
- }
blkcg->cpd[pol->plid] = cpd;
cpd->blkcg = blkcg;
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 70a892e87..f624ac98c 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -117,7 +117,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
struct crypto_skcipher *ctr = ctx->ctr;
struct {
be128 hash;
- u8 iv[8];
+ u8 iv[16];
struct crypto_gcm_setkey_result result;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 6482d47de..d5572295c 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -97,7 +97,7 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
int ret;
ret = of_irq_get(dev->dev.of_node, num);
- if (ret >= 0 || ret == -EPROBE_DEFER)
+ if (ret > 0 || ret == -EPROBE_DEFER)
return ret;
}
@@ -175,7 +175,7 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name)
int ret;
ret = of_irq_get_byname(dev->dev.of_node, name);
- if (ret >= 0 || ret == -EPROBE_DEFER)
+ if (ret > 0 || ret == -EPROBE_DEFER)
return ret;
}
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 01d4be2c3..f5c26a5f6 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -385,7 +385,7 @@ static int omap_rng_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
- if (ret) {
+ if (ret < 0) {
dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret);
pm_runtime_put_noidle(&pdev->dev);
goto err_ioremap;
@@ -443,7 +443,7 @@ static int __maybe_unused omap_rng_resume(struct device *dev)
int ret;
ret = pm_runtime_get_sync(dev);
- if (ret) {
+ if (ret < 0) {
dev_err(dev, "Failed to runtime_get device: %d\n", ret);
pm_runtime_put_noidle(dev);
return ret;
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 7a7970865..0fc71cbaa 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -1006,16 +1006,28 @@ static int bcm2835_clock_set_rate(struct clk_hw *hw,
return 0;
}
+static bool
+bcm2835_clk_is_pllc(struct clk_hw *hw)
+{
+ if (!hw)
+ return false;
+
+ return strncmp(clk_hw_get_name(hw), "pllc", 4) == 0;
+}
+
static int bcm2835_clock_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
struct clk_hw *parent, *best_parent = NULL;
+ bool current_parent_is_pllc;
unsigned long rate, best_rate = 0;
unsigned long prate, best_prate = 0;
size_t i;
u32 div;
+ current_parent_is_pllc = bcm2835_clk_is_pllc(clk_hw_get_parent(hw));
+
/*
* Select parent clock that results in the closest but lower rate
*/
@@ -1023,6 +1035,17 @@ static int bcm2835_clock_determine_rate(struct clk_hw *hw,
parent = clk_hw_get_parent_by_index(hw, i);
if (!parent)
continue;
+
+ /*
+ * Don't choose a PLLC-derived clock as our parent
+ * unless it had been manually set that way. PLLC's
+ * frequency gets adjusted by the firmware due to
+ * over-temp or under-voltage conditions, without
+ * prior notification to our clock consumer.
+ */
+ if (bcm2835_clk_is_pllc(parent) && !current_parent_is_pllc)
+ continue;
+
prate = clk_hw_get_rate(parent);
div = bcm2835_clock_choose_div(hw, req->rate, prate, true);
rate = bcm2835_clock_rate_from_divisor(clock, prate, div);
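The guard added to determine_rate encodes a policy: never migrate onto a PLLC-derived parent automatically, since the firmware re-trims PLLC under thermal or under-voltage pressure and the consumer's rate would change with no notification; a PLLC parent is honoured only if it was chosen explicitly. The name-prefix test and skip condition in isolation, as a minimal sketch (candidate iteration simplified away):

#include <stdbool.h>
#include <string.h>

static bool is_pllc_name(const char *name)
{
        /* Matches "pllc" and its per-channel children, e.g. "pllc_per". */
        return name && strncmp(name, "pllc", 4) == 0;
}

/* Inside the candidate loop: skip PLLC-derived parents unless the
 * clock is already parented there (i.e. someone chose it on purpose). */
static bool skip_candidate(const char *candidate, const char *current_parent)
{
        return is_pllc_name(candidate) && !is_pllc_name(current_parent);
}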
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index a0f55bc1a..96386ffc8 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -352,7 +352,7 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
/* if read only, just return current value */
if (divider->flags & CLK_DIVIDER_READ_ONLY) {
- bestdiv = readl(divider->reg) >> divider->shift;
+ bestdiv = clk_readl(divider->reg) >> divider->shift;
bestdiv &= div_mask(divider->width);
bestdiv = _get_div(divider->table, bestdiv, divider->flags,
divider->width);
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 58566a179..20b105584 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -766,7 +766,11 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
if (!hwc)
return NULL;
- hwc->reg = cg->regs + 0x20 * idx;
+ if (cg->info.flags & CG_VER3)
+ hwc->reg = cg->regs + 0x70000 + 0x20 * idx;
+ else
+ hwc->reg = cg->regs + 0x20 * idx;
+
hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]];
/*
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 820a939fb..2877a4dde 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1908,10 +1908,6 @@ int clk_set_phase(struct clk *clk, int degrees)
clk_prepare_lock();
- /* bail early if nothing to do */
- if (degrees == clk->core->phase)
- goto out;
-
trace_clk_set_phase(clk->core, degrees);
if (clk->core->ops->set_phase)
@@ -1922,7 +1918,6 @@ int clk_set_phase(struct clk *clk, int degrees)
if (!ret)
clk->core->phase = degrees;
-out:
clk_prepare_unlock();
return ret;
@@ -3186,7 +3181,7 @@ struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
{
struct of_clk_provider *provider;
struct clk *clk = ERR_PTR(-EPROBE_DEFER);
- struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
+ struct clk_hw *hw;
if (!clkspec)
return ERR_PTR(-EINVAL);
@@ -3194,12 +3189,13 @@ struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
/* Check if we have such a provider in our array */
mutex_lock(&of_clk_mutex);
list_for_each_entry(provider, &of_clk_providers, link) {
- if (provider->node == clkspec->np)
+ if (provider->node == clkspec->np) {
hw = __of_clk_get_hw_from_provider(provider, clkspec);
- if (!IS_ERR(hw)) {
clk = __clk_create_clk(hw, dev_id, con_id);
+ }
- if (!IS_ERR(clk) && !__clk_get(clk)) {
+ if (!IS_ERR(clk)) {
+ if (!__clk_get(clk)) {
__clk_free_clk(clk);
clk = ERR_PTR(-ENOENT);
}
diff --git a/drivers/clk/imx/clk-imx35.c b/drivers/clk/imx/clk-imx35.c
index b0978d3b8..d302ed3b8 100644
--- a/drivers/clk/imx/clk-imx35.c
+++ b/drivers/clk/imx/clk-imx35.c
@@ -115,7 +115,7 @@ static void __init _mx35_clocks_init(void)
}
clk[ckih] = imx_clk_fixed("ckih", 24000000);
- clk[ckil] = imx_clk_fixed("ckih", 32768);
+ clk[ckil] = imx_clk_fixed("ckil", 32768);
clk[mpll] = imx_clk_pllv1(IMX_PLLV1_IMX35, "mpll", "ckih", base + MX35_CCM_MPCTL);
clk[ppll] = imx_clk_pllv1(IMX_PLLV1_IMX35, "ppll", "ckih", base + MX35_CCM_PPCTL);
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index ba1c1ae72..ce8ea1040 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -318,11 +318,16 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_IPG_PER_SEL] = imx_clk_mux("ipg_per_sel", base + 0x1c, 6, 1, ipg_per_sels, ARRAY_SIZE(ipg_per_sels));
clk[IMX6QDL_CLK_UART_SEL] = imx_clk_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels));
clk[IMX6QDL_CLK_GPU2D_CORE_SEL] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 16, 2, gpu2d_core_sels_2, ARRAY_SIZE(gpu2d_core_sels_2));
+ } else if (clk_on_imx6dl()) {
+ clk[IMX6QDL_CLK_MLB_SEL] = imx_clk_mux("mlb_sel", base + 0x18, 16, 2, gpu2d_core_sels, ARRAY_SIZE(gpu2d_core_sels));
} else {
clk[IMX6QDL_CLK_GPU2D_CORE_SEL] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 16, 2, gpu2d_core_sels, ARRAY_SIZE(gpu2d_core_sels));
}
clk[IMX6QDL_CLK_GPU3D_CORE_SEL] = imx_clk_mux("gpu3d_core_sel", base + 0x18, 4, 2, gpu3d_core_sels, ARRAY_SIZE(gpu3d_core_sels));
- clk[IMX6QDL_CLK_GPU3D_SHADER_SEL] = imx_clk_mux("gpu3d_shader_sel", base + 0x18, 8, 2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels));
+ if (clk_on_imx6dl())
+ clk[IMX6QDL_CLK_GPU2D_CORE_SEL] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 8, 2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels));
+ else
+ clk[IMX6QDL_CLK_GPU3D_SHADER_SEL] = imx_clk_mux("gpu3d_shader_sel", base + 0x18, 8, 2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels));
clk[IMX6QDL_CLK_IPU1_SEL] = imx_clk_mux("ipu1_sel", base + 0x3c, 9, 2, ipu_sels, ARRAY_SIZE(ipu_sels));
clk[IMX6QDL_CLK_IPU2_SEL] = imx_clk_mux("ipu2_sel", base + 0x3c, 14, 2, ipu_sels, ARRAY_SIZE(ipu_sels));
clk[IMX6QDL_CLK_LDB_DI0_SEL] = imx_clk_mux_flags("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT);
@@ -400,9 +405,15 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
clk[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
}
- clk[IMX6QDL_CLK_GPU2D_CORE_PODF] = imx_clk_divider("gpu2d_core_podf", "gpu2d_core_sel", base + 0x18, 23, 3);
+ if (clk_on_imx6dl())
+ clk[IMX6QDL_CLK_MLB_PODF] = imx_clk_divider("mlb_podf", "mlb_sel", base + 0x18, 23, 3);
+ else
+ clk[IMX6QDL_CLK_GPU2D_CORE_PODF] = imx_clk_divider("gpu2d_core_podf", "gpu2d_core_sel", base + 0x18, 23, 3);
clk[IMX6QDL_CLK_GPU3D_CORE_PODF] = imx_clk_divider("gpu3d_core_podf", "gpu3d_core_sel", base + 0x18, 26, 3);
- clk[IMX6QDL_CLK_GPU3D_SHADER] = imx_clk_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3);
+ if (clk_on_imx6dl())
+ clk[IMX6QDL_CLK_GPU2D_CORE_PODF] = imx_clk_divider("gpu2d_core_podf", "gpu2d_core_sel", base + 0x18, 29, 3);
+ else
+ clk[IMX6QDL_CLK_GPU3D_SHADER] = imx_clk_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3);
clk[IMX6QDL_CLK_IPU1_PODF] = imx_clk_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3);
clk[IMX6QDL_CLK_IPU2_PODF] = imx_clk_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3);
clk[IMX6QDL_CLK_LDB_DI0_PODF] = imx_clk_divider_flags("ldb_di0_podf", "ldb_di0_div_3_5", base + 0x20, 10, 1, 0);
@@ -473,14 +484,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_ESAI_MEM] = imx_clk_gate2_shared("esai_mem", "ahb", base + 0x6c, 16, &share_count_esai);
clk[IMX6QDL_CLK_GPT_IPG] = imx_clk_gate2("gpt_ipg", "ipg", base + 0x6c, 20);
clk[IMX6QDL_CLK_GPT_IPG_PER] = imx_clk_gate2("gpt_ipg_per", "ipg_per", base + 0x6c, 22);
- if (clk_on_imx6dl())
- /*
- * The multiplexer and divider of imx6q clock gpu3d_shader get
- * redefined/reused as gpu2d_core_sel and gpu2d_core_podf on imx6dl.
- */
- clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu3d_shader", base + 0x6c, 24);
- else
- clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24);
+ clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24);
clk[IMX6QDL_CLK_GPU3D_CORE] = imx_clk_gate2("gpu3d_core", "gpu3d_core_podf", base + 0x6c, 26);
clk[IMX6QDL_CLK_HDMI_IAHB] = imx_clk_gate2("hdmi_iahb", "ahb", base + 0x70, 0);
clk[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_gate2("hdmi_isfr", "video_27m", base + 0x70, 4);
@@ -511,7 +515,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
* The multiplexer and divider of the imx6q clock gpu2d get
* redefined/reused as mlb_sys_sel and mlb_sys_clk_podf on imx6dl.
*/
- clk[IMX6QDL_CLK_MLB] = imx_clk_gate2("mlb", "gpu2d_core_podf", base + 0x74, 18);
+ clk[IMX6QDL_CLK_MLB] = imx_clk_gate2("mlb", "mlb_podf", base + 0x74, 18);
else
clk[IMX6QDL_CLK_MLB] = imx_clk_gate2("mlb", "axi", base + 0x74, 18);
clk[IMX6QDL_CLK_MMDC_CH0_AXI] = imx_clk_gate2("mmdc_ch0_axi", "mmdc_ch0_axi_podf", base + 0x74, 20);
@@ -629,6 +633,24 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
if (IS_ENABLED(CONFIG_PCI_IMX6))
clk_set_parent(clk[IMX6QDL_CLK_LVDS1_SEL], clk[IMX6QDL_CLK_SATA_REF_100M]);
+ /*
+ * Initialize the GPU clock muxes, so that the maximum specified clock
+ * rates for the respective SoC are not exceeded.
+ */
+ if (clk_on_imx6dl()) {
+ clk_set_parent(clk[IMX6QDL_CLK_GPU3D_CORE_SEL],
+ clk[IMX6QDL_CLK_PLL2_PFD1_594M]);
+ clk_set_parent(clk[IMX6QDL_CLK_GPU2D_CORE_SEL],
+ clk[IMX6QDL_CLK_PLL2_PFD1_594M]);
+ } else if (clk_on_imx6q()) {
+ clk_set_parent(clk[IMX6QDL_CLK_GPU3D_CORE_SEL],
+ clk[IMX6QDL_CLK_MMDC_CH0_AXI]);
+ clk_set_parent(clk[IMX6QDL_CLK_GPU3D_SHADER_SEL],
+ clk[IMX6QDL_CLK_PLL2_PFD1_594M]);
+ clk_set_parent(clk[IMX6QDL_CLK_GPU2D_CORE_SEL],
+ clk[IMX6QDL_CLK_PLL3_USB_OTG]);
+ }
+
imx_register_uart_clocks(uart_clks);
}
CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init);
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 95e3b3e0f..98909b184 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -117,6 +117,7 @@ config MSM_MMCC_8974
config MSM_GCC_8996
tristate "MSM8996 Global Clock Controller"
+ select QCOM_GDSC
depends on COMMON_CLK_QCOM
help
Support for the global clock controller on msm8996 devices.
@@ -126,6 +127,7 @@ config MSM_GCC_8996
config MSM_MMCC_8996
tristate "MSM8996 Multimedia Clock Controller"
select MSM_GCC_8996
+ select QCOM_GDSC
depends on COMMON_CLK_QCOM
help
Support for the multimedia clock controller on msm8996 devices.
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index bbf732bbc..9f643cca8 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -2592,9 +2592,9 @@ static struct clk_branch gcc_pcie_2_aux_clk = {
};
static struct clk_branch gcc_pcie_2_pipe_clk = {
- .halt_reg = 0x6e108,
+ .halt_reg = 0x6e018,
.clkr = {
- .enable_reg = 0x6e108,
+ .enable_reg = 0x6e018,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_2_pipe_clk",
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 2ee40fd36..e1aa531a4 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -68,6 +68,8 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "sigma,tango4" },
+ { .compatible = "ti,am33xx", },
+ { .compatible = "ti,dra7", },
{ .compatible = "ti,omap2", },
{ .compatible = "ti,omap3", },
{ .compatible = "ti,omap4", },
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index af8825ad2..f40450d26 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -17,6 +17,7 @@
struct cs_policy_dbs_info {
struct policy_dbs_info policy_dbs;
unsigned int down_skip;
+ unsigned int requested_freq;
};
static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
@@ -67,6 +68,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
{
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
+ unsigned int requested_freq = dbs_info->requested_freq;
struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int load = dbs_update(policy);
@@ -78,10 +80,16 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
if (cs_tuners->freq_step == 0)
goto out;
+ /*
+ * If requested_freq is out of range, it is likely that the limits
+ * changed in the meantime, so fall back to current frequency in that
+ * case.
+ */
+ if (requested_freq > policy->max || requested_freq < policy->min)
+ requested_freq = policy->cur;
+
/* Check for frequency increase */
if (load > dbs_data->up_threshold) {
- unsigned int requested_freq = policy->cur;
-
dbs_info->down_skip = 0;
/* if we are already at full speed then break out early */
@@ -89,8 +97,11 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
goto out;
requested_freq += get_freq_target(cs_tuners, policy);
+ if (requested_freq > policy->max)
+ requested_freq = policy->max;
__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_H);
+ dbs_info->requested_freq = requested_freq;
goto out;
}
@@ -101,7 +112,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
/* Check for frequency decrease */
if (load < cs_tuners->down_threshold) {
- unsigned int freq_target, requested_freq = policy->cur;
+ unsigned int freq_target;
/*
* if we cannot reduce the frequency anymore, break out early
*/
@@ -115,6 +126,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
requested_freq = policy->min;
__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_L);
+ dbs_info->requested_freq = requested_freq;
}
out:
@@ -293,6 +305,7 @@ static void cs_start(struct cpufreq_policy *policy)
struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
dbs_info->down_skip = 0;
+ dbs_info->requested_freq = policy->cur;
}
static struct dbs_governor cs_governor = {
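Behind this diff: the governor used to restart every evaluation from policy->cur, which lags the previously requested target while the hardware is still ramping, so successive decisions could ping-pong. It now remembers its own requested_freq across invocations and merely resynchronizes when the policy limits have moved out from under it. The clamp step in isolation, with struct fields simplified:

struct policy_limits {
        unsigned int cur, min, max;
};

static unsigned int resync_requested_freq(unsigned int requested,
                                          const struct policy_limits *p)
{
        /* Limits changed since the last evaluation: the remembered
         * target is stale, so fall back to the current frequency. */
        if (requested > p->max || requested < p->min)
                requested = p->cur;
        return requested;
}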
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index be9eade14..b46547e90 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -556,12 +556,12 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask)
int min, hw_min, max, hw_max, cpu, range, adj_range;
u64 value, cap;
- rdmsrl(MSR_HWP_CAPABILITIES, cap);
- hw_min = HWP_LOWEST_PERF(cap);
- hw_max = HWP_HIGHEST_PERF(cap);
- range = hw_max - hw_min;
-
for_each_cpu(cpu, cpumask) {
+ rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
+ hw_min = HWP_LOWEST_PERF(cap);
+ hw_max = HWP_HIGHEST_PERF(cap);
+ range = hw_max - hw_min;
+
rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
adj_range = limits->min_perf_pct * range / 100;
min = hw_min + adj_range;
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 94f77b0f9..32f645ea7 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -650,7 +650,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
"%s-dmaengine-desc-cache",
ccp->name);
- if (!dma_cmd_cache_name)
+ if (!dma_desc_cache_name)
return -ENOMEM;
ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
sizeof(struct ccp_dma_desc),
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index d64af8625..37dadb2a4 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -166,6 +166,7 @@ static irqreturn_t mv_cesa_int(int irq, void *priv)
if (!req)
break;
+ ctx = crypto_tfm_ctx(req->tfm);
mv_cesa_complete_req(ctx, req, 0);
}
}
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 82e0f4e6e..b111e14ba 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -805,13 +805,14 @@ static int mv_cesa_md5_init(struct ahash_request *req)
struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);
+
+ mv_cesa_ahash_init(req, &tmpl, true);
+
creq->state[0] = MD5_H0;
creq->state[1] = MD5_H1;
creq->state[2] = MD5_H2;
creq->state[3] = MD5_H3;
- mv_cesa_ahash_init(req, &tmpl, true);
-
return 0;
}
@@ -873,14 +874,15 @@ static int mv_cesa_sha1_init(struct ahash_request *req)
struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);
+
+ mv_cesa_ahash_init(req, &tmpl, false);
+
creq->state[0] = SHA1_H0;
creq->state[1] = SHA1_H1;
creq->state[2] = SHA1_H2;
creq->state[3] = SHA1_H3;
creq->state[4] = SHA1_H4;
- mv_cesa_ahash_init(req, &tmpl, false);
-
return 0;
}
@@ -942,6 +944,9 @@ static int mv_cesa_sha256_init(struct ahash_request *req)
struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);
+
+ mv_cesa_ahash_init(req, &tmpl, false);
+
creq->state[0] = SHA256_H0;
creq->state[1] = SHA256_H1;
creq->state[2] = SHA256_H2;
@@ -951,8 +956,6 @@ static int mv_cesa_sha256_init(struct ahash_request *req)
creq->state[6] = SHA256_H6;
creq->state[7] = SHA256_H7;
- mv_cesa_ahash_init(req, &tmpl, false);
-
return 0;
}
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index 2bf37e68a..dd184b50e 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -286,22 +286,21 @@ static void ipu_irq_handler(struct irq_desc *desc)
raw_spin_unlock(&bank_lock);
while ((line = ffs(status))) {
struct ipu_irq_map *map;
- unsigned int irq = NO_IRQ;
+ unsigned int irq;
line--;
status &= ~(1UL << line);
raw_spin_lock(&bank_lock);
map = src2map(32 * i + line);
- if (map)
- irq = map->irq;
- raw_spin_unlock(&bank_lock);
-
if (!map) {
+ raw_spin_unlock(&bank_lock);
pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
line, i);
continue;
}
+ irq = map->irq;
+ raw_spin_unlock(&bank_lock);
generic_handle_irq(irq);
}
}
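The reordering here is a lock-scope fix: map->irq was previously read after bank_lock had been dropped, leaving a window in which the mapping could be torn down underneath the reader. The rule it restores, as a generic pthread sketch (src2map stands in for the protected lookup):

#include <pthread.h>

struct mapping { unsigned int irq; };

static pthread_mutex_t bank_lock = PTHREAD_MUTEX_INITIALIZER;

extern struct mapping *src2map(unsigned int src); /* hypothetical lookup */

static int lookup_irq(unsigned int src, unsigned int *irq)
{
        struct mapping *map;

        pthread_mutex_lock(&bank_lock);
        map = src2map(src);
        if (!map) {
                pthread_mutex_unlock(&bank_lock);
                return -1;             /* unmapped source */
        }
        *irq = map->irq;               /* copy out while still protected */
        pthread_mutex_unlock(&bank_lock);

        return 0;
}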
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 425501c39..793518a30 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -239,7 +239,7 @@ static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_data(irq, h->host_data);
- irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_level_irq);
+ irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_edge_irq);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 17e13621f..4e71a680e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,6 +43,9 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
ctx->rings[i].sequence = 1;
ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
}
+
+ ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+
/* create context entity for each ring */
for (i = 0; i < adev->num_rings; i++) {
struct amdgpu_ring *ring = adev->rings[i];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index fe36caf1b..14f57d991 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -113,24 +113,26 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
printk("\n");
}
+
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
- u32 line_time_us, vblank_lines;
+ u32 vblank_in_pixels;
u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
- line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
- amdgpu_crtc->hw_mode.clock;
- vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
+ vblank_in_pixels =
+ amdgpu_crtc->hw_mode.crtc_htotal *
+ (amdgpu_crtc->hw_mode.crtc_vblank_end -
amdgpu_crtc->hw_mode.crtc_vdisplay +
- (amdgpu_crtc->v_border * 2);
- vblank_time_us = vblank_lines * line_time_us;
+ (amdgpu_crtc->v_border * 2));
+
+ vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
break;
}
}
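The rewrite fixes an integer-precision bug: computing line_time_us first truncates the fractional microseconds of each line, and that error is then multiplied by the number of vblank lines, understating the vblank interval. Multiplying everything out in pixels and dividing once keeps the intermediate exact. A worked comparison with 1080p-class timings (clock in kHz, as in the driver):

#include <stdio.h>

int main(void)
{
        unsigned int htotal = 2200, vblank_lines = 45, clock_khz = 148500;

        /* old: per-line time truncates 14.81us down to 14us */
        unsigned int old_us = (htotal * 1000 / clock_khz) * vblank_lines;
        /* new: one division at the end, intermediate stays exact */
        unsigned int new_us = htotal * vblank_lines * 1000 / clock_khz;

        printf("old=%u us, new=%u us\n", old_us, new_us); /* old=630, new=666 */
        return 0;
}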
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d942654a1..e24a8af72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -292,7 +292,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
type = AMD_IP_BLOCK_TYPE_UVD;
ring_mask = adev->uvd.ring.ready ? 1 : 0;
ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 8;
+ ib_size_alignment = 16;
break;
case AMDGPU_HW_IP_VCE:
type = AMD_IP_BLOCK_TYPE_VCE;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index c1b04e9aa..172bed946 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -425,16 +425,6 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
- /* don't try to enable hpd on eDP or LVDS avoid breaking the
- * aux dp channel on imac and help (but not completely fix)
- * https://bugzilla.redhat.com/show_bug.cgi?id=726143
- * also avoid interrupt storms during dpms.
- */
- continue;
- }
-
switch (amdgpu_connector->hpd.hpd) {
case AMDGPU_HPD_1:
idx = 0;
@@ -458,6 +448,19 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
continue;
}
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS, to avoid breaking the
+			 * aux dp channel on imac and to help (but not completely fix)
+ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+ * also avoid interrupt storms during dpms.
+ */
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ continue;
+ }
+
tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index d4bf13390..67c7c05a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -443,16 +443,6 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
- /* don't try to enable hpd on eDP or LVDS avoid breaking the
- * aux dp channel on imac and help (but not completely fix)
- * https://bugzilla.redhat.com/show_bug.cgi?id=726143
- * also avoid interrupt storms during dpms.
- */
- continue;
- }
-
switch (amdgpu_connector->hpd.hpd) {
case AMDGPU_HPD_1:
idx = 0;
@@ -476,6 +466,19 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
continue;
}
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS, to avoid breaking the
+			 * aux dp channel on imac and to help (but not completely fix)
+ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+ * also avoid interrupt storms during dpms.
+ */
+ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
+ tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
+ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
+ continue;
+ }
+
tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
@@ -3109,6 +3112,7 @@ static int dce_v11_0_sw_fini(void *handle)
dce_v11_0_afmt_fini(adev);
+ drm_mode_config_cleanup(adev->ddev);
adev->mode_info.mode_config_initialized = false;
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 4fdfab1e9..ea07c5036 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -395,15 +395,6 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
- /* don't try to enable hpd on eDP or LVDS avoid breaking the
- * aux dp channel on imac and help (but not completely fix)
- * https://bugzilla.redhat.com/show_bug.cgi?id=726143
- * also avoid interrupt storms during dpms.
- */
- continue;
- }
switch (amdgpu_connector->hpd.hpd) {
case AMDGPU_HPD_1:
WREG32(mmDC_HPD1_CONTROL, tmp);
@@ -426,6 +417,45 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
default:
break;
}
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS, to avoid breaking the
+			 * aux dp channel on imac and to help (but not completely fix)
+ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+ * also avoid interrupt storms during dpms.
+ */
+ u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
+
+ switch (amdgpu_connector->hpd.hpd) {
+ case AMDGPU_HPD_1:
+ dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_2:
+ dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_3:
+ dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_4:
+ dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_5:
+ dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
+ break;
+ case AMDGPU_HPD_6:
+ dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
+ break;
+ default:
+ continue;
+ }
+
+ dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
+ dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+ WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
+ continue;
+ }
+
dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 635fc4b48..92b117843 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -262,6 +262,8 @@ static const pem_event_action * const display_config_change_event[] = {
unblock_adjust_power_state_tasks,
set_cpu_power_state,
notify_hw_power_source_tasks,
+ get_2d_performance_state_tasks,
+ set_performance_state_tasks,
/* updateDALConfigurationTasks,
variBrightDisplayConfigurationChangeTasks, */
adjust_power_state_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
index a46225c0f..d6bee7274 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
@@ -100,11 +100,12 @@ int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip)
if (requested == NULL)
return 0;
+ phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
+
if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr, &pcurrent->hardware, &requested->hardware, &equal)))
equal = false;
if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) {
- phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
hwmgr->current_ps = requested;
}
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 780589b42..9c4387d79 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -335,14 +335,17 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
* using the PRIME helpers.
*/
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj, int flags)
+ struct drm_gem_object *obj,
+ int flags)
{
- DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
- exp_info.ops = &drm_gem_prime_dmabuf_ops;
- exp_info.size = obj->size;
- exp_info.flags = flags;
- exp_info.priv = obj;
+ struct dma_buf_export_info exp_info = {
+ .exp_name = KBUILD_MODNAME, /* white lie for debug */
+ .owner = dev->driver->fops->owner,
+ .ops = &drm_gem_prime_dmabuf_ops,
+ .size = obj->size,
+ .flags = flags,
+ .priv = obj,
+ };
if (dev->driver->gem_prime_res_obj)
exp_info.resv = dev->driver->gem_prime_res_obj(obj);
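A note on the drm_prime hunk above: open-coding the initializer instead of using the DEFINE_DMA_BUF_EXPORT_INFO() macro appears to be there so that .owner can point at the GEM driver's own module (the "white lie" comment refers to exp_name still reporting drm_prime.c's KBUILD_MODNAME). Members not listed, such as .resv before the check that follows, start out zeroed exactly as with the macro form. A minimal userspace illustration of that zeroing, with hypothetical stand-in types:

#include <stdio.h>

struct export_info {
	const char *exp_name;
	const void *owner;
	size_t size;
	int flags;
	void *priv;
	void *resv;
};

int main(void)
{
	char buf[64];
	/* members not listed (resv here) start out zeroed, just as with
	 * the macro-based initializer this replaces */
	struct export_info exp_info = {
		.exp_name = "drm",	/* stand-in for KBUILD_MODNAME */
		.owner = buf,		/* stand-in for the driver module */
		.size = sizeof(buf),
		.flags = 0,
		.priv = buf,
	};

	printf("%s size=%zu resv=%p\n",
	       exp_info.exp_name, exp_info.size, exp_info.resv);
	return 0;
}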
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 7882387f9..5fc8ebdf4 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -330,6 +330,7 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
const char *pix_clk_in_name;
const struct of_device_id *id;
int ret;
+ u8 div_ratio_shift = 0;
fsl_dev = devm_kzalloc(dev, sizeof(*fsl_dev), GFP_KERNEL);
if (!fsl_dev)
@@ -382,11 +383,14 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
pix_clk_in = fsl_dev->clk;
}
+ if (of_property_read_bool(dev->of_node, "big-endian"))
+ div_ratio_shift = 24;
+
pix_clk_in_name = __clk_get_name(pix_clk_in);
snprintf(pix_clk_name, sizeof(pix_clk_name), "%s_pix", pix_clk_in_name);
fsl_dev->pix_clk = clk_register_divider(dev, pix_clk_name,
pix_clk_in_name, 0, base + DCU_DIV_RATIO,
- 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL);
+ div_ratio_shift, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL);
if (IS_ERR(fsl_dev->pix_clk)) {
dev_err(dev, "failed to register pix clk\n");
ret = PTR_ERR(fsl_dev->pix_clk);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f68c78918..84a001058 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -631,6 +631,8 @@ struct drm_i915_display_funcs {
struct intel_crtc_state *crtc_state);
void (*crtc_enable)(struct drm_crtc *crtc);
void (*crtc_disable)(struct drm_crtc *crtc);
+ void (*update_crtcs)(struct drm_atomic_state *state,
+ unsigned int *crtc_vblank_mask);
void (*audio_codec_enable)(struct drm_connector *connector,
struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode);
@@ -1965,11 +1967,11 @@ struct drm_i915_private {
struct vlv_s0ix_state vlv_s0ix_state;
enum {
- I915_SKL_SAGV_UNKNOWN = 0,
- I915_SKL_SAGV_DISABLED,
- I915_SKL_SAGV_ENABLED,
- I915_SKL_SAGV_NOT_CONTROLLED
- } skl_sagv_status;
+ I915_SAGV_UNKNOWN = 0,
+ I915_SAGV_DISABLED,
+ I915_SAGV_ENABLED,
+ I915_SAGV_NOT_CONTROLLED
+ } sagv_status;
struct {
/*
@@ -2280,21 +2282,19 @@ struct drm_i915_gem_object {
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
- union {
- /** for phy allocated objects */
- struct drm_dma_handle *phys_handle;
-
- struct i915_gem_userptr {
- uintptr_t ptr;
- unsigned read_only :1;
- unsigned workers :4;
+ struct i915_gem_userptr {
+ uintptr_t ptr;
+ unsigned read_only :1;
+ unsigned workers :4;
#define I915_GEM_USERPTR_MAX_WORKERS 15
- struct i915_mm_struct *mm;
- struct i915_mmu_object *mmu_object;
- struct work_struct *work;
- } userptr;
- };
+ struct i915_mm_struct *mm;
+ struct i915_mmu_object *mmu_object;
+ struct work_struct *work;
+ } userptr;
+
+ /** for phys allocated objects */
+ struct drm_dma_handle *phys_handle;
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 66be299a1..2bb69f3c5 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -115,17 +115,28 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
base = bsm & INTEL_BSM_MASK;
} else if (IS_I865G(dev)) {
+ u32 tseg_size = 0;
u16 toud = 0;
+ u8 tmp;
+
+ pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
+ I845_ESMRAMC, &tmp);
+
+ if (tmp & TSEG_ENABLE) {
+ switch (tmp & I845_TSEG_SIZE_MASK) {
+ case I845_TSEG_SIZE_512K:
+ tseg_size = KB(512);
+ break;
+ case I845_TSEG_SIZE_1M:
+ tseg_size = MB(1);
+ break;
+ }
+ }
- /*
- * FIXME is the graphics stolen memory region
- * always at TOUD? Ie. is it always the last
- * one to be allocated by the BIOS?
- */
pci_bus_read_config_word(dev->pdev->bus, PCI_DEVFN(0, 0),
I865_TOUD, &toud);
- base = toud << 16;
+ base = (toud << 16) + tseg_size;
} else if (IS_I85X(dev)) {
u32 tseg_size = 0;
u32 tom;
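A note on the i865 stolen-memory hunk above: stolen memory is assumed to start above both TOUD (top of usable DRAM, stored as address bits 31:16) and the TSEG carve-out read from ESMRAMC, hence base = (toud << 16) + tseg_size. A standalone arithmetic sketch with made-up register values:

#include <stdio.h>

#define MB(x) ((x) * 1024u * 1024u)

int main(void)
{
	/* made-up i865 readouts: TOUD holds address bits 31:16 */
	unsigned short toud = 0x0fe0;		/* top of DRAM: 0x0fe00000 */
	unsigned int tseg_size = MB(1);		/* ESMRAMC: TSEG enabled, 1M */

	/* stolen memory is assumed to sit above both TOUD and TSEG */
	unsigned int base = ((unsigned int)toud << 16) + tseg_size;

	printf("stolen base = 0x%08x\n", base);	/* 0x0ff00000 */
	return 0;
}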
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 175595fc3..e9a64fba6 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2980,6 +2980,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
int pipe = intel_crtc->pipe;
u32 plane_ctl, stride_div, stride;
u32 tile_height, plane_offset, plane_size;
@@ -3031,6 +3032,9 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
intel_crtc->adjusted_x = x_offset;
intel_crtc->adjusted_y = y_offset;
+ if (wm->dirty_pipes & drm_crtc_mask(&intel_crtc->base))
+ skl_write_plane_wm(intel_crtc, wm, 0);
+
I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
@@ -3061,7 +3065,15 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = to_intel_crtc(crtc)->pipe;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+
+ /*
+ * We only populate skl_results on watermark updates, and if the
+	 * plane's visibility isn't actually changing, neither are its watermarks.
+ */
+ if (!to_intel_plane_state(crtc->primary->state)->visible)
+ skl_write_plane_wm(intel_crtc, &dev_priv->wm.skl_results, 0);
I915_WRITE(PLANE_CTL(pipe, 0), 0);
I915_WRITE(PLANE_SURF(pipe, 0), 0);
@@ -8995,6 +9007,24 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
if (intel_crtc_has_dp_encoder(crtc_state))
dpll |= DPLL_SDVO_HIGH_SPEED;
+ /*
+ * The high speed IO clock is only really required for
+ * SDVO/HDMI/DP, but we also enable it for CRT to make it
+ * possible to share the DPLL between CRT and HDMI. Enabling
+ * the clock needlessly does no real harm, except use up a
+	 * the clock needlessly does no real harm, except potentially
+	 * using up a bit of power.
+ * We'll limit this to IVB with 3 pipes, since it has only two
+ * DPLLs and so DPLL sharing is the only way to get three pipes
+ * driving PCH ports at the same time. On SNB we could do this,
+ * and potentially avoid enabling the second DPLL, but it's not
+	 * clear if it's a win or a loss power-wise. No point in doing
+ * this on ILK at all since it has a fixed DPLL<->pipe mapping.
+ */
+ if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
/* compute bitmask from p1 value */
dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
@@ -10306,9 +10336,13 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
int pipe = intel_crtc->pipe;
uint32_t cntl = 0;
+ if (INTEL_GEN(dev_priv) >= 9 && wm->dirty_pipes & drm_crtc_mask(crtc))
+ skl_write_cursor_wm(intel_crtc, wm);
+
if (plane_state && plane_state->visible) {
cntl = MCURSOR_GAMMA_ENABLE;
switch (plane_state->base.crtc_w) {
@@ -12956,16 +12990,23 @@ static void verify_wm_state(struct drm_crtc *crtc,
hw_entry->start, hw_entry->end);
}
- /* cursor */
- hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
- sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
-
- if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
- DRM_ERROR("mismatch in DDB state pipe %c cursor "
- "(expected (%u,%u), found (%u,%u))\n",
- pipe_name(pipe),
- sw_entry->start, sw_entry->end,
- hw_entry->start, hw_entry->end);
+ /*
+ * cursor
+	 * If the cursor plane isn't active, we may not have updated its ddb
+	 * allocation. In that case, since the ddb allocation will be updated
+ * once the plane becomes visible, we can skip this check
+ */
+ if (intel_crtc->cursor_addr) {
+ hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
+ sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+
+ if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
+ DRM_ERROR("mismatch in DDB state pipe %c cursor "
+ "(expected (%u,%u), found (%u,%u))\n",
+ pipe_name(pipe),
+ sw_entry->start, sw_entry->end,
+ hw_entry->start, hw_entry->end);
+ }
}
}
@@ -13671,6 +13712,111 @@ static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
return false;
}
+static void intel_update_crtc(struct drm_crtc *crtc,
+ struct drm_atomic_state *state,
+ struct drm_crtc_state *old_crtc_state,
+ unsigned int *crtc_vblank_mask)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->state);
+ bool modeset = needs_modeset(crtc->state);
+
+ if (modeset) {
+ update_scanline_offset(intel_crtc);
+ dev_priv->display.crtc_enable(crtc);
+ } else {
+ intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
+ }
+
+ if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
+ intel_fbc_enable(
+ intel_crtc, pipe_config,
+ to_intel_plane_state(crtc->primary->state));
+ }
+
+ drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
+
+ if (needs_vblank_wait(pipe_config))
+ *crtc_vblank_mask |= drm_crtc_mask(crtc);
+}
+
+static void intel_update_crtcs(struct drm_atomic_state *state,
+ unsigned int *crtc_vblank_mask)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ int i;
+
+ for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+ if (!crtc->state->active)
+ continue;
+
+ intel_update_crtc(crtc, state, old_crtc_state,
+ crtc_vblank_mask);
+ }
+}
+
+static void skl_update_crtcs(struct drm_atomic_state *state,
+ unsigned int *crtc_vblank_mask)
+{
+ struct drm_device *dev = state->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+ struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+ unsigned int updated = 0;
+ bool progress;
+ enum pipe pipe;
+
+ /*
+ * Whenever the number of active pipes changes, we need to make sure we
+ * update the pipes in the right order so that their ddb allocations
+	 * never overlap with each other in between CRTC updates. Otherwise we'll
+ * cause pipe underruns and other bad stuff.
+ */
+ do {
+ int i;
+ progress = false;
+
+ for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+ bool vbl_wait = false;
+ unsigned int cmask = drm_crtc_mask(crtc);
+ pipe = to_intel_crtc(crtc)->pipe;
+
+ if (updated & cmask || !crtc->state->active)
+ continue;
+ if (skl_ddb_allocation_overlaps(state, cur_ddb, new_ddb,
+ pipe))
+ continue;
+
+ updated |= cmask;
+
+ /*
+			 * If this is an already active pipe, its DDB changed,
+			 * and this isn't the last pipe that needs updating,
+ * then we need to wait for a vblank to pass for the
+ * new ddb allocation to take effect.
+ */
+ if (!skl_ddb_allocation_equals(cur_ddb, new_ddb, pipe) &&
+ !crtc->state->active_changed &&
+ intel_state->wm_results.dirty_pipes != updated)
+ vbl_wait = true;
+
+ intel_update_crtc(crtc, state, old_crtc_state,
+ crtc_vblank_mask);
+
+ if (vbl_wait)
+ intel_wait_for_vblank(dev, pipe);
+
+ progress = true;
+ }
+ } while (progress);
+}
+
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
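A note on skl_update_crtcs() above: the do/while is a greedy multi-pass ordering. Each pass commits only the pipes whose new DDB range no longer overlaps any other pipe's current range, so a shrinking pipe is always flushed before a neighbour grows into its old space. A standalone toy model of that ordering, with stand-in types and half-open ranges assumed:

#include <stdbool.h>
#include <stdio.h>

struct toy_pipe {
	int cur_start, cur_end;
	int new_start, new_end;
	bool done;
};

static bool conflicts(const struct toy_pipe *p,
		      const struct toy_pipe *all, int n)
{
	for (int i = 0; i < n; i++) {
		const struct toy_pipe *o = &all[i];

		if (o == p)
			continue;
		/* p's new range vs o's current range */
		if (p->new_start < o->cur_end && o->cur_start < p->new_end)
			return true;
	}
	return false;
}

int main(void)
{
	struct toy_pipe pipes[] = {
		/* pipe 0 wants to grow into space pipe 1 still owns */
		{ .cur_start = 300, .cur_end = 600,
		  .new_start = 200, .new_end = 600 },
		/* pipe 1 shrinks, freeing [200, 300) */
		{ .cur_start = 0, .cur_end = 300,
		  .new_start = 0, .new_end = 200 },
	};
	bool progress;

	do {
		progress = false;
		for (int i = 0; i < 2; i++) {
			struct toy_pipe *p = &pipes[i];

			if (p->done || conflicts(p, pipes, 2))
				continue;
			printf("update pipe %d\n", i);	/* pipe 1 first, then 0 */
			p->cur_start = p->new_start;
			p->cur_end = p->new_end;
			p->done = true;
			progress = true;
		}
	} while (progress);
	return 0;
}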
@@ -13763,23 +13909,15 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
* SKL workaround: bspec recommends we disable the SAGV when we
* have more then one pipe enabled
*/
- if (IS_SKYLAKE(dev_priv) && !skl_can_enable_sagv(state))
- skl_disable_sagv(dev_priv);
+ if (!intel_can_enable_sagv(state))
+ intel_disable_sagv(dev_priv);
intel_modeset_verify_disabled(dev);
}
- /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+ /* Complete the events for pipes that have now been disabled */
for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
bool modeset = needs_modeset(crtc->state);
- struct intel_crtc_state *pipe_config =
- to_intel_crtc_state(crtc->state);
-
- if (modeset && crtc->state->active) {
- update_scanline_offset(to_intel_crtc(crtc));
- dev_priv->display.crtc_enable(crtc);
- }
/* Complete events for now disable pipes here. */
if (modeset && !crtc->state->active && crtc->state->event) {
@@ -13789,21 +13927,11 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
crtc->state->event = NULL;
}
-
- if (!modeset)
- intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
-
- if (crtc->state->active &&
- drm_atomic_get_existing_plane_state(state, crtc->primary))
- intel_fbc_enable(intel_crtc, pipe_config, to_intel_plane_state(crtc->primary->state));
-
- if (crtc->state->active)
- drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
-
- if (pipe_config->base.active && needs_vblank_wait(pipe_config))
- crtc_vblank_mask |= 1 << i;
}
+ /* Now enable the clocks, plane, pipe, and connectors that we set up. */
+ dev_priv->display.update_crtcs(state, &crtc_vblank_mask);
+
/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
* already, but still need the state for the delayed optimization. To
* fix this:
@@ -13839,9 +13967,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
}
- if (IS_SKYLAKE(dev_priv) && intel_state->modeset &&
- skl_can_enable_sagv(state))
- skl_enable_sagv(dev_priv);
+ if (intel_state->modeset && intel_can_enable_sagv(state))
+ intel_enable_sagv(dev_priv);
drm_atomic_helper_commit_hw_done(state);
@@ -14221,10 +14348,12 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *old_intel_state =
to_intel_crtc_state(old_crtc_state);
bool modeset = needs_modeset(crtc->state);
+ enum pipe pipe = intel_crtc->pipe;
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(intel_crtc);
@@ -14239,8 +14368,12 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
if (to_intel_crtc_state(crtc->state)->update_pipe)
intel_update_pipe_config(intel_crtc, old_intel_state);
- else if (INTEL_INFO(dev)->gen >= 9)
+ else if (INTEL_GEN(dev_priv) >= 9) {
skl_detach_scalers(intel_crtc);
+
+ I915_WRITE(PIPE_WM_LINETIME(pipe),
+ dev_priv->wm.skl_hw.wm_linetime[pipe]);
+ }
}
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
@@ -15347,6 +15480,11 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
skl_modeset_calc_cdclk;
}
+ if (dev_priv->info.gen >= 9)
+ dev_priv->display.update_crtcs = skl_update_crtcs;
+ else
+ dev_priv->display.update_crtcs = intel_update_crtcs;
+
switch (INTEL_INFO(dev_priv)->gen) {
case 2:
dev_priv->display.queue_flip = intel_gen2_queue_flip;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 21b04c3ed..1ca155f4d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4148,7 +4148,7 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
*
* Return %true if @port is connected, %false otherwise.
*/
-bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
+static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
if (HAS_PCH_IBX(dev_priv))
@@ -4207,7 +4207,7 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
intel_dp->has_audio = false;
}
-static void
+static enum drm_connector_status
intel_dp_long_pulse(struct intel_connector *intel_connector)
{
struct drm_connector *connector = &intel_connector->base;
@@ -4232,7 +4232,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
else
status = connector_status_disconnected;
- if (status != connector_status_connected) {
+ if (status == connector_status_disconnected) {
intel_dp->compliance_test_active = 0;
intel_dp->compliance_test_type = 0;
intel_dp->compliance_test_data = 0;
@@ -4284,8 +4284,8 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
intel_dp->aux.i2c_defer_count = 0;
intel_dp_set_edid(intel_dp);
-
- status = connector_status_connected;
+ if (is_edp(intel_dp) || intel_connector->detect_edid)
+ status = connector_status_connected;
intel_dp->detect_done = true;
/* Try to read the source of the interrupt */
@@ -4303,12 +4303,11 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
}
out:
- if ((status != connector_status_connected) &&
- (intel_dp->is_mst == false))
+ if (status != connector_status_connected && !intel_dp->is_mst)
intel_dp_unset_edid(intel_dp);
intel_display_power_put(to_i915(dev), power_domain);
- return;
+ return status;
}
static enum drm_connector_status
@@ -4317,7 +4316,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ enum drm_connector_status status = connector->status;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
@@ -4332,14 +4331,11 @@ intel_dp_detect(struct drm_connector *connector, bool force)
/* If full detect is not performed yet, do a full detect */
if (!intel_dp->detect_done)
- intel_dp_long_pulse(intel_dp->attached_connector);
+ status = intel_dp_long_pulse(intel_dp->attached_connector);
intel_dp->detect_done = false;
- if (is_edp(intel_dp) || intel_connector->detect_edid)
- return connector_status_connected;
- else
- return connector_status_disconnected;
+ return status;
}
static void
@@ -4696,36 +4692,34 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
port_name(intel_dig_port->port),
long_hpd ? "long" : "short");
+ if (long_hpd) {
+ intel_dp->detect_done = false;
+ return IRQ_NONE;
+ }
+
power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
- if (long_hpd) {
- intel_dp_long_pulse(intel_dp->attached_connector);
- if (intel_dp->is_mst)
- ret = IRQ_HANDLED;
- goto put_power;
-
- } else {
- if (intel_dp->is_mst) {
- if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
- /*
- * If we were in MST mode, and device is not
- * there, get out of MST mode
- */
- DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
- intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
- intel_dp->is_mst = false;
- drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
- intel_dp->is_mst);
- goto put_power;
- }
+ if (intel_dp->is_mst) {
+ if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
+ /*
+ * If we were in MST mode, and device is not
+ * there, get out of MST mode
+ */
+ DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
+ intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+ intel_dp->is_mst = false;
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
+ intel_dp->is_mst);
+ intel_dp->detect_done = false;
+ goto put_power;
}
+ }
- if (!intel_dp->is_mst) {
- if (!intel_dp_short_pulse(intel_dp)) {
- intel_dp_long_pulse(intel_dp->attached_connector);
- goto put_power;
- }
+ if (!intel_dp->is_mst) {
+ if (!intel_dp_short_pulse(intel_dp)) {
+ intel_dp->detect_done = false;
+ goto put_power;
}
}
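A note on the intel_dp changes above: a long HPD pulse no longer runs the full probe from the irq handler; it just clears detect_done and returns IRQ_NONE so the hotplug work re-enters intel_dp_detect(), which performs the long pulse at most once per cycle and now returns the real status. A toy userspace model of that flag wiring, not the driver's real control flow:

#include <stdbool.h>
#include <stdio.h>

static bool detect_done;
static int status;	/* 0 = disconnected, 1 = connected */

static void long_pulse_probe(void)
{
	puts("full probe: AUX, EDID, MST ...");
	status = 1;
	detect_done = true;
}

static void hpd_irq(bool long_hpd)
{
	if (long_hpd) {
		detect_done = false;	/* force a fresh probe later */
		return;			/* IRQ_NONE: hotplug work re-detects */
	}
	/* short pulse handling elided */
}

static int detect(void)
{
	if (!detect_done)
		long_pulse_probe();
	detect_done = false;	/* one-shot, as in the patch */
	return status;
}

int main(void)
{
	hpd_irq(true);
	printf("status=%d\n", detect());	/* probes once, then reports */
	return 0;
}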
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ff399b9a5..9a58800cb 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -236,6 +236,7 @@ struct intel_panel {
bool enabled;
bool combination_mode; /* gen 2/4 only */
bool active_low_pwm;
+ bool alternate_pwm_increment; /* lpt+ */
/* PWM chip */
bool util_pin_active_low; /* bxt+ */
@@ -1387,8 +1388,6 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp);
void intel_edp_drrs_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
-bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *port);
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
@@ -1716,9 +1715,21 @@ void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
-bool skl_can_enable_sagv(struct drm_atomic_state *state);
-int skl_enable_sagv(struct drm_i915_private *dev_priv);
-int skl_disable_sagv(struct drm_i915_private *dev_priv);
+bool intel_can_enable_sagv(struct drm_atomic_state *state);
+int intel_enable_sagv(struct drm_i915_private *dev_priv);
+int intel_disable_sagv(struct drm_i915_private *dev_priv);
+bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
+ const struct skl_ddb_allocation *new,
+ enum pipe pipe);
+bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
+ const struct skl_ddb_allocation *old,
+ const struct skl_ddb_allocation *new,
+ enum pipe pipe);
+void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
+ const struct skl_wm_values *wm);
+void skl_write_plane_wm(struct intel_crtc *intel_crtc,
+ const struct skl_wm_values *wm,
+ int plane);
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
bool ilk_disable_lp_wm(struct drm_device *dev);
int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4df9f3849..c3aa9e670 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1422,24 +1422,22 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
}
static bool
-intel_hdmi_set_edid(struct drm_connector *connector, bool force)
+intel_hdmi_set_edid(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
- struct edid *edid = NULL;
+ struct edid *edid;
bool connected = false;
- if (force) {
- intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+ intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
- edid = drm_get_edid(connector,
- intel_gmbus_get_adapter(dev_priv,
- intel_hdmi->ddc_bus));
+ edid = drm_get_edid(connector,
+ intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus));
- intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
+ intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
- intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
- }
+ intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
to_intel_connector(connector)->detect_edid = edid;
if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
@@ -1465,37 +1463,16 @@ static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
enum drm_connector_status status;
- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
- bool live_status = false;
- unsigned int try;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
- for (try = 0; !live_status && try < 9; try++) {
- if (try)
- msleep(10);
- live_status = intel_digital_port_connected(dev_priv,
- hdmi_to_dig_port(intel_hdmi));
- }
-
- if (!live_status) {
- DRM_DEBUG_KMS("HDMI live status down\n");
- /*
- * Live status register is not reliable on all intel platforms.
- * So consider live_status only for certain platforms, for
- * others, read EDID to determine presence of sink.
- */
- if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
- live_status = true;
- }
-
intel_hdmi_unset_edid(connector);
- if (intel_hdmi_set_edid(connector, live_status)) {
+ if (intel_hdmi_set_edid(connector)) {
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
@@ -1521,7 +1498,7 @@ intel_hdmi_force(struct drm_connector *connector)
if (connector->status != connector_status_connected)
return;
- intel_hdmi_set_edid(connector, true);
+ intel_hdmi_set_edid(connector);
hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
}
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 96c65d77e..9a2393a6b 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -841,7 +841,7 @@ static void lpt_enable_backlight(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- u32 pch_ctl1, pch_ctl2;
+ u32 pch_ctl1, pch_ctl2, schicken;
pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
@@ -850,6 +850,22 @@ static void lpt_enable_backlight(struct intel_connector *connector)
I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
}
+ if (HAS_PCH_LPT(dev_priv)) {
+ schicken = I915_READ(SOUTH_CHICKEN2);
+ if (panel->backlight.alternate_pwm_increment)
+ schicken |= LPT_PWM_GRANULARITY;
+ else
+ schicken &= ~LPT_PWM_GRANULARITY;
+ I915_WRITE(SOUTH_CHICKEN2, schicken);
+ } else {
+ schicken = I915_READ(SOUTH_CHICKEN1);
+ if (panel->backlight.alternate_pwm_increment)
+ schicken |= SPT_PWM_GRANULARITY;
+ else
+ schicken &= ~SPT_PWM_GRANULARITY;
+ I915_WRITE(SOUTH_CHICKEN1, schicken);
+ }
+
pch_ctl2 = panel->backlight.max << 16;
I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
@@ -1242,10 +1258,10 @@ static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
u32 mul;
- if (I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY)
+ if (panel->backlight.alternate_pwm_increment)
mul = 128;
else
mul = 16;
@@ -1261,9 +1277,10 @@ static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
u32 mul, clock;
- if (I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY)
+ if (panel->backlight.alternate_pwm_increment)
mul = 16;
else
mul = 128;
@@ -1414,6 +1431,13 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 pch_ctl1, pch_ctl2, val;
+ bool alt;
+
+ if (HAS_PCH_LPT(dev_priv))
+ alt = I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
+ else
+ alt = I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
+ panel->backlight.alternate_pwm_increment = alt;
pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
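A note on the backlight hunks above: the PWM granularity bit changes the divider by 8x, so caching it in panel->backlight.alternate_pwm_increment at setup time and writing the same value back in lpt_enable_backlight() keeps the hz-to-pwm math and the hardware state consistent. A standalone sketch of the SPT-style relationship (on LPT the 128/16 choice is inverted); clock and frequency values are illustrative only:

#include <stdio.h>

static unsigned int spt_style_hz_to_pwm(int alternate_pwm_increment,
					unsigned int rawclk_hz,
					unsigned int pwm_freq_hz)
{
	unsigned int mul = alternate_pwm_increment ? 128 : 16;

	return rawclk_hz / (mul * pwm_freq_hz);
}

int main(void)
{
	printf("coarse: %u\n", spt_style_hz_to_pwm(0, 24000000, 200)); /* 7500 */
	printf("fine:   %u\n", spt_style_hz_to_pwm(1, 24000000, 200)); /* 937 */
	return 0;
}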
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 0f042d133..e59a28cb3 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2119,32 +2119,34 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
GEN9_MEM_LATENCY_LEVEL_MASK;
/*
+ * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
+ * need to be disabled. We make sure to sanitize the values out
+ * of the punit to satisfy this requirement.
+ */
+ for (level = 1; level <= max_level; level++) {
+ if (wm[level] == 0) {
+ for (i = level + 1; i <= max_level; i++)
+ wm[i] = 0;
+ break;
+ }
+ }
+
+ /*
* WaWmMemoryReadLatency:skl
*
* punit doesn't take into account the read latency so we need
- * to add 2us to the various latency levels we retrieve from
- * the punit.
- * - W0 is a bit special in that it's the only level that
- * can't be disabled if we want to have display working, so
- * we always add 2us there.
- * - For levels >=1, punit returns 0us latency when they are
- * disabled, so we respect that and don't add 2us then
- *
- * Additionally, if a level n (n > 1) has a 0us latency, all
- * levels m (m >= n) need to be disabled. We make sure to
- * sanitize the values out of the punit to satisfy this
- * requirement.
+ * to add 2us to the various latency levels we retrieve from the
+	 * punit when the level 0 response data is 0us.
*/
- wm[0] += 2;
- for (level = 1; level <= max_level; level++)
- if (wm[level] != 0)
+ if (wm[0] == 0) {
+ wm[0] += 2;
+ for (level = 1; level <= max_level; level++) {
+ if (wm[level] == 0)
+ break;
wm[level] += 2;
- else {
- for (i = level + 1; i <= max_level; i++)
- wm[i] = 0;
-
- break;
}
+ }
+
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
uint64_t sskpd = I915_READ64(MCH_SSKPD);
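A note on the intel_read_wm_latency() hunk above: sanitization now runs unconditionally (a 0us latency at level n disables every level >= n), while the WaWmMemoryReadLatency +2us fixup is applied only when the level 0 readout is 0us. A standalone run-through with a hypothetical punit readout:

#include <stdio.h>

int main(void)
{
	/* hypothetical punit readout, levels 0..7 */
	unsigned int wm[8] = { 0, 4, 6, 0, 10, 12, 14, 16 };
	int level, i, max_level = 7;

	/* sanitize: a zero at level n disables every level >= n */
	for (level = 1; level <= max_level; level++) {
		if (wm[level] == 0) {
			for (i = level + 1; i <= max_level; i++)
				wm[i] = 0;
			break;
		}
	}

	/* WaWmMemoryReadLatency: only applied when level 0 reads 0us */
	if (wm[0] == 0) {
		wm[0] += 2;
		for (level = 1; level <= max_level; level++) {
			if (wm[level] == 0)
				break;
			wm[level] += 2;
		}
	}

	for (level = 0; level <= max_level; level++)
		printf("%u ", wm[level]);
	printf("\n");	/* 2 6 8 0 0 0 0 0 */
	return 0;
}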
@@ -2876,6 +2878,19 @@ skl_wm_plane_id(const struct intel_plane *plane)
}
}
+static bool
+intel_has_sagv(struct drm_i915_private *dev_priv)
+{
+ if (IS_KABYLAKE(dev_priv))
+ return true;
+
+ if (IS_SKYLAKE(dev_priv) &&
+ dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
+ return true;
+
+ return false;
+}
+
/*
* SAGV dynamically adjusts the system agent voltage and clock frequencies
* depending on power and performance requirements. The display engine access
@@ -2888,12 +2903,14 @@ skl_wm_plane_id(const struct intel_plane *plane)
* - We're not using an interlaced display configuration
*/
int
-skl_enable_sagv(struct drm_i915_private *dev_priv)
+intel_enable_sagv(struct drm_i915_private *dev_priv)
{
int ret;
- if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
- dev_priv->skl_sagv_status == I915_SKL_SAGV_ENABLED)
+ if (!intel_has_sagv(dev_priv))
+ return 0;
+
+ if (dev_priv->sagv_status == I915_SAGV_ENABLED)
return 0;
DRM_DEBUG_KMS("Enabling the SAGV\n");
@@ -2909,21 +2926,21 @@ skl_enable_sagv(struct drm_i915_private *dev_priv)
* Some skl systems, pre-release machines in particular,
* don't actually have an SAGV.
*/
- if (ret == -ENXIO) {
+ if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
- dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+ dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
return 0;
} else if (ret < 0) {
DRM_ERROR("Failed to enable the SAGV\n");
return ret;
}
- dev_priv->skl_sagv_status = I915_SKL_SAGV_ENABLED;
+ dev_priv->sagv_status = I915_SAGV_ENABLED;
return 0;
}
static int
-skl_do_sagv_disable(struct drm_i915_private *dev_priv)
+intel_do_sagv_disable(struct drm_i915_private *dev_priv)
{
int ret;
uint32_t temp = GEN9_SAGV_DISABLE;
@@ -2937,19 +2954,21 @@ skl_do_sagv_disable(struct drm_i915_private *dev_priv)
}
int
-skl_disable_sagv(struct drm_i915_private *dev_priv)
+intel_disable_sagv(struct drm_i915_private *dev_priv)
{
int ret, result;
- if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
- dev_priv->skl_sagv_status == I915_SKL_SAGV_DISABLED)
+ if (!intel_has_sagv(dev_priv))
+ return 0;
+
+ if (dev_priv->sagv_status == I915_SAGV_DISABLED)
return 0;
DRM_DEBUG_KMS("Disabling the SAGV\n");
mutex_lock(&dev_priv->rps.hw_lock);
/* bspec says to keep retrying for at least 1 ms */
- ret = wait_for(result = skl_do_sagv_disable(dev_priv), 1);
+ ret = wait_for(result = intel_do_sagv_disable(dev_priv), 1);
mutex_unlock(&dev_priv->rps.hw_lock);
if (ret == -ETIMEDOUT) {
@@ -2961,20 +2980,20 @@ skl_disable_sagv(struct drm_i915_private *dev_priv)
* Some skl systems, pre-release machines in particular,
* don't actually have an SAGV.
*/
- if (result == -ENXIO) {
+ if (IS_SKYLAKE(dev_priv) && result == -ENXIO) {
DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
- dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+ dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
return 0;
} else if (result < 0) {
DRM_ERROR("Failed to disable the SAGV\n");
return result;
}
- dev_priv->skl_sagv_status = I915_SKL_SAGV_DISABLED;
+ dev_priv->sagv_status = I915_SAGV_DISABLED;
return 0;
}
-bool skl_can_enable_sagv(struct drm_atomic_state *state)
+bool intel_can_enable_sagv(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2983,6 +3002,9 @@ bool skl_can_enable_sagv(struct drm_atomic_state *state)
enum pipe pipe;
int level, plane;
+ if (!intel_has_sagv(dev_priv))
+ return false;
+
/*
* SKL workaround: bspec recommends we disable the SAGV when we have
* more then one pipe enabled
@@ -3473,29 +3495,14 @@ static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latenc
}
static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
- uint32_t horiz_pixels, uint8_t cpp,
- uint64_t tiling, uint32_t latency)
+ uint32_t latency, uint32_t plane_blocks_per_line)
{
uint32_t ret;
- uint32_t plane_bytes_per_line, plane_blocks_per_line;
uint32_t wm_intermediate_val;
if (latency == 0)
return UINT_MAX;
- plane_bytes_per_line = horiz_pixels * cpp;
-
- if (tiling == I915_FORMAT_MOD_Y_TILED ||
- tiling == I915_FORMAT_MOD_Yf_TILED) {
- plane_bytes_per_line *= 4;
- plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
- plane_blocks_per_line /= 4;
- } else if (tiling == DRM_FORMAT_MOD_NONE) {
- plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
- } else {
- plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
- }
-
wm_intermediate_val = latency * pixel_rate;
ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
plane_blocks_per_line;
@@ -3546,6 +3553,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
uint8_t cpp;
uint32_t width = 0, height = 0;
uint32_t plane_pixel_rate;
+ uint32_t y_tile_minimum, y_min_scanlines;
if (latency == 0 || !cstate->base.active || !intel_pstate->visible) {
*enabled = false;
@@ -3561,38 +3569,51 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
cpp = drm_format_plane_cpp(fb->pixel_format, 0);
plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
+ if (intel_rotation_90_or_270(pstate->rotation)) {
+ int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
+ drm_format_plane_cpp(fb->pixel_format, 1) :
+ drm_format_plane_cpp(fb->pixel_format, 0);
+
+ switch (cpp) {
+ case 1:
+ y_min_scanlines = 16;
+ break;
+ case 2:
+ y_min_scanlines = 8;
+ break;
+ default:
+ WARN(1, "Unsupported pixel depth for rotation");
+ case 4:
+ y_min_scanlines = 4;
+ break;
+ }
+ } else {
+ y_min_scanlines = 4;
+ }
+
+ plane_bytes_per_line = width * cpp;
+ if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
+ fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
+ plane_blocks_per_line =
+ DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512);
+ plane_blocks_per_line /= y_min_scanlines;
+ } else if (fb->modifier[0] == DRM_FORMAT_MOD_NONE) {
+ plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512)
+ + 1;
+ } else {
+ plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
+ }
+
method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
method2 = skl_wm_method2(plane_pixel_rate,
cstate->base.adjusted_mode.crtc_htotal,
- width,
- cpp,
- fb->modifier[0],
- latency);
+ latency,
+ plane_blocks_per_line);
- plane_bytes_per_line = width * cpp;
- plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
+ y_tile_minimum = plane_blocks_per_line * y_min_scanlines;
if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
- uint32_t min_scanlines = 4;
- uint32_t y_tile_minimum;
- if (intel_rotation_90_or_270(pstate->rotation)) {
- int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
- drm_format_plane_cpp(fb->pixel_format, 1) :
- drm_format_plane_cpp(fb->pixel_format, 0);
-
- switch (cpp) {
- case 1:
- min_scanlines = 16;
- break;
- case 2:
- min_scanlines = 8;
- break;
- case 8:
- WARN(1, "Unsupported pixel depth for rotation");
- }
- }
- y_tile_minimum = plane_blocks_per_line * min_scanlines;
selected_result = max(method2, y_tile_minimum);
} else {
if ((ddb_allocation / plane_blocks_per_line) >= 1)
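A note on the skl_compute_plane_wm() rework above: y_min_scanlines is now computed up front from the rotated format's bytes per pixel (16/8/4 scanlines for 1/2/4 cpp) and feeds both plane_blocks_per_line and the y_tile_minimum term. A standalone sketch of that computation, using an illustrative 1920-wide, 4-cpp rotated Y-tiled plane:

#include <stdio.h>

/* Minimum scanlines for rotated Y-tiled fetch, keyed off bytes per
 * pixel, as in the hunk above (default covers the cpp == 4 case the
 * WARN falls through to). */
static unsigned int y_min_scanlines(unsigned int cpp, int rotated_90_270)
{
	if (!rotated_90_270)
		return 4;
	switch (cpp) {
	case 1:
		return 16;
	case 2:
		return 8;
	default:
		return 4;
	}
}

int main(void)
{
	unsigned int width = 1920, cpp = 4;	/* illustrative plane */
	unsigned int scan = y_min_scanlines(cpp, 1);
	unsigned int bytes_per_line = width * cpp;
	/* Y-tiled: round blocks up over 'scan' lines, then back per line */
	unsigned int blocks_per_line =
		((bytes_per_line * scan + 511) / 512) / scan;
	unsigned int y_tile_minimum = blocks_per_line * scan;

	printf("scanlines=%u blocks/line=%u y_tile_min=%u\n",
	       scan, blocks_per_line, y_tile_minimum);	/* 4 15 60 */
	return 0;
}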
@@ -3606,10 +3627,12 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if (level >= 1 && level <= 7) {
if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)
- res_lines += 4;
- else
+ fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
+ res_blocks += y_tile_minimum;
+ res_lines += y_min_scanlines;
+ } else {
res_blocks++;
+ }
}
if (res_blocks >= ddb_allocation || res_lines > 31) {
@@ -3828,182 +3851,82 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
I915_WRITE(reg, 0);
}
-static void skl_write_wm_values(struct drm_i915_private *dev_priv,
- const struct skl_wm_values *new)
+void skl_write_plane_wm(struct intel_crtc *intel_crtc,
+ const struct skl_wm_values *wm,
+ int plane)
{
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(dev, crtc) {
- int i, level, max_level = ilk_wm_max_level(dev);
- enum pipe pipe = crtc->pipe;
-
- if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0)
- continue;
- if (!crtc->active)
- continue;
-
- I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
-
- for (level = 0; level <= max_level; level++) {
- for (i = 0; i < intel_num_planes(crtc); i++)
- I915_WRITE(PLANE_WM(pipe, i, level),
- new->plane[pipe][i][level]);
- I915_WRITE(CUR_WM(pipe, level),
- new->plane[pipe][PLANE_CURSOR][level]);
- }
- for (i = 0; i < intel_num_planes(crtc); i++)
- I915_WRITE(PLANE_WM_TRANS(pipe, i),
- new->plane_trans[pipe][i]);
- I915_WRITE(CUR_WM_TRANS(pipe),
- new->plane_trans[pipe][PLANE_CURSOR]);
-
- for (i = 0; i < intel_num_planes(crtc); i++) {
- skl_ddb_entry_write(dev_priv,
- PLANE_BUF_CFG(pipe, i),
- &new->ddb.plane[pipe][i]);
- skl_ddb_entry_write(dev_priv,
- PLANE_NV12_BUF_CFG(pipe, i),
- &new->ddb.y_plane[pipe][i]);
- }
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int level, max_level = ilk_wm_max_level(dev);
+ enum pipe pipe = intel_crtc->pipe;
- skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
- &new->ddb.plane[pipe][PLANE_CURSOR]);
+ for (level = 0; level <= max_level; level++) {
+ I915_WRITE(PLANE_WM(pipe, plane, level),
+ wm->plane[pipe][plane][level]);
}
-}
+ I915_WRITE(PLANE_WM_TRANS(pipe, plane), wm->plane_trans[pipe][plane]);
-/*
- * When setting up a new DDB allocation arrangement, we need to correctly
- * sequence the times at which the new allocations for the pipes are taken into
- * account or we'll have pipes fetching from space previously allocated to
- * another pipe.
- *
- * Roughly the sequence looks like:
- * 1. re-allocate the pipe(s) with the allocation being reduced and not
- * overlapping with a previous light-up pipe (another way to put it is:
- * pipes with their new allocation strickly included into their old ones).
- * 2. re-allocate the other pipes that get their allocation reduced
- * 3. allocate the pipes having their allocation increased
- *
- * Steps 1. and 2. are here to take care of the following case:
- * - Initially DDB looks like this:
- * | B | C |
- * - enable pipe A.
- * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
- * allocation
- * | A | B | C |
- *
- * We need to sequence the re-allocation: C, B, A (and not B, C, A).
- */
+ skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane),
+ &wm->ddb.plane[pipe][plane]);
+ skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane),
+ &wm->ddb.y_plane[pipe][plane]);
+}
-static void
-skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
+void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
+ const struct skl_wm_values *wm)
{
- int plane;
-
- DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
+ struct drm_crtc *crtc = &intel_crtc->base;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int level, max_level = ilk_wm_max_level(dev);
+ enum pipe pipe = intel_crtc->pipe;
- for_each_plane(dev_priv, pipe, plane) {
- I915_WRITE(PLANE_SURF(pipe, plane),
- I915_READ(PLANE_SURF(pipe, plane)));
+ for (level = 0; level <= max_level; level++) {
+ I915_WRITE(CUR_WM(pipe, level),
+ wm->plane[pipe][PLANE_CURSOR][level]);
}
+ I915_WRITE(CUR_WM_TRANS(pipe), wm->plane_trans[pipe][PLANE_CURSOR]);
+
+ skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
+ &wm->ddb.plane[pipe][PLANE_CURSOR]);
}
-static bool
-skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
- const struct skl_ddb_allocation *new,
- enum pipe pipe)
+bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
+ const struct skl_ddb_allocation *new,
+ enum pipe pipe)
{
- uint16_t old_size, new_size;
-
- old_size = skl_ddb_entry_size(&old->pipe[pipe]);
- new_size = skl_ddb_entry_size(&new->pipe[pipe]);
-
- return old_size != new_size &&
- new->pipe[pipe].start >= old->pipe[pipe].start &&
- new->pipe[pipe].end <= old->pipe[pipe].end;
+ return new->pipe[pipe].start == old->pipe[pipe].start &&
+ new->pipe[pipe].end == old->pipe[pipe].end;
}
-static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
- struct skl_wm_values *new_values)
+static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
+ const struct skl_ddb_entry *b)
{
- struct drm_device *dev = &dev_priv->drm;
- struct skl_ddb_allocation *cur_ddb, *new_ddb;
- bool reallocated[I915_MAX_PIPES] = {};
- struct intel_crtc *crtc;
- enum pipe pipe;
-
- new_ddb = &new_values->ddb;
- cur_ddb = &dev_priv->wm.skl_hw.ddb;
-
- /*
- * First pass: flush the pipes with the new allocation contained into
- * the old space.
- *
- * We'll wait for the vblank on those pipes to ensure we can safely
- * re-allocate the freed space without this pipe fetching from it.
- */
- for_each_intel_crtc(dev, crtc) {
- if (!crtc->active)
- continue;
-
- pipe = crtc->pipe;
-
- if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
- continue;
-
- skl_wm_flush_pipe(dev_priv, pipe, 1);
- intel_wait_for_vblank(dev, pipe);
-
- reallocated[pipe] = true;
- }
-
+ return a->start < b->end && b->start < a->end;
+}
- /*
- * Second pass: flush the pipes that are having their allocation
- * reduced, but overlapping with a previous allocation.
- *
- * Here as well we need to wait for the vblank to make sure the freed
- * space is not used anymore.
- */
- for_each_intel_crtc(dev, crtc) {
- if (!crtc->active)
- continue;
+bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
+ const struct skl_ddb_allocation *old,
+ const struct skl_ddb_allocation *new,
+ enum pipe pipe)
+{
+ struct drm_device *dev = state->dev;
+ struct intel_crtc *intel_crtc;
+ enum pipe otherp;
- pipe = crtc->pipe;
+ for_each_intel_crtc(dev, intel_crtc) {
+ otherp = intel_crtc->pipe;
- if (reallocated[pipe])
+ if (otherp == pipe)
continue;
- if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
- skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
- skl_wm_flush_pipe(dev_priv, pipe, 2);
- intel_wait_for_vblank(dev, pipe);
- reallocated[pipe] = true;
- }
+ if (skl_ddb_entries_overlap(&new->pipe[pipe],
+ &old->pipe[otherp]))
+ return true;
}
- /*
- * Third pass: flush the pipes that got more space allocated.
- *
- * We don't need to actively wait for the update here, next vblank
- * will just get more DDB space with the correct WM values.
- */
- for_each_intel_crtc(dev, crtc) {
- if (!crtc->active)
- continue;
-
- pipe = crtc->pipe;
-
- /*
- * At this point, only the pipes more space than before are
- * left to re-allocate.
- */
- if (reallocated[pipe])
- continue;
-
- skl_wm_flush_pipe(dev_priv, pipe, 3);
- }
+ return false;
}
static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
@@ -4040,6 +3963,41 @@ pipes_modified(struct drm_atomic_state *state)
return ret;
}
+int
+skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
+{
+ struct drm_atomic_state *state = cstate->base.state;
+ struct drm_device *dev = state->dev;
+ struct drm_crtc *crtc = cstate->base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+ struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+ struct drm_plane_state *plane_state;
+ struct drm_plane *plane;
+ enum pipe pipe = intel_crtc->pipe;
+ int id;
+
+ WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
+
+ drm_for_each_plane_mask(plane, dev, crtc->state->plane_mask) {
+ id = skl_wm_plane_id(to_intel_plane(plane));
+
+ if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][id],
+ &new_ddb->plane[pipe][id]) &&
+ skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][id],
+ &new_ddb->y_plane[pipe][id]))
+ continue;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+ }
+
+ return 0;
+}
+
static int
skl_compute_ddb(struct drm_atomic_state *state)
{
@@ -4104,6 +4062,10 @@ skl_compute_ddb(struct drm_atomic_state *state)
if (ret)
return ret;
+ ret = skl_ddb_add_affected_planes(cstate);
+ if (ret)
+ return ret;
+
ret = drm_atomic_add_affected_planes(state, &intel_crtc->base);
if (ret)
return ret;
@@ -4205,7 +4167,7 @@ static void skl_update_wm(struct drm_crtc *crtc)
struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
- int pipe;
+ enum pipe pipe = intel_crtc->pipe;
if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
return;
@@ -4214,15 +4176,22 @@ static void skl_update_wm(struct drm_crtc *crtc)
mutex_lock(&dev_priv->wm.wm_mutex);
- skl_write_wm_values(dev_priv, results);
- skl_flush_wm_values(dev_priv, results);
-
/*
- * Store the new configuration (but only for the pipes that have
- * changed; the other values weren't recomputed).
+ * If this pipe isn't active already, we're going to be enabling it
+ * very soon. Since it's safe to update a pipe's ddb allocation while
+ * the pipe's shut off, just do so here. Already active pipes will have
+ * their watermarks updated once we update their planes.
*/
- for_each_pipe_masked(dev_priv, pipe, results->dirty_pipes)
- skl_copy_wm_for_pipe(hw_vals, results, pipe);
+ if (crtc->state->active_changed) {
+ int plane;
+
+ for (plane = 0; plane < intel_num_planes(intel_crtc); plane++)
+ skl_write_plane_wm(intel_crtc, results, plane);
+
+ skl_write_cursor_wm(intel_crtc, results);
+ }
+
+ skl_copy_wm_for_pipe(hw_vals, results, pipe);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
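A note on skl_ddb_entries_overlap() above: it is the standard interval-overlap test, assuming half-open [start, end) entries, so adjacent allocations that merely touch do not count as overlapping. A standalone check with made-up DDB ranges:

#include <stdbool.h>
#include <stdio.h>

struct ddb_entry {
	unsigned short start, end;	/* assumed half-open: [start, end) */
};

/* Same predicate shape as skl_ddb_entries_overlap() above. */
static bool overlaps(const struct ddb_entry *a, const struct ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}

int main(void)
{
	struct ddb_entry pipe_a = { 0, 200 };
	struct ddb_entry pipe_b = { 200, 400 };	/* merely adjacent */
	struct ddb_entry pipe_c = { 150, 300 };	/* overlaps both */

	printf("a/b: %d\n", overlaps(&pipe_a, &pipe_b));	/* 0 */
	printf("a/c: %d\n", overlaps(&pipe_a, &pipe_c));	/* 1 */
	return 0;
}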
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 7c08e4f29..417884963 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -203,6 +203,9 @@ skl_update_plane(struct drm_plane *drm_plane,
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
+ struct drm_crtc *crtc = crtc_state->base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
u32 plane_ctl, stride_div, stride;
@@ -238,6 +241,9 @@ skl_update_plane(struct drm_plane *drm_plane,
crtc_w--;
crtc_h--;
+ if (wm->dirty_pipes & drm_crtc_mask(crtc))
+ skl_write_plane_wm(intel_crtc, wm, plane);
+
if (key->flags) {
I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
@@ -308,6 +314,14 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
+ /*
+ * We only populate skl_results on watermark updates, and if the
+	 * plane's visibility isn't actually changing, neither are its watermarks.
+ */
+ if (!to_intel_plane_state(dplane->state)->visible)
+ skl_write_plane_wm(to_intel_crtc(crtc),
+ &dev_priv->wm.skl_results, plane);
+
I915_WRITE(PLANE_CTL(pipe, plane), 0);
I915_WRITE(PLANE_SURF(pipe, plane), 0);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index ff80a81b1..ec28b15f2 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -796,10 +796,9 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv,
const bool read,
const bool before)
{
- if (WARN(check_for_unclaimed_mmio(dev_priv),
- "Unclaimed register detected %s %s register 0x%x\n",
- before ? "before" : "after",
- read ? "reading" : "writing to",
+ if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
+ "Unclaimed %s register 0x%x\n",
+ read ? "read from" : "write to",
i915_mmio_reg_offset(reg)))
i915.mmio_debug--; /* Only report the first N failures */
}
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 6a4b020dd..5a26eb454 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -156,19 +156,20 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
struct drm_device *dev = rdev->ddev;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
- u32 line_time_us, vblank_lines;
+ u32 vblank_in_pixels;
u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
radeon_crtc = to_radeon_crtc(crtc);
if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
- line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
- radeon_crtc->hw_mode.clock;
- vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
- radeon_crtc->hw_mode.crtc_vdisplay +
- (radeon_crtc->v_border * 2);
- vblank_time_us = vblank_lines * line_time_us;
+ vblank_in_pixels =
+ radeon_crtc->hw_mode.crtc_htotal *
+ (radeon_crtc->hw_mode.crtc_vblank_end -
+ radeon_crtc->hw_mode.crtc_vdisplay +
+ (radeon_crtc->v_border * 2));
+
+ vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock;
break;
}
}
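The reordered arithmetic above matters because everything is integer math: the old code truncated the per-line time to whole microseconds and then multiplied the error by the line count, while the new code multiplies first and divides once. A minimal userspace sketch (sample mode values assumed, not taken from any real display) shows the difference:

/* Sketch only: compares the two integer evaluation orders. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t htotal = 2200, clock_khz = 148500, vblank_lines = 45;

	/* old order: divide per line first, truncating to whole us */
	uint32_t line_time_us = (htotal * 1000) / clock_khz;	/* 14, not 14.81 */
	uint32_t old_us = vblank_lines * line_time_us;		/* 630 us */

	/* new order: multiply first, divide once at the end */
	uint32_t vblank_in_pixels = htotal * vblank_lines;	/* 99000 */
	uint32_t new_us = vblank_in_pixels * 1000 / clock_khz;	/* 666 us */

	printf("old=%u us new=%u us\n", old_us, new_us);
	return 0;
}

The true value for these numbers is about 666.7 us, so the single final division loses at most one microsecond instead of one per blanking line.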
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index a00dd2f74..554ca7115 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -661,8 +661,9 @@ bool radeon_card_posted(struct radeon_device *rdev)
{
uint32_t reg;
- /* for pass through, always force asic_init */
- if (radeon_device_is_virtual())
+ /* for pass through, always force asic_init for CI */
+ if (rdev->family >= CHIP_BONAIRE &&
+ radeon_device_is_virtual())
return false;
/* required for EFI mode on macbook2,1 which uses an r5xx asic */
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 1f78ec254..89bdf2034 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -4112,7 +4112,7 @@ static int si_populate_smc_voltage_tables(struct radeon_device *rdev,
&rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table);
- table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+ table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] =
cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h
index 3c779838d..966e3a556 100644
--- a/drivers/gpu/drm/radeon/sislands_smc.h
+++ b/drivers/gpu/drm/radeon/sislands_smc.h
@@ -194,6 +194,7 @@ typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
#define SISLANDS_SMC_VOLTAGEMASK_VDDC 0
#define SISLANDS_SMC_VOLTAGEMASK_MVDD 1
#define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
+#define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3
#define SISLANDS_SMC_VOLTAGEMASK_MAX 4
struct SISLANDS_SMC_VOLTAGEMASKTABLE
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 428e24919..f696b7528 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -122,9 +122,16 @@ to_vc4_dev(struct drm_device *dev)
struct vc4_bo {
struct drm_gem_cma_object base;
- /* seqno of the last job to render to this BO. */
+ /* seqno of the last job to render using this BO. */
uint64_t seqno;
+ /* seqno of the last job to use the RCL to write to this BO.
+ *
+ * Note that this doesn't include binner overflow memory
+ * writes.
+ */
+ uint64_t write_seqno;
+
/* List entry for the BO's position in either
* vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
*/
@@ -216,6 +223,9 @@ struct vc4_exec_info {
/* Sequence number for this bin/render job. */
uint64_t seqno;
+ /* Latest write_seqno of any BO that binning depends on. */
+ uint64_t bin_dep_seqno;
+
/* Last current addresses the hardware was processing when the
* hangcheck timer checked on us.
*/
@@ -230,6 +240,13 @@ struct vc4_exec_info {
struct drm_gem_cma_object **bo;
uint32_t bo_count;
+ /* List of BOs that are being written by the RCL. Other than
+ * the binner temporary storage, this is all the BOs written
+ * by the job.
+ */
+ struct drm_gem_cma_object *rcl_write_bo[4];
+ uint32_t rcl_write_bo_count;
+
/* Pointers for our position in vc4->job_list */
struct list_head head;
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index b262c5c26..ae1609e73 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -471,6 +471,11 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
list_for_each_entry(bo, &exec->unref_list, unref_head) {
bo->seqno = seqno;
}
+
+ for (i = 0; i < exec->rcl_write_bo_count; i++) {
+ bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
+ bo->write_seqno = seqno;
+ }
}
/* Queues a struct vc4_exec_info for execution. If no job is
@@ -673,6 +678,14 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
goto fail;
ret = vc4_validate_shader_recs(dev, exec);
+ if (ret)
+ goto fail;
+
+ /* Block waiting on any previous rendering into the CS's VBO,
+ * IB, or textures, so that pixels are actually written by the
+ * time we try to read them.
+ */
+ ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);
fail:
drm_free_large(temp);
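Taken together, the vc4 changes implement one producer/consumer rule: every BO written by the RCL records the writing job's seqno, and a later bin job must not start until the newest such seqno among the BOs it reads has passed. A hedged sketch of the two halves, reusing the fields added above (the demo_* names are illustrative, not driver functions):

/* Sketch: record at submit time, accumulate at validation time. */
static void demo_record_write(struct vc4_bo *bo, uint64_t job_seqno)
{
	bo->write_seqno = job_seqno;	/* producer side, per written BO */
}

static void demo_depend_on(struct vc4_exec_info *exec, struct vc4_bo *bo)
{
	/* consumer side: the bin job waits for the newest writer */
	exec->bin_dep_seqno = max(exec->bin_dep_seqno, bo->write_seqno);
}

The single vc4_wait_for_seqno() call before binning then covers every dependency at once, because only the maximum matters.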
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 0f1241872..08886a309 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -45,6 +45,8 @@ struct vc4_rcl_setup {
struct drm_gem_cma_object *rcl;
u32 next_offset;
+
+ u32 next_write_bo_index;
};
static inline void rcl_u8(struct vc4_rcl_setup *setup, u8 val)
@@ -407,6 +409,8 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
if (!*obj)
return -EINVAL;
+ exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;
+
if (surf->offset & 0xf) {
DRM_ERROR("MSAA write must be 16b aligned.\n");
return -EINVAL;
@@ -417,7 +421,8 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
struct drm_gem_cma_object **obj,
- struct drm_vc4_submit_rcl_surface *surf)
+ struct drm_vc4_submit_rcl_surface *surf,
+ bool is_write)
{
uint8_t tiling = VC4_GET_FIELD(surf->bits,
VC4_LOADSTORE_TILE_BUFFER_TILING);
@@ -440,6 +445,9 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
if (!*obj)
return -EINVAL;
+ if (is_write)
+ exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;
+
if (surf->flags & VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
if (surf == &exec->args->zs_write) {
DRM_ERROR("general zs write may not be a full-res.\n");
@@ -542,6 +550,8 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
if (!*obj)
return -EINVAL;
+ exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;
+
if (tiling > VC4_TILING_FORMAT_LT) {
DRM_ERROR("Bad tiling format\n");
return -EINVAL;
@@ -599,15 +609,18 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
if (ret)
return ret;
- ret = vc4_rcl_surface_setup(exec, &setup.color_read, &args->color_read);
+ ret = vc4_rcl_surface_setup(exec, &setup.color_read, &args->color_read,
+ false);
if (ret)
return ret;
- ret = vc4_rcl_surface_setup(exec, &setup.zs_read, &args->zs_read);
+ ret = vc4_rcl_surface_setup(exec, &setup.zs_read, &args->zs_read,
+ false);
if (ret)
return ret;
- ret = vc4_rcl_surface_setup(exec, &setup.zs_write, &args->zs_write);
+ ret = vc4_rcl_surface_setup(exec, &setup.zs_write, &args->zs_write,
+ true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 9ce1d0adf..26503e307 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -267,6 +267,9 @@ validate_indexed_prim_list(VALIDATE_ARGS)
if (!ib)
return -EINVAL;
+ exec->bin_dep_seqno = max(exec->bin_dep_seqno,
+ to_vc4_bo(&ib->base)->write_seqno);
+
if (offset > ib->base.size ||
(ib->base.size - offset) / index_size < length) {
DRM_ERROR("IB access overflow (%d + %d*%d > %zd)\n",
@@ -555,8 +558,7 @@ static bool
reloc_tex(struct vc4_exec_info *exec,
void *uniform_data_u,
struct vc4_texture_sample_info *sample,
- uint32_t texture_handle_index)
-
+ uint32_t texture_handle_index, bool is_cs)
{
struct drm_gem_cma_object *tex;
uint32_t p0 = *(uint32_t *)(uniform_data_u + sample->p_offset[0]);
@@ -714,6 +716,11 @@ reloc_tex(struct vc4_exec_info *exec,
*validated_p0 = tex->paddr + p0;
+ if (is_cs) {
+ exec->bin_dep_seqno = max(exec->bin_dep_seqno,
+ to_vc4_bo(&tex->base)->write_seqno);
+ }
+
return true;
fail:
DRM_INFO("Texture p0 at %d: 0x%08x\n", sample->p_offset[0], p0);
@@ -835,7 +842,8 @@ validate_gl_shader_rec(struct drm_device *dev,
if (!reloc_tex(exec,
uniform_data_u,
&validated_shader->texture_samples[tex],
- texture_handles_u[tex])) {
+ texture_handles_u[tex],
+ i == 2)) {
return -EINVAL;
}
}
@@ -867,6 +875,9 @@ validate_gl_shader_rec(struct drm_device *dev,
uint32_t stride = *(uint8_t *)(pkt_u + o + 5);
uint32_t max_index;
+ exec->bin_dep_seqno = max(exec->bin_dep_seqno,
+ to_vc4_bo(&vbo->base)->write_seqno);
+
if (state->addr & 0x8)
stride |= (*(uint32_t *)(pkt_u + 100 + i * 4)) & ~0xff;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index dc5beff2b..8a15c4aa8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -34,6 +34,24 @@
#define VMW_RES_HT_ORDER 12
+ /**
+ * enum vmw_resource_relocation_type - Relocation type for resources
+ *
+ * @vmw_res_rel_normal: Traditional relocation. The resource id in the
+ * command stream is replaced with the actual id after validation.
+ * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
+ * with a NOP.
+ * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
+ * after validation is -1, the command is replaced with a NOP. Otherwise no
+ * action.
+ */
+enum vmw_resource_relocation_type {
+ vmw_res_rel_normal,
+ vmw_res_rel_nop,
+ vmw_res_rel_cond_nop,
+ vmw_res_rel_max
+};
+
/**
* struct vmw_resource_relocation - Relocation info for resources
*
@@ -41,11 +59,13 @@
* @res: Non-ref-counted pointer to the resource.
* @offset: Offset of 4 byte entries into the command buffer where the
* id that needs fixup is located.
+ * @rel_type: Type of relocation.
*/
struct vmw_resource_relocation {
struct list_head head;
const struct vmw_resource *res;
- unsigned long offset;
+ u32 offset:29;
+ enum vmw_resource_relocation_type rel_type:3;
};
/**
@@ -410,10 +430,13 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
* @res: The resource.
* @offset: Offset into the command buffer currently being parsed where the
* id that needs fixup is located. Granularity is 4 bytes.
+ * @rel_type: Relocation type.
*/
static int vmw_resource_relocation_add(struct list_head *list,
const struct vmw_resource *res,
- unsigned long offset)
+ unsigned long offset,
+ enum vmw_resource_relocation_type
+ rel_type)
{
struct vmw_resource_relocation *rel;
@@ -425,6 +448,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
rel->res = res;
rel->offset = offset;
+ rel->rel_type = rel_type;
list_add_tail(&rel->head, list);
return 0;
@@ -459,11 +483,23 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
{
struct vmw_resource_relocation *rel;
+ /* Validate the struct vmw_resource_relocation member size */
+ BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
+ BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
+
list_for_each_entry(rel, list, head) {
- if (likely(rel->res != NULL))
+ switch (rel->rel_type) {
+ case vmw_res_rel_normal:
cb[rel->offset] = rel->res->id;
- else
+ break;
+ case vmw_res_rel_nop:
cb[rel->offset] = SVGA_3D_CMD_NOP;
+ break;
+ default:
+ if (rel->res->id == -1)
+ cb[rel->offset] = SVGA_3D_CMD_NOP;
+ break;
+ }
}
}
@@ -655,7 +691,8 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
*p_val = NULL;
ret = vmw_resource_relocation_add(&sw_context->res_relocations,
res,
- id_loc - sw_context->buf_start);
+ id_loc - sw_context->buf_start,
+ vmw_res_rel_normal);
if (unlikely(ret != 0))
return ret;
@@ -721,7 +758,8 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
return vmw_resource_relocation_add
(&sw_context->res_relocations, res,
- id_loc - sw_context->buf_start);
+ id_loc - sw_context->buf_start,
+ vmw_res_rel_normal);
}
ret = vmw_user_resource_lookup_handle(dev_priv,
@@ -2144,7 +2182,8 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
return vmw_resource_relocation_add(&sw_context->res_relocations,
NULL, &cmd->header.id -
- sw_context->buf_start);
+ sw_context->buf_start,
+ vmw_res_rel_nop);
return 0;
}
@@ -2189,7 +2228,8 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
return vmw_resource_relocation_add(&sw_context->res_relocations,
NULL, &cmd->header.id -
- sw_context->buf_start);
+ sw_context->buf_start,
+ vmw_res_rel_nop);
return 0;
}
@@ -2848,8 +2888,7 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
* @header: Pointer to the command header in the command stream.
*
* Check that the view exists, and if it was not created using this
- * command batch, make sure it's validated (present in the device) so that
- * the remove command will not confuse the device.
+ * command batch, conditionally make this command a NOP.
*/
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
@@ -2877,10 +2916,15 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
return ret;
/*
- * Add view to the validate list iff it was not created using this
- * command batch.
+ * If the view wasn't created during this command batch, it might
+ * have been removed due to a context swapout, so add a
+ * relocation to conditionally make this command a NOP to avoid
+ * device errors.
*/
- return vmw_view_res_val_add(sw_context, view);
+ return vmw_resource_relocation_add(&sw_context->res_relocations,
+ view,
+ &cmd->header.id - sw_context->buf_start,
+ vmw_res_rel_cond_nop);
}
/**
@@ -3848,14 +3892,14 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
int ret;
*header = NULL;
- if (!dev_priv->cman || kernel_commands)
- return kernel_commands;
-
if (command_size > SVGA_CB_MAX_SIZE) {
DRM_ERROR("Command buffer is too large.\n");
return ERR_PTR(-EINVAL);
}
+ if (!dev_priv->cman || kernel_commands)
+ return kernel_commands;
+
/* If possible, add a little space for fencing. */
cmdbuf_size = command_size + 512;
cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
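The reworked relocation struct packs the 4-byte-granular offset and the relocation type into one 32-bit word (29 + 3 bits), and the two BUILD_BUG_ON lines make that packing self-checking: the build breaks if SVGA_CB_MAX_SIZE ever outgrows 29 bits or the enum outgrows 3. A minimal sketch of the same compile-time guard pattern, assuming the vmwgfx headers for the named constants (demo_* names are illustrative):

/* Sketch: keep bitfield widths honest with compile-time asserts. */
struct demo_reloc {
	u32 offset:29;		/* must cover the largest command buffer */
	u32 rel_type:3;		/* must cover every enum value */
};

static inline void demo_reloc_checks(void)
{
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
	BUILD_BUG_ON(sizeof(struct demo_reloc) != sizeof(u32));
}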
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 4ed9a4fdf..e92b09d32 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -64,6 +64,9 @@
#define USB_VENDOR_ID_AKAI 0x2011
#define USB_DEVICE_ID_AKAI_MPKMINI2 0x0715
+#define USB_VENDOR_ID_AKAI_09E8 0x09E8
+#define USB_DEVICE_ID_AKAI_09E8_MIDIMIX 0x0031
+
#define USB_VENDOR_ID_ALCOR 0x058f
#define USB_DEVICE_ID_ALCOR_USBRS232 0x9720
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index b4b8c6abb..bb400081e 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -56,6 +56,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 9e02ac963..3978cbb6b 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -388,9 +388,6 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
err_misc_register:
coresight_unregister(drvdata->csdev);
err_devm_kzalloc:
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
- dma_free_coherent(dev, drvdata->size,
- drvdata->vaddr, drvdata->paddr);
return ret;
}
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
index 0fde593ec..5f7968232 100644
--- a/drivers/iio/dac/ad5755.c
+++ b/drivers/iio/dac/ad5755.c
@@ -655,7 +655,7 @@ static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev)
devnr = 0;
for_each_child_of_node(np, pp) {
- if (devnr > AD5755_NUM_CHANNELS) {
+ if (devnr >= AD5755_NUM_CHANNELS) {
dev_err(dev,
"There are too many channels defined in DT\n");
goto error_out;
diff --git a/drivers/iio/light/us5182d.c b/drivers/iio/light/us5182d.c
index 20c40f780..18cf2e29e 100644
--- a/drivers/iio/light/us5182d.c
+++ b/drivers/iio/light/us5182d.c
@@ -894,7 +894,7 @@ static int us5182d_probe(struct i2c_client *client,
goto out_err;
if (data->default_continuous) {
- pm_runtime_set_active(&client->dev);
+ ret = pm_runtime_set_active(&client->dev);
if (ret < 0)
goto out_err;
}
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index f2b776efa..5f88ccd68 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -821,7 +821,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
if (ret) {
pr_err("failed to init MR pool ret= %d\n", ret);
ib_destroy_qp(qp);
- qp = ERR_PTR(ret);
+ return ERR_PTR(ret);
}
}
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 4e4d8317c..c17c9dd7c 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -808,6 +808,13 @@ void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
kfree(priv);
return ERR_PTR(-ENOMEM);
}
+ iowait_init(
+ &priv->s_iowait,
+ 1,
+ _hfi1_do_send,
+ iowait_sleep,
+ iowait_wakeup,
+ iowait_sdma_drained);
setup_timer(&priv->s_rnr_timer, hfi1_rc_rnr_retry, (unsigned long)qp);
qp->s_timer.function = hfi1_rc_timeout;
return priv;
@@ -873,13 +880,6 @@ void notify_qp_reset(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
- iowait_init(
- &priv->s_iowait,
- 1,
- _hfi1_do_send,
- iowait_sleep,
- iowait_wakeup,
- iowait_sdma_drained);
priv->r_adefered = 0;
clear_ahg(qp);
}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index e19537cf4..bff8707a2 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1843,6 +1843,7 @@ static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *de
&leftovers_specs[LEFTOVERS_UC].flow_attr,
dst);
if (IS_ERR(handler_ucast)) {
+ mlx5_del_flow_rule(handler->rule);
kfree(handler);
handler = handler_ucast;
} else {
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index bbf0a163a..54bb655f5 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1131,7 +1131,6 @@ extern spinlock_t qib_devs_lock;
extern struct qib_devdata *qib_lookup(int unit);
extern u32 qib_cpulist_count;
extern unsigned long *qib_cpulist;
-extern u16 qpt_mask;
extern unsigned qib_cc_table_size;
int qib_init(struct qib_devdata *, int);
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index f9b8cd235..99d31efe4 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -41,14 +41,6 @@
#include "qib.h"
-/*
- * mask field which was present in now deleted qib_qpn_table
- * is not present in rvt_qpn_table. Defining the same field
- * as qpt_mask here instead of adding the mask field to
- * rvt_qpn_table.
- */
-u16 qpt_mask;
-
static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
struct rvt_qpn_map *map, unsigned off)
{
@@ -57,7 +49,7 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
struct rvt_qpn_map *map, unsigned off,
- unsigned n)
+ unsigned n, u16 qpt_mask)
{
if (qpt_mask) {
off++;
@@ -179,6 +171,7 @@ int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
verbs_dev);
+ u16 qpt_mask = dd->qpn_mask;
if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
unsigned n;
@@ -215,7 +208,7 @@ int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
goto bail;
}
offset = find_next_offset(qpt, map, offset,
- dd->n_krcv_queues);
+ dd->n_krcv_queues, qpt_mask);
qpn = mk_qpn(qpt, map, offset);
/*
* This test differs from alloc_pidmap().
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index fd1dfbce5..b2b845f9f 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1606,8 +1606,6 @@ int qib_register_ib_device(struct qib_devdata *dd)
/* Only need to initialize non-zero fields. */
setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);
- qpt_mask = dd->qpn_mask;
-
INIT_LIST_HEAD(&dev->piowait);
INIT_LIST_HEAD(&dev->dmawait);
INIT_LIST_HEAD(&dev->txwait);
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 870b4f212..5911c534c 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -501,12 +501,9 @@ static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
*/
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
enum ib_qp_type type)
- __releases(&qp->s_lock)
- __releases(&qp->s_hlock)
- __releases(&qp->r_lock)
- __acquires(&qp->r_lock)
- __acquires(&qp->s_hlock)
- __acquires(&qp->s_lock)
+ __must_hold(&qp->r_lock)
+ __must_hold(&qp->s_hlock)
+ __must_hold(&qp->s_lock)
{
if (qp->state != IB_QPS_RESET) {
qp->state = IB_QPS_RESET;
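The annotation swap above is purely for sparse: the old __releases/__acquires triple described a function that drops and retakes the three locks internally, whereas __must_hold documents the real contract, entered and exited with all three held. A hedged sketch of the convention (kernel annotations from <linux/compiler.h>; demo_* names are illustrative):

/* Sketch: sparse lock-context annotations on a definition. */
static void demo_needs_lock(spinlock_t *lock)
	__must_hold(lock)
{
	/* caller acquired lock; it stays held for the whole body */
}

static void demo_drops_lock(spinlock_t *lock)
	__releases(lock)
{
	spin_unlock(lock);	/* caller entered with lock held */
}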
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 3322ed750..6b07d4bca 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1400,7 +1400,9 @@ static int srp_map_sg_entry(struct srp_map_state *state,
while (dma_len) {
unsigned offset = dma_addr & ~dev->mr_page_mask;
- if (state->npages == dev->max_pages_per_mr || offset != 0) {
+
+ if (state->npages == dev->max_pages_per_mr ||
+ (state->npages > 0 && offset != 0)) {
ret = srp_map_finish_fmr(state, ch);
if (ret)
return ret;
@@ -1417,12 +1419,12 @@ static int srp_map_sg_entry(struct srp_map_state *state,
}
/*
- * If the last entry of the MR wasn't a full page, then we need to
+ * If the end of the MR is not on a page boundary then we need to
* close it out and start a new one -- we can only merge at page
* boundaries.
*/
ret = 0;
- if (len != dev->mr_page_size)
+ if ((dma_addr & ~dev->mr_page_mask) != 0)
ret = srp_map_finish_fmr(state, ch);
return ret;
}
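Both reworked tests encode the same constraint: an FMR can only describe a region that starts and ends on mr_page_size boundaries, so the mapper must flush whenever a scatterlist entry would break that alignment. A sketch of the checks with 4 KiB pages (the page size and the 512-page limit are illustrative stand-ins for dev->mr_page_size and dev->max_pages_per_mr):

/* Sketch: boundary tests for merging sg entries into one FMR. */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_PAGE_SIZE	4096ULL
#define DEMO_PAGE_MASK	(~(DEMO_PAGE_SIZE - 1))
#define DEMO_MAX_PAGES	512

static bool demo_must_flush(unsigned int npages, uint64_t dma_addr)
{
	uint64_t offset = dma_addr & ~DEMO_PAGE_MASK;

	/* flush if the MR is full, or a later entry starts mid-page */
	return npages == DEMO_MAX_PAGES || (npages > 0 && offset != 0);
}

static bool demo_open_tail(uint64_t dma_addr)
{
	/* after the loop: the region ends mid-page, so close it out */
	return (dma_addr & ~DEMO_PAGE_MASK) != 0;
}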
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 08e252a42..ff8c10749 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1159,6 +1159,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
},
},
+ {
+ /* Fujitsu H760 also has a middle button */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
+ },
+ },
#endif
{ }
};
@@ -1503,10 +1510,10 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
},
},
{
- /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
+ /* Fujitsu H760 does not work with crc_enabled == 0 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
},
},
{
@@ -1517,6 +1524,20 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
},
},
{
+ /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
+ },
+ },
+ {
+ /* Fujitsu LIFEBOOK E556 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E556"),
+ },
+ },
+ {
/* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
diff --git a/drivers/input/serio/i8042-io.h b/drivers/input/serio/i8042-io.h
index a5eed2ade..34da81c00 100644
--- a/drivers/input/serio/i8042-io.h
+++ b/drivers/input/serio/i8042-io.h
@@ -81,7 +81,7 @@ static inline int i8042_platform_init(void)
return -EBUSY;
#endif
- i8042_reset = 1;
+ i8042_reset = I8042_RESET_ALWAYS;
return 0;
}
diff --git a/drivers/input/serio/i8042-ip22io.h b/drivers/input/serio/i8042-ip22io.h
index ee1ad27d6..08a1c10a1 100644
--- a/drivers/input/serio/i8042-ip22io.h
+++ b/drivers/input/serio/i8042-ip22io.h
@@ -61,7 +61,7 @@ static inline int i8042_platform_init(void)
return -EBUSY;
#endif
- i8042_reset = 1;
+ i8042_reset = I8042_RESET_ALWAYS;
return 0;
}
diff --git a/drivers/input/serio/i8042-ppcio.h b/drivers/input/serio/i8042-ppcio.h
index f708c75d1..1aabea433 100644
--- a/drivers/input/serio/i8042-ppcio.h
+++ b/drivers/input/serio/i8042-ppcio.h
@@ -44,7 +44,7 @@ static inline void i8042_write_command(int val)
static inline int i8042_platform_init(void)
{
- i8042_reset = 1;
+ i8042_reset = I8042_RESET_ALWAYS;
return 0;
}
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index afcd1c1a0..6231d6386 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -130,7 +130,7 @@ static int __init i8042_platform_init(void)
}
}
- i8042_reset = 1;
+ i8042_reset = I8042_RESET_ALWAYS;
return 0;
}
diff --git a/drivers/input/serio/i8042-unicore32io.h b/drivers/input/serio/i8042-unicore32io.h
index 73f5cc124..455747552 100644
--- a/drivers/input/serio/i8042-unicore32io.h
+++ b/drivers/input/serio/i8042-unicore32io.h
@@ -61,7 +61,7 @@ static inline int i8042_platform_init(void)
if (!request_mem_region(I8042_REGION_START, I8042_REGION_SIZE, "i8042"))
return -EBUSY;
- i8042_reset = 1;
+ i8042_reset = I8042_RESET_ALWAYS;
return 0;
}
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 68f5f4a0f..f4bfb4b2d 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -510,6 +510,90 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
{ }
};
+/*
+ * On some Asus laptops, just running self tests causes problems.
+ */
+static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "A455LD"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K401LB"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K501LB"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K501LX"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "R409L"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "V502LX"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X302LA"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X450LD"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X455LAB"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X455LDB"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X455LF"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Z450LA"),
+ },
+ },
+ { }
+};
static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
{
/* MSI Wind U-100 */
@@ -1072,12 +1156,18 @@ static int __init i8042_platform_init(void)
return retval;
#if defined(__ia64__)
- i8042_reset = true;
+ i8042_reset = I8042_RESET_ALWAYS;
#endif
#ifdef CONFIG_X86
- if (dmi_check_system(i8042_dmi_reset_table))
- i8042_reset = true;
+ /* Honor module parameter when value is not default */
+ if (i8042_reset == I8042_RESET_DEFAULT) {
+ if (dmi_check_system(i8042_dmi_reset_table))
+ i8042_reset = I8042_RESET_ALWAYS;
+
+ if (dmi_check_system(i8042_dmi_noselftest_table))
+ i8042_reset = I8042_RESET_NEVER;
+ }
if (dmi_check_system(i8042_dmi_noloop_table))
i8042_noloop = true;
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 405252a88..89abfdb53 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -48,9 +48,39 @@ static bool i8042_unlock;
module_param_named(unlock, i8042_unlock, bool, 0);
MODULE_PARM_DESC(unlock, "Ignore keyboard lock.");
-static bool i8042_reset;
-module_param_named(reset, i8042_reset, bool, 0);
-MODULE_PARM_DESC(reset, "Reset controller during init and cleanup.");
+enum i8042_controller_reset_mode {
+ I8042_RESET_NEVER,
+ I8042_RESET_ALWAYS,
+ I8042_RESET_ON_S2RAM,
+#define I8042_RESET_DEFAULT I8042_RESET_ON_S2RAM
+};
+static enum i8042_controller_reset_mode i8042_reset = I8042_RESET_DEFAULT;
+static int i8042_set_reset(const char *val, const struct kernel_param *kp)
+{
+ enum i8042_controller_reset_mode *arg = kp->arg;
+ int error;
+ bool reset;
+
+ if (val) {
+ error = kstrtobool(val, &reset);
+ if (error)
+ return error;
+ } else {
+ reset = true;
+ }
+
+ *arg = reset ? I8042_RESET_ALWAYS : I8042_RESET_NEVER;
+ return 0;
+}
+
+static const struct kernel_param_ops param_ops_reset_param = {
+ .flags = KERNEL_PARAM_OPS_FL_NOARG,
+ .set = i8042_set_reset,
+};
+#define param_check_reset_param(name, p) \
+ __param_check(name, p, enum i8042_controller_reset_mode)
+module_param_named(reset, i8042_reset, reset_param, 0);
+MODULE_PARM_DESC(reset, "Reset controller on resume, cleanup or both");
static bool i8042_direct;
module_param_named(direct, i8042_direct, bool, 0);
@@ -1019,7 +1049,7 @@ static int i8042_controller_init(void)
* Reset the controller and reset CRT to the original value set by BIOS.
*/
-static void i8042_controller_reset(bool force_reset)
+static void i8042_controller_reset(bool s2r_wants_reset)
{
i8042_flush();
@@ -1044,8 +1074,10 @@ static void i8042_controller_reset(bool force_reset)
* Reset the controller if requested.
*/
- if (i8042_reset || force_reset)
+ if (i8042_reset == I8042_RESET_ALWAYS ||
+ (i8042_reset == I8042_RESET_ON_S2RAM && s2r_wants_reset)) {
i8042_controller_selftest();
+ }
/*
* Restore the original control register setting.
@@ -1110,7 +1142,7 @@ static void i8042_dritek_enable(void)
* before suspending.
*/
-static int i8042_controller_resume(bool force_reset)
+static int i8042_controller_resume(bool s2r_wants_reset)
{
int error;
@@ -1118,7 +1150,8 @@ static int i8042_controller_resume(bool force_reset)
if (error)
return error;
- if (i8042_reset || force_reset) {
+ if (i8042_reset == I8042_RESET_ALWAYS ||
+ (i8042_reset == I8042_RESET_ON_S2RAM && s2r_wants_reset)) {
error = i8042_controller_selftest();
if (error)
return error;
@@ -1195,7 +1228,7 @@ static int i8042_pm_resume_noirq(struct device *dev)
static int i8042_pm_resume(struct device *dev)
{
- bool force_reset;
+ bool want_reset;
int i;
for (i = 0; i < I8042_NUM_PORTS; i++) {
@@ -1218,9 +1251,9 @@ static int i8042_pm_resume(struct device *dev)
* off control to the platform firmware, otherwise we can simply restore
* the mode.
*/
- force_reset = pm_resume_via_firmware();
+ want_reset = pm_resume_via_firmware();
- return i8042_controller_resume(force_reset);
+ return i8042_controller_resume(want_reset);
}
static int i8042_pm_thaw(struct device *dev)
@@ -1482,7 +1515,7 @@ static int __init i8042_probe(struct platform_device *dev)
i8042_platform_device = dev;
- if (i8042_reset) {
+ if (i8042_reset == I8042_RESET_ALWAYS) {
error = i8042_controller_selftest();
if (error)
return error;
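With the custom kernel_param_ops above (KERNEL_PARAM_OPS_FL_NOARG lets the parameter appear without a value), the boot-time behaviour reads directly off i8042_set_reset() and i8042_platform_init():

i8042.reset        -> I8042_RESET_ALWAYS   (flag form, val == NULL)
i8042.reset=1      -> I8042_RESET_ALWAYS   (kstrtobool -> true)
i8042.reset=0      -> I8042_RESET_NEVER    (kstrtobool -> false)
(no parameter)     -> I8042_RESET_ON_S2RAM, except that ia64 forces
                      ALWAYS, and on x86 the DMI reset/noselftest
                      tables may override the default

Because the DMI tables only apply while the value is still I8042_RESET_DEFAULT, an explicit module parameter always wins over the quirk tables.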
diff --git a/drivers/irqchip/irq-eznps.c b/drivers/irqchip/irq-eznps.c
index efbf0e430..ebc2b0b15 100644
--- a/drivers/irqchip/irq-eznps.c
+++ b/drivers/irqchip/irq-eznps.c
@@ -85,7 +85,7 @@ static void nps400_irq_eoi_global(struct irq_data *irqd)
nps_ack_gic();
}
-static void nps400_irq_eoi(struct irq_data *irqd)
+static void nps400_irq_ack(struct irq_data *irqd)
{
unsigned int __maybe_unused irq = irqd_to_hwirq(irqd);
@@ -103,7 +103,7 @@ static struct irq_chip nps400_irq_chip_percpu = {
.name = "NPS400 IC",
.irq_mask = nps400_irq_mask,
.irq_unmask = nps400_irq_unmask,
- .irq_eoi = nps400_irq_eoi,
+ .irq_ack = nps400_irq_ack,
};
static int nps400_irq_map(struct irq_domain *d, unsigned int virq,
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index da6c0ba61..708a2604a 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -153,7 +153,7 @@ static void gic_enable_redist(bool enable)
return; /* No PM support in this redistributor */
}
- while (count--) {
+ while (--count) {
val = readl_relaxed(rbase + GICR_WAKER);
if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
break;
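The one-character change matters because the timeout check that follows this loop in the function tests count for zero. A sketch of what each form leaves behind on exhaustion:

/* Sketch: loop-counter value after the poll budget runs out. */
#include <stdio.h>

int main(void)
{
	unsigned int count = 3;

	while (count--)		/* tests, then decrements */
		;
	printf("post-decrement: count = %u\n", count);	/* UINT_MAX */

	count = 3;
	while (--count)		/* decrements, then tests */
		;
	printf("pre-decrement:  count = %u\n", count);	/* 0 */
	return 0;
}

With the post-decrement form an exhausted loop leaves count wrapped to UINT_MAX, so a later "if (!count)" timeout report can never fire; the pre-decrement form exits with count == 0 exactly when the poll timed out.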
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 874295757..6fc8923bd 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -113,8 +113,7 @@ struct iv_tcw_private {
* and encrypts / decrypts at the same time.
*/
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
- DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
- DM_CRYPT_EXIT_THREAD};
+ DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
/*
* The fields in here must be read only after initialization.
@@ -1207,18 +1206,20 @@ continue_locked:
if (!RB_EMPTY_ROOT(&cc->write_tree))
goto pop_from_list;
- if (unlikely(test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags))) {
- spin_unlock_irq(&cc->write_thread_wait.lock);
- break;
- }
-
- __set_current_state(TASK_INTERRUPTIBLE);
+ set_current_state(TASK_INTERRUPTIBLE);
__add_wait_queue(&cc->write_thread_wait, &wait);
spin_unlock_irq(&cc->write_thread_wait.lock);
+ if (unlikely(kthread_should_stop())) {
+ set_task_state(current, TASK_RUNNING);
+ remove_wait_queue(&cc->write_thread_wait, &wait);
+ break;
+ }
+
schedule();
+ set_task_state(current, TASK_RUNNING);
spin_lock_irq(&cc->write_thread_wait.lock);
__remove_wait_queue(&cc->write_thread_wait, &wait);
goto continue_locked;
@@ -1533,13 +1534,8 @@ static void crypt_dtr(struct dm_target *ti)
if (!cc)
return;
- if (cc->write_thread) {
- spin_lock_irq(&cc->write_thread_wait.lock);
- set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags);
- wake_up_locked(&cc->write_thread_wait);
- spin_unlock_irq(&cc->write_thread_wait.lock);
+ if (cc->write_thread)
kthread_stop(cc->write_thread);
- }
if (cc->io_queue)
destroy_workqueue(cc->io_queue);
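The rework replaces the private DM_CRYPT_EXIT_THREAD flag with kthread_should_stop(), which avoids the lost-wakeup race: kthread_stop() sets the should-stop flag before waking the thread, so testing it after setting TASK_INTERRUPTIBLE and joining the wait queue cannot miss the stop request. A generic sketch of the same pattern, assuming <linux/kthread.h>, <linux/sched.h> and <linux/wait.h> (demo_worker is illustrative):

/* Sketch: a stop-safe kthread wait loop. */
static int demo_worker(void *data)
{
	wait_queue_head_t *wq = data;
	DECLARE_WAITQUEUE(wait, current);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(wq, &wait);

		/* checked after queueing: a concurrent kthread_stop()
		 * has already set the flag or will wake us from sleep */
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			remove_wait_queue(wq, &wait);
			break;
		}

		schedule();
		remove_wait_queue(wq, &wait);
		/* ... handle queued work here ... */
	}
	return 0;
}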
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index ac734e5bb..15db5e9c5 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1521,10 +1521,10 @@ static void activate_path(struct work_struct *work)
{
struct pgpath *pgpath =
container_of(work, struct pgpath, activate_path.work);
+ struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
- if (pgpath->is_active)
- scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
- pg_init_done, pgpath);
+ if (pgpath->is_active && !blk_queue_dying(q))
+ scsi_dh_activate(q, pg_init_done, pgpath);
else
pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 1ca7463e8..5da86c8b6 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -73,15 +73,24 @@ static void dm_old_start_queue(struct request_queue *q)
spin_unlock_irqrestore(q->queue_lock, flags);
}
+static void dm_mq_start_queue(struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ queue_flag_clear(QUEUE_FLAG_STOPPED, q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ blk_mq_start_stopped_hw_queues(q, true);
+ blk_mq_kick_requeue_list(q);
+}
+
void dm_start_queue(struct request_queue *q)
{
if (!q->mq_ops)
dm_old_start_queue(q);
- else {
- queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, q);
- blk_mq_start_stopped_hw_queues(q, true);
- blk_mq_kick_requeue_list(q);
- }
+ else
+ dm_mq_start_queue(q);
}
static void dm_old_stop_queue(struct request_queue *q)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index fa9b1cb44..0f2928b31 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1873,6 +1873,7 @@ EXPORT_SYMBOL_GPL(dm_device_name);
static void __dm_destroy(struct mapped_device *md, bool wait)
{
+ struct request_queue *q = dm_get_md_queue(md);
struct dm_table *map;
int srcu_idx;
@@ -1883,6 +1884,10 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
+ spin_lock_irq(q->queue_lock);
+ queue_flag_set(QUEUE_FLAG_DYING, q);
+ spin_unlock_irq(q->queue_lock);
+
if (dm_request_based(md) && md->kworker_task)
flush_kthread_worker(&md->kworker);
@@ -2249,10 +2254,11 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map)
int dm_resume(struct mapped_device *md)
{
- int r = -EINVAL;
+ int r;
struct dm_table *map = NULL;
retry:
+ r = -EINVAL;
mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
if (!dm_suspended_md(md))
@@ -2276,8 +2282,6 @@ retry:
goto out;
clear_bit(DMF_SUSPENDED, &md->flags);
-
- r = 0;
out:
mutex_unlock(&md->suspend_lock);
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index 41325328a..fe79358b0 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -71,25 +71,27 @@ static struct regdata mb86a20s_init1[] = {
};
static struct regdata mb86a20s_init2[] = {
- { 0x28, 0x22 }, { 0x29, 0x00 }, { 0x2a, 0x1f }, { 0x2b, 0xf0 },
+ { 0x50, 0xd1 }, { 0x51, 0x22 },
+ { 0x39, 0x01 },
+ { 0x71, 0x00 },
{ 0x3b, 0x21 },
- { 0x3c, 0x38 },
+ { 0x3c, 0x3a },
{ 0x01, 0x0d },
- { 0x04, 0x08 }, { 0x05, 0x03 },
+ { 0x04, 0x08 }, { 0x05, 0x05 },
{ 0x04, 0x0e }, { 0x05, 0x00 },
- { 0x04, 0x0f }, { 0x05, 0x37 },
- { 0x04, 0x0b }, { 0x05, 0x78 },
+ { 0x04, 0x0f }, { 0x05, 0x14 },
+ { 0x04, 0x0b }, { 0x05, 0x8c },
{ 0x04, 0x00 }, { 0x05, 0x00 },
- { 0x04, 0x01 }, { 0x05, 0x1e },
- { 0x04, 0x02 }, { 0x05, 0x07 },
- { 0x04, 0x03 }, { 0x05, 0xd0 },
+ { 0x04, 0x01 }, { 0x05, 0x07 },
+ { 0x04, 0x02 }, { 0x05, 0x0f },
+ { 0x04, 0x03 }, { 0x05, 0xa0 },
{ 0x04, 0x09 }, { 0x05, 0x00 },
{ 0x04, 0x0a }, { 0x05, 0xff },
- { 0x04, 0x27 }, { 0x05, 0x00 },
+ { 0x04, 0x27 }, { 0x05, 0x64 },
{ 0x04, 0x28 }, { 0x05, 0x00 },
- { 0x04, 0x1e }, { 0x05, 0x00 },
- { 0x04, 0x29 }, { 0x05, 0x64 },
- { 0x04, 0x32 }, { 0x05, 0x02 },
+ { 0x04, 0x1e }, { 0x05, 0xff },
+ { 0x04, 0x29 }, { 0x05, 0x0a },
+ { 0x04, 0x32 }, { 0x05, 0x0a },
{ 0x04, 0x14 }, { 0x05, 0x02 },
{ 0x04, 0x04 }, { 0x05, 0x00 },
{ 0x04, 0x05 }, { 0x05, 0x22 },
@@ -97,8 +99,6 @@ static struct regdata mb86a20s_init2[] = {
{ 0x04, 0x07 }, { 0x05, 0xd8 },
{ 0x04, 0x12 }, { 0x05, 0x00 },
{ 0x04, 0x13 }, { 0x05, 0xff },
- { 0x04, 0x15 }, { 0x05, 0x4e },
- { 0x04, 0x16 }, { 0x05, 0x20 },
/*
* On this demod, when the bit count reaches the count below,
@@ -152,42 +152,36 @@ static struct regdata mb86a20s_init2[] = {
{ 0x50, 0x51 }, { 0x51, 0x04 }, /* MER symbol 4 */
{ 0x45, 0x04 }, /* CN symbol 4 */
{ 0x48, 0x04 }, /* CN manual mode */
-
+ { 0x50, 0xd5 }, { 0x51, 0x01 },
{ 0x50, 0xd6 }, { 0x51, 0x1f },
{ 0x50, 0xd2 }, { 0x51, 0x03 },
- { 0x50, 0xd7 }, { 0x51, 0xbf },
- { 0x28, 0x74 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0xff },
- { 0x28, 0x46 }, { 0x29, 0x00 }, { 0x2a, 0x1a }, { 0x2b, 0x0c },
-
- { 0x04, 0x40 }, { 0x05, 0x00 },
- { 0x28, 0x00 }, { 0x2b, 0x08 },
- { 0x28, 0x05 }, { 0x2b, 0x00 },
+ { 0x50, 0xd7 }, { 0x51, 0x3f },
{ 0x1c, 0x01 },
- { 0x28, 0x06 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x1f },
- { 0x28, 0x07 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x18 },
- { 0x28, 0x08 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x12 },
- { 0x28, 0x09 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x30 },
- { 0x28, 0x0a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x37 },
- { 0x28, 0x0b }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x02 },
- { 0x28, 0x0c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x09 },
- { 0x28, 0x0d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x06 },
- { 0x28, 0x0e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x7b },
- { 0x28, 0x0f }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x76 },
- { 0x28, 0x10 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x7d },
- { 0x28, 0x11 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x08 },
- { 0x28, 0x12 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0b },
- { 0x28, 0x13 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x00 },
- { 0x28, 0x14 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xf2 },
- { 0x28, 0x15 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xf3 },
- { 0x28, 0x16 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x05 },
- { 0x28, 0x17 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x16 },
- { 0x28, 0x18 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0f },
- { 0x28, 0x19 }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xef },
- { 0x28, 0x1a }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xd8 },
- { 0x28, 0x1b }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xf1 },
- { 0x28, 0x1c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x3d },
- { 0x28, 0x1d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x94 },
- { 0x28, 0x1e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0xba },
+ { 0x28, 0x06 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x03 },
+ { 0x28, 0x07 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0d },
+ { 0x28, 0x08 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x02 },
+ { 0x28, 0x09 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x01 },
+ { 0x28, 0x0a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x21 },
+ { 0x28, 0x0b }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x29 },
+ { 0x28, 0x0c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x16 },
+ { 0x28, 0x0d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x31 },
+ { 0x28, 0x0e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0e },
+ { 0x28, 0x0f }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x4e },
+ { 0x28, 0x10 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x46 },
+ { 0x28, 0x11 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0f },
+ { 0x28, 0x12 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x56 },
+ { 0x28, 0x13 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x35 },
+ { 0x28, 0x14 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbe },
+ { 0x28, 0x15 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0x84 },
+ { 0x28, 0x16 }, { 0x29, 0x00 }, { 0x2a, 0x03 }, { 0x2b, 0xee },
+ { 0x28, 0x17 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x98 },
+ { 0x28, 0x18 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x9f },
+ { 0x28, 0x19 }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xb2 },
+ { 0x28, 0x1a }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0xc2 },
+ { 0x28, 0x1b }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0x4a },
+ { 0x28, 0x1c }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbc },
+ { 0x28, 0x1d }, { 0x29, 0x00 }, { 0x2a, 0x04 }, { 0x2b, 0xba },
+ { 0x28, 0x1e }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0x14 },
{ 0x50, 0x1e }, { 0x51, 0x5d },
{ 0x50, 0x22 }, { 0x51, 0x00 },
{ 0x50, 0x23 }, { 0x51, 0xc8 },
@@ -196,9 +190,7 @@ static struct regdata mb86a20s_init2[] = {
{ 0x50, 0x26 }, { 0x51, 0x00 },
{ 0x50, 0x27 }, { 0x51, 0xc3 },
{ 0x50, 0x39 }, { 0x51, 0x02 },
- { 0xec, 0x0f },
- { 0xeb, 0x1f },
- { 0x28, 0x6a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x00 },
+ { 0x50, 0xd5 }, { 0x51, 0x01 },
{ 0xd0, 0x00 },
};
@@ -318,7 +310,11 @@ static int mb86a20s_read_status(struct dvb_frontend *fe, enum fe_status *status)
if (val >= 7)
*status |= FE_HAS_SYNC;
- if (val >= 8) /* Maybe 9? */
+ /*
+ * Actually, on state S8 it starts receiving TS, but the TS
+ * output only reaches its normal state after the transition to S9.
+ */
+ if (val >= 9)
*status |= FE_HAS_LOCK;
dev_dbg(&state->i2c->dev, "%s: Status = 0x%02x (state = %d)\n",
@@ -2058,6 +2054,11 @@ static void mb86a20s_release(struct dvb_frontend *fe)
kfree(state);
}
+static int mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
+{
+ return DVBFE_ALGO_HW;
+}
+
static struct dvb_frontend_ops mb86a20s_ops;
struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
@@ -2130,6 +2131,7 @@ static struct dvb_frontend_ops mb86a20s_ops = {
.read_status = mb86a20s_read_status_and_stats,
.read_signal_strength = mb86a20s_read_signal_strength_from_cache,
.tune = mb86a20s_tune,
+ .get_frontend_algo = mb86a20s_get_frontend_algo,
};
MODULE_DESCRIPTION("DVB Frontend module for Fujitsu mb86A20s hardware");
diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c
index 491913778..2f52d66b4 100644
--- a/drivers/media/usb/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c
@@ -1264,7 +1264,10 @@ int cx231xx_set_agc_analog_digital_mux_select(struct cx231xx *dev,
dev->board.agc_analog_digital_select_gpio,
analog_or_digital);
- return status;
+ if (status < 0)
+ return status;
+
+ return 0;
}
int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3)
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index 3637e7cc4..48fe08cca 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -486,7 +486,7 @@ struct cx231xx_board cx231xx_boards[] = {
.output_mode = OUT_MODE_VIP11,
.demod_xfer_mode = 0,
.ctl_pin_status_mask = 0xFFFFFFC4,
- .agc_analog_digital_select_gpio = 0x00, /* According with PV cxPolaris.inf file */
+ .agc_analog_digital_select_gpio = 0x1c,
.tuner_sif_gpio = -1,
.tuner_scl_gpio = -1,
.tuner_sda_gpio = -1,
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index 630f4fc51..ea9a99e41 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -712,6 +712,7 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode)
break;
case CX231XX_BOARD_CNXT_RDE_253S:
case CX231XX_BOARD_CNXT_RDU_253S:
+ case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1);
break;
case CX231XX_BOARD_HAUPPAUGE_EXETER:
@@ -738,7 +739,7 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode)
case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL:
case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC:
- errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
+ errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
break;
default:
break;
@@ -1301,15 +1302,29 @@ int cx231xx_dev_init(struct cx231xx *dev)
dev->i2c_bus[2].i2c_reserve = 0;
/* register I2C buses */
- cx231xx_i2c_register(&dev->i2c_bus[0]);
- cx231xx_i2c_register(&dev->i2c_bus[1]);
- cx231xx_i2c_register(&dev->i2c_bus[2]);
+ errCode = cx231xx_i2c_register(&dev->i2c_bus[0]);
+ if (errCode < 0)
+ return errCode;
+ errCode = cx231xx_i2c_register(&dev->i2c_bus[1]);
+ if (errCode < 0)
+ return errCode;
+ errCode = cx231xx_i2c_register(&dev->i2c_bus[2]);
+ if (errCode < 0)
+ return errCode;
errCode = cx231xx_i2c_mux_create(dev);
+ if (errCode < 0) {
+ dev_err(dev->dev,
+ "%s: Failed to create I2C mux\n", __func__);
+ return errCode;
+ }
+ errCode = cx231xx_i2c_mux_register(dev, 0);
+ if (errCode < 0)
+ return errCode;
+
+ errCode = cx231xx_i2c_mux_register(dev, 1);
if (errCode < 0)
return errCode;
- cx231xx_i2c_mux_register(dev, 0);
- cx231xx_i2c_mux_register(dev, 1);
/* scan the real bus segments in the order of physical port numbers */
cx231xx_do_i2c_scan(dev, I2C_0);
diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
index d34bc3530..2e3cf012e 100644
--- a/drivers/memstick/host/rtsx_usb_ms.c
+++ b/drivers/memstick/host/rtsx_usb_ms.c
@@ -524,6 +524,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work)
int rc;
if (!host->req) {
+ pm_runtime_get_sync(ms_dev(host));
do {
rc = memstick_next_req(msh, &host->req);
dev_dbg(ms_dev(host), "next req %d\n", rc);
@@ -544,6 +545,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work)
host->req->error);
}
} while (!rc);
+ pm_runtime_put(ms_dev(host));
}
}
@@ -570,6 +572,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh,
dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n",
__func__, param, value);
+ pm_runtime_get_sync(ms_dev(host));
mutex_lock(&ucr->dev_mutex);
err = rtsx_usb_card_exclusive_check(ucr, RTSX_USB_MS_CARD);
@@ -635,6 +638,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh,
}
out:
mutex_unlock(&ucr->dev_mutex);
+ pm_runtime_put(ms_dev(host));
/* power-on delay */
if (param == MEMSTICK_POWER && value == MEMSTICK_POWER_ON)
@@ -681,6 +685,7 @@ static int rtsx_usb_detect_ms_card(void *__host)
int err;
for (;;) {
+ pm_runtime_get_sync(ms_dev(host));
mutex_lock(&ucr->dev_mutex);
/* Check pending MS card changes */
@@ -703,6 +708,7 @@ static int rtsx_usb_detect_ms_card(void *__host)
}
poll_again:
+ pm_runtime_put(ms_dev(host));
if (host->eject)
break;
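All four rtsx_usb_ms hunks apply the same rule: hold a runtime-PM reference across every window that touches the USB device, so the parent cannot autosuspend mid-transfer. A hedged sketch of the pairing, assuming <linux/pm_runtime.h> (demo_card_io is illustrative):

/* Sketch: bracketing hardware access with runtime-PM references. */
static int demo_card_io(struct device *dev)
{
	int err;

	pm_runtime_get_sync(dev);	/* resume the parent if suspended */

	err = 0;			/* ... perform the card I/O ... */

	pm_runtime_put(dev);		/* re-enable autosuspend */
	return err;
}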
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index f3d34b941..af23d7dfe 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -229,6 +229,14 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
if (ctx->status == STARTED)
goto out; /* already started */
+ /*
+ * Increment the mapped context count for adapter. This also checks
+ * if adapter_context_lock is taken.
+ */
+ rc = cxl_adapter_context_get(ctx->afu->adapter);
+ if (rc)
+ goto out;
+
if (task) {
ctx->pid = get_task_pid(task, PIDTYPE_PID);
ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
@@ -240,6 +248,7 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
put_pid(ctx->pid);
+ cxl_adapter_context_put(ctx->afu->adapter);
cxl_ctx_put();
goto out;
}
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index c466ee2b0..5e506c191 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -238,6 +238,9 @@ int __detach_context(struct cxl_context *ctx)
put_pid(ctx->glpid);
cxl_ctx_put();
+
+ /* Decrease the attached context count on the adapter */
+ cxl_adapter_context_put(ctx->afu->adapter);
return 0;
}
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 344a0ff8f..19aa2aca9 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -615,6 +615,14 @@ struct cxl {
bool perst_select_user;
bool perst_same_image;
bool psl_timebase_synced;
+
+ /*
+ * number of contexts mapped onto this card. Possible values are:
+ * >0: Number of contexts mapped and new one can be mapped.
+ * 0: No active contexts and new ones can be mapped.
+ * -1: No contexts mapped and new ones cannot be mapped.
+ */
+ atomic_t contexts_num;
};
int cxl_pci_alloc_one_irq(struct cxl *adapter);
@@ -940,4 +948,20 @@ bool cxl_pci_is_vphb_device(struct pci_dev *dev);
/* decode AFU error bits in the PSL register PSL_SERR_An */
void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr);
+
+/*
+ * Increments the number of attached contexts on an adapter.
+ * If the adapter_context_lock is taken, this returns -EBUSY.
+ */
+int cxl_adapter_context_get(struct cxl *adapter);
+
+/* Decrements the number of attached contexts on an adapter */
+void cxl_adapter_context_put(struct cxl *adapter);
+
+/* If no active contexts then prevents contexts from being attached */
+int cxl_adapter_context_lock(struct cxl *adapter);
+
+/* Unlock the contexts-lock if taken. Warn and force unlock otherwise */
+void cxl_adapter_context_unlock(struct cxl *adapter);
+
#endif
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 5fb9894b1..d0b421f49 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -205,11 +205,22 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
ctx->pid = get_task_pid(current, PIDTYPE_PID);
ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
+ /*
+ * Increment the mapped context count for adapter. This also checks
+ * if adapter_context_lock is taken.
+ */
+ rc = cxl_adapter_context_get(ctx->afu->adapter);
+ if (rc) {
+ afu_release_irqs(ctx, ctx);
+ goto out;
+ }
+
trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
amr))) {
afu_release_irqs(ctx, ctx);
+ cxl_adapter_context_put(ctx->afu->adapter);
goto out;
}
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index 9aa58a77a..3e102cd6e 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -1152,6 +1152,9 @@ struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_devic
if ((rc = cxl_sysfs_adapter_add(adapter)))
goto err_put1;
+ /* release the context lock as the adapter is configured */
+ cxl_adapter_context_unlock(adapter);
+
return adapter;
err_put1:
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index d9be23b24..62e0dfb5f 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -243,8 +243,10 @@ struct cxl *cxl_alloc_adapter(void)
if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
goto err2;
- return adapter;
+ /* start with context lock taken */
+ atomic_set(&adapter->contexts_num, -1);
+ return adapter;
err2:
cxl_remove_adapter_nr(adapter);
err1:
@@ -286,6 +288,44 @@ int cxl_afu_select_best_mode(struct cxl_afu *afu)
return 0;
}
+int cxl_adapter_context_get(struct cxl *adapter)
+{
+ int rc;
+
+ rc = atomic_inc_unless_negative(&adapter->contexts_num);
+ return rc >= 0 ? 0 : -EBUSY;
+}
+
+void cxl_adapter_context_put(struct cxl *adapter)
+{
+ atomic_dec_if_positive(&adapter->contexts_num);
+}
+
+int cxl_adapter_context_lock(struct cxl *adapter)
+{
+ int rc;
+ /* no active contexts -> contexts_num == 0 */
+ rc = atomic_cmpxchg(&adapter->contexts_num, 0, -1);
+ return rc ? -EBUSY : 0;
+}
+
+void cxl_adapter_context_unlock(struct cxl *adapter)
+{
+ int val = atomic_cmpxchg(&adapter->contexts_num, -1, 0);
+
+ /*
+ * contexts lock taken -> contexts_num == -1
+ * If not true then show a warning and force reset the lock.
+ * This will happen when context_unlock was requested without
+ * doing a context_lock.
+ */
+ if (val != -1) {
+ atomic_set(&adapter->contexts_num, 0);
+ WARN(1, "Adapter context unlocked with %d active contexts",
+ val);
+ }
+}
+
static int __init init_cxl(void)
{
int rc = 0;
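contexts_num is a tri-state counter: -1 means locked (adapter not yet configured, or a reset in flight), 0 means idle, and positive values count attached contexts. The three helpers above map onto three atomic primitives; a hedged sketch of the protocol with illustrative names, assuming <linux/atomic.h>:

/* Sketch: the tri-state counter protocol behind contexts_num. */
static int demo_get(atomic_t *n)
{
	/* refuses while the counter sits at -1 (locked) */
	return atomic_inc_unless_negative(n) ? 0 : -EBUSY;
}

static void demo_put(atomic_t *n)
{
	atomic_dec_if_positive(n);	/* never drops below zero */
}

static int demo_lock(atomic_t *n)
{
	/* 0 -> -1 succeeds only when nothing is attached */
	return atomic_cmpxchg(n, 0, -1) == 0 ? 0 : -EBUSY;
}

Starting the counter at -1 in cxl_alloc_adapter() means attach attempts fail until configuration completes and cxl_adapter_context_unlock() flips it to 0.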
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 6f0c4ac4b..8ad4e4f6f 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1484,6 +1484,8 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
if ((rc = cxl_native_register_psl_err_irq(adapter)))
goto err;
+ /* Release the context lock as adapter is configured */
+ cxl_adapter_context_unlock(adapter);
return 0;
err:
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index b043c20f1..a8b6d6a63 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -75,12 +75,31 @@ static ssize_t reset_adapter_store(struct device *device,
int val;
rc = sscanf(buf, "%i", &val);
- if ((rc != 1) || (val != 1))
+ if ((rc != 1) || (val != 1 && val != -1))
return -EINVAL;
- if ((rc = cxl_ops->adapter_reset(adapter)))
- return rc;
- return count;
+ /*
+ * See if we can lock the context mapping that's only allowed
+ * when there are no contexts attached to the adapter. Once
+ * taken this will also prevent any context from getting activated.
+ */
+ if (val == 1) {
+ rc = cxl_adapter_context_lock(adapter);
+ if (rc)
+ goto out;
+
+ rc = cxl_ops->adapter_reset(adapter);
+ /* In case reset failed release context lock */
+ if (rc)
+ cxl_adapter_context_unlock(adapter);
+
+ } else if (val == -1) {
+ /* Perform a forced adapter reset */
+ rc = cxl_ops->adapter_reset(adapter);
+ }
+
+out:
+ return rc ? rc : count;
}
static ssize_t load_image_on_perst_show(struct device *device,
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index fd9271bc1..cd01e342b 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -139,7 +139,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
return -ERESTARTSYS;
if (!mei_cl_is_connected(cl)) {
- rets = -EBUSY;
+ rets = -ENODEV;
goto out;
}
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index e094df3cf..5b5b2e07e 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -142,7 +142,7 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
mutex_lock(&bus->device_lock);
if (!mei_cl_is_connected(cl)) {
- rets = -EBUSY;
+ rets = -ENODEV;
goto out;
}
}
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 0dcb854b4..7ad15d678 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -125,6 +125,9 @@
#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
+#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
+#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 52635b063..080208dc5 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -202,7 +202,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
mutex_lock(&dev->device_lock);
if (!mei_cl_is_connected(cl)) {
- rets = -EBUSY;
+ rets = -ENODEV;
goto out;
}
}
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 71cea9b29..5eb9b75ae 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -91,6 +91,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, mei_me_pch8_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, mei_me_pch8_cfg)},
+
/* required last entry */
{0, }
};
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 2206d4477..17891f17f 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1778,7 +1778,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
struct mmc_blk_data *md = mq->data;
struct mmc_packed *packed = mqrq->packed;
bool do_rel_wr, do_data_tag;
- u32 *packed_cmd_hdr;
+ __le32 *packed_cmd_hdr;
u8 hdr_blocks;
u8 i = 1;
@@ -2303,7 +2303,8 @@ again:
set_capacity(md->disk, size);
if (mmc_host_cmd23(card->host)) {
- if (mmc_card_mmc(card) ||
+ if ((mmc_card_mmc(card) &&
+ card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
(mmc_card_sd(card) &&
card->scr.cmds & SD_SCR_CMD23_SUPPORT))
md->flags |= MMC_BLK_CMD23;
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index fee5e1271..7f16709a5 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -31,7 +31,7 @@ enum mmc_packed_type {
struct mmc_packed {
struct list_head list;
- u32 cmd_hdr[1024];
+ __le32 cmd_hdr[1024];
unsigned int blocks;
u8 nr_entries;
u8 retries;
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index f2d185cf8..c57eb32dc 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1259,6 +1259,16 @@ static int mmc_select_hs400es(struct mmc_card *card)
goto out_err;
}
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V)
+ err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
+
+ if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)
+ err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+
+ /* If this fails, try again during the next card power cycle */
+ if (err)
+ goto out_err;
+
err = mmc_select_bus_width(card);
if (err < 0)
goto out_err;
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 6c71fc9f7..da9f71b8d 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -1138,11 +1138,6 @@ static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
dev_dbg(sdmmc_dev(host), "%s\n", __func__);
mutex_lock(&ucr->dev_mutex);
- if (rtsx_usb_card_exclusive_check(ucr, RTSX_USB_SD_CARD)) {
- mutex_unlock(&ucr->dev_mutex);
- return;
- }
-
sd_set_power_mode(host, ios->power_mode);
sd_set_bus_width(host, ios->bus_width);
sd_set_timing(host, ios->timing, &host->ddr_mode);
@@ -1314,6 +1309,7 @@ static void rtsx_usb_update_led(struct work_struct *work)
container_of(work, struct rtsx_usb_sdmmc, led_work);
struct rtsx_ucr *ucr = host->ucr;
+ pm_runtime_get_sync(sdmmc_dev(host));
mutex_lock(&ucr->dev_mutex);
if (host->led.brightness == LED_OFF)
@@ -1322,6 +1318,7 @@ static void rtsx_usb_update_led(struct work_struct *work)
rtsx_usb_turn_on_led(ucr);
mutex_unlock(&ucr->dev_mutex);
+ pm_runtime_put(sdmmc_dev(host));
}
#endif
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index cd65d474a..a8a022a73 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -687,7 +687,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
* host->clock is in Hz. target_timeout is in us.
* Hence, us = 1000000 * cycles / Hz. Round up.
*/
- val = 1000000 * data->timeout_clks;
+ val = 1000000ULL * data->timeout_clks;
if (do_div(val, host->clock))
target_timeout++;
target_timeout += val;
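The ULL suffix matters because 1000000 * data->timeout_clks is otherwise a 32-bit multiply that can wrap before the result is widened for do_div(). A stand-alone demonstration (the timeout_clks value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t timeout_clks = 65535;			/* plausible large value */
	uint64_t bad  = 1000000 * timeout_clks;		/* 32-bit multiply, wraps */
	uint64_t good = 1000000ULL * timeout_clks;	/* 64-bit multiply */

	printf("bad=%llu good=%llu\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;	/* bad=1110490560 good=65535000000 */
}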
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index f4533266d..b419c7cfd 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -644,7 +644,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
int shutdown)
{
int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
- int vol_id = -1, lnum = -1;
+ int erase = 0, keep = 0, vol_id = -1, lnum = -1;
#ifdef CONFIG_MTD_UBI_FASTMAP
int anchor = wrk->anchor;
#endif
@@ -780,6 +780,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
e1->pnum);
scrubbing = 1;
goto out_not_moved;
+ } else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
+ /*
+ * While a full scan would detect interrupted erasures at
+ * attach time, we can face them here when the device was
+ * attached via fastmap.
+ */
+ dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure",
+ e1->pnum);
+ erase = 1;
+ goto out_not_moved;
}
ubi_err(ubi, "error %d while reading VID header from PEB %d",
@@ -815,6 +825,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
* Target PEB had bit-flips or write error - torture it.
*/
torture = 1;
+ keep = 1;
goto out_not_moved;
}
@@ -901,7 +912,7 @@ out_not_moved:
ubi->erroneous_peb_count += 1;
} else if (scrubbing)
wl_tree_add(e1, &ubi->scrub);
- else
+ else if (keep)
wl_tree_add(e1, &ubi->used);
if (dst_leb_clean) {
wl_tree_add(e2, &ubi->free);
@@ -922,6 +933,12 @@ out_not_moved:
goto out_ro;
}
+ if (erase) {
+ err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
+ if (err)
+ goto out_ro;
+ }
+
mutex_unlock(&ubi->move_mutex);
return 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 618f18436..c65e17fae 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -1009,7 +1009,6 @@ int i40e_unregister_client(struct i40e_client *client)
if (!i40e_client_is_registered(client)) {
pr_info("i40e: Client %s has not been registered\n",
client->name);
- mutex_unlock(&i40e_client_mutex);
ret = -ENODEV;
goto out;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index dad15b6c6..c74d16409 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -7990,45 +7990,34 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
u8 *lut, u16 lut_size)
{
- struct i40e_aqc_get_set_rss_key_data rss_key;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- bool pf_lut = false;
- u8 *rss_lut;
- int ret, i;
-
- memcpy(&rss_key, seed, sizeof(rss_key));
-
- rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL);
- if (!rss_lut)
- return -ENOMEM;
-
- /* Populate the LUT with max no. of queues in round robin fashion */
- for (i = 0; i < vsi->rss_table_size; i++)
- rss_lut[i] = i % vsi->rss_size;
+ int ret = 0;
- ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key);
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Cannot set RSS key, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
- goto config_rss_aq_out;
+ if (seed) {
+ struct i40e_aqc_get_set_rss_key_data *seed_dw =
+ (struct i40e_aqc_get_set_rss_key_data *)seed;
+ ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot set RSS key, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+ }
}
+ if (lut) {
+ bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
- if (vsi->type == I40E_VSI_MAIN)
- pf_lut = true;
-
- ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut,
- vsi->rss_table_size);
- if (ret)
- dev_info(&pf->pdev->dev,
- "Cannot set RSS lut, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
-
-config_rss_aq_out:
- kfree(rss_lut);
+ ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot set RSS lut, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+ }
+ }
return ret;
}
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 9fb8d7472..da9998ea9 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -433,6 +433,13 @@ void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
unsigned int nentries_mask = dest_ring->nentries_mask;
unsigned int write_index = dest_ring->write_index;
u32 ctrl_addr = pipe->ctrl_addr;
+ u32 cur_write_idx = ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+
+ /* Prevent the CE ring from getting stuck when it is full:
+ * make sure the write index stays at least one entry behind
+ * the read index.
+ */
+ if ((cur_write_idx + nentries) == dest_ring->sw_index)
+ nentries -= 1;
write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
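The back-off above enforces the usual ring-buffer invariant that a completely full ring is indistinguishable from an empty one, so the producer keeps the write index one entry behind the read index. A sketch of that rule for a power-of-two ring (sizes and index values are made up):

#include <stdio.h>

#define RING_SZ   8u			/* must be a power of two */
#define RING_MASK (RING_SZ - 1)

static unsigned int ring_add(unsigned int idx, unsigned int n)
{
	return (idx + n) & RING_MASK;
}

int main(void)
{
	unsigned int sw_read = 2, write = 5, nentries = 5;

	/* advancing by nentries would land exactly on the read index */
	if (ring_add(write, nentries) == sw_read)
		nentries -= 1;
	printf("new write index: %u\n", ring_add(write, nentries));
	return 0;	/* prints 1: one entry behind sw_read */
}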
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 24c8d65bc..09ca63466 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -2394,6 +2394,8 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q);
spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags);
+ ath10k_mac_tx_push_pending(ar);
+
spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 0bbd0a00e..146365b93 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -3777,7 +3777,9 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
enum ath10k_hw_txrx_mode txmode;
enum ath10k_mac_tx_path txpath;
struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
size_t skb_len;
+ bool is_mgmt, is_presp;
int ret;
spin_lock_bh(&ar->htt.tx_lock);
@@ -3801,6 +3803,22 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
skb_len = skb->len;
txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+ is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
+
+ if (is_mgmt) {
+ hdr = (struct ieee80211_hdr *)skb->data;
+ is_presp = ieee80211_is_probe_resp(hdr->frame_control);
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
+
+ if (ret) {
+ ath10k_htt_tx_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ return ret;
+ }
+ spin_unlock_bh(&ar->htt.tx_lock);
+ }
ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
if (unlikely(ret)) {
@@ -3808,6 +3826,8 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
spin_lock_bh(&ar->htt.tx_lock);
ath10k_htt_tx_dec_pending(htt);
+ if (is_mgmt)
+ ath10k_htt_tx_mgmt_dec_pending(htt);
spin_unlock_bh(&ar->htt.tx_lock);
return ret;
@@ -6538,7 +6558,7 @@ static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
goto exit;
}
- ath10k_mac_update_bss_chan_survey(ar, survey->channel);
+ ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
spin_lock_bh(&ar->data_lock);
memcpy(survey, ar_survey, sizeof(*survey));
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index b29a86a26..28ff5cb4e 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -119,8 +119,6 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
ieee80211_tx_status(htt->ar->hw, msdu);
/* we do not own the msdu anymore */
- ath10k_mac_tx_push_pending(ar);
-
return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 3ef468893..f67cc198b 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -180,6 +180,7 @@ enum wmi_service {
WMI_SERVICE_MESH_NON_11S,
WMI_SERVICE_PEER_STATS,
WMI_SERVICE_RESTRT_CHNL_SUPPORT,
+ WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
WMI_SERVICE_TX_MODE_PUSH_ONLY,
WMI_SERVICE_TX_MODE_PUSH_PULL,
WMI_SERVICE_TX_MODE_DYNAMIC,
@@ -305,6 +306,7 @@ enum wmi_10_4_service {
WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT,
WMI_10_4_SERVICE_PEER_STATS,
WMI_10_4_SERVICE_MESH_11S,
+ WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
@@ -402,6 +404,7 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_MESH_NON_11S);
SVCSTR(WMI_SERVICE_PEER_STATS);
SVCSTR(WMI_SERVICE_RESTRT_CHNL_SUPPORT);
+ SVCSTR(WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT);
SVCSTR(WMI_SERVICE_TX_MODE_PUSH_ONLY);
SVCSTR(WMI_SERVICE_TX_MODE_PUSH_PULL);
SVCSTR(WMI_SERVICE_TX_MODE_DYNAMIC);
@@ -652,6 +655,8 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_PEER_STATS, len);
SVCMAP(WMI_10_4_SERVICE_MESH_11S,
WMI_SERVICE_MESH_11S, len);
+ SVCMAP(WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
+ WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, len);
SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
WMI_SERVICE_TX_MODE_PUSH_ONLY, len);
SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 43f8f7d45..adba3b003 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -564,11 +564,16 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
__le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP));
__le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP));
- /* If OEM did not fuse address - get it from OTP */
- if (!mac_addr0 && !mac_addr1) {
- mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP));
- mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP));
- }
+ iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
+ /*
+ * If the OEM fused a valid address, use it instead of the one in the
+ * OTP
+ */
+ if (is_valid_ether_addr(data->hw_addr))
+ return;
+
+ mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP));
+ mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP));
iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 7e0cdbf8b..794c57486 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1214,9 +1214,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
}
/* TODO: read the budget from BIOS / Platform NVM */
- if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0)
+ if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0) {
ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
mvm->cooling_dev.cur_state);
+ if (ret)
+ goto error;
+ }
#else
/* Initialize tx backoffs to the minimal possible */
iwl_mvm_tt_tx_backoff(mvm, 0);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 69c42ce45..d742d27d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -539,6 +539,11 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MVM_OFFCHANNEL_QUEUE,
IWL_MAX_TID_COUNT, 0);
+ else
+ iwl_mvm_disable_txq(mvm,
+ IWL_MVM_DQA_P2P_DEVICE_QUEUE,
+ vif->hw_queue[0], IWL_MAX_TID_COUNT,
+ 0);
break;
case NL80211_IFTYPE_AP:
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index df6c32caa..afb7eb60e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -598,9 +598,10 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
mvm_sta = iwl_mvm_sta_from_mac80211(sta);
- /* not a data packet */
- if (!ieee80211_is_data_qos(hdr->frame_control) ||
- is_multicast_ether_addr(hdr->addr1))
+ /* not a data packet or a bar */
+ if (!ieee80211_is_back_req(hdr->frame_control) &&
+ (!ieee80211_is_data_qos(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1)))
return false;
if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
@@ -624,6 +625,11 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
spin_lock_bh(&buffer->lock);
+ if (ieee80211_is_back_req(hdr->frame_control)) {
+ iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
+ goto drop;
+ }
+
/*
* If there was a significant jump in the nssn - adjust.
* If the SN is smaller than the NSSN it might need to first go into
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 3130b9c68..e933c12d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -576,9 +576,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
ret);
/* Make sure the SCD wrptr is correctly set before reconfiguring */
- iwl_trans_txq_enable(mvm->trans, queue, iwl_mvm_ac_to_tx_fifo[ac],
- cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
- ssn, wdg_timeout);
+ iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
@@ -1270,9 +1268,31 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
/* If DQA is supported - the queues can be disabled now */
- if (iwl_mvm_is_dqa_supported(mvm))
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ u8 reserved_txq = mvm_sta->reserved_queue;
+ enum iwl_mvm_queue_status *status;
+
iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
+ /*
+ * If no traffic has gone through the reserved TXQ - it
+ * is still marked as IWL_MVM_QUEUE_RESERVED, and
+ * should be manually marked as free again
+ */
+ spin_lock_bh(&mvm->queue_info_lock);
+ status = &mvm->queue_info[reserved_txq].status;
+ if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
+ (*status != IWL_MVM_QUEUE_FREE),
+ "sta_id %d reserved txq %d status %d",
+ mvm_sta->sta_id, reserved_txq, *status)) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ return -EINVAL;
+ }
+
+ *status = IWL_MVM_QUEUE_FREE;
+ spin_unlock_bh(&mvm->queue_info_lock);
+ }
+
if (vif->type == NL80211_IFTYPE_STATION &&
mvmvif->ap_sta_id == mvm_sta->sta_id) {
/* if associated - we can't remove the AP STA now */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index b3a87a31d..a0c1e3d07 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -903,9 +903,13 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
tid = IWL_MAX_TID_COUNT;
}
- if (iwl_mvm_is_dqa_supported(mvm))
+ if (iwl_mvm_is_dqa_supported(mvm)) {
txq_id = mvmsta->tid_data[tid].txq_id;
+ if (ieee80211_is_mgmt(fc))
+ tx_cmd->tid_tspec = IWL_TID_NON_QOS;
+ }
+
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdrlen);
diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c
index 1c7b00630..b89596c18 100644
--- a/drivers/net/wireless/marvell/mwifiex/join.c
+++ b/drivers/net/wireless/marvell/mwifiex/join.c
@@ -669,9 +669,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
priv->assoc_rsp_size = min(le16_to_cpu(resp->size) - S_DS_GEN,
sizeof(priv->assoc_rsp_buf));
- memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size);
-
assoc_rsp->a_id = cpu_to_le16(aid);
+ memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size);
if (status_code) {
priv->adapter->dbg.num_cmd_assoc_failure++;
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index a422f3306..7e394d485 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -708,7 +708,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_EXT_SCAN_REPORT:
mwifiex_dbg(adapter, EVENT, "event: EXT_SCAN Report\n");
- if (adapter->ext_scan && !priv->scan_aborting)
+ /* We intend to skip this event during suspend, but handle
+ * it when the interface is disabled.
+ */
+ if (adapter->ext_scan && (!priv->scan_aborting ||
+ !netif_running(priv->netdev)))
ret = mwifiex_handle_event_ext_scan_report(priv,
adapter->event_skb->data);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 7cf26c612..6005e1421 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -831,8 +831,10 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev,
sizeof(struct usb_anchor),
GFP_KERNEL);
- if (!rt2x00dev->anchor)
+ if (!rt2x00dev->anchor) {
+ retval = -ENOMEM;
goto exit_free_reg;
+ }
init_usb_anchor(rt2x00dev->anchor);
return 0;
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index 3524441fd..6ee6bf8e7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -345,9 +345,9 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
return &rtl_regdom_no_midband;
case COUNTRY_CODE_IC:
return &rtl_regdom_11;
- case COUNTRY_CODE_ETSI:
case COUNTRY_CODE_TELEC_NETGEAR:
return &rtl_regdom_60_64;
+ case COUNTRY_CODE_ETSI:
case COUNTRY_CODE_SPAIN:
case COUNTRY_CODE_FRANCE:
case COUNTRY_CODE_ISRAEL:
@@ -406,6 +406,8 @@ static u8 channel_plan_to_country_code(u8 channelplan)
return COUNTRY_CODE_WORLD_WIDE_13;
case 0x22:
return COUNTRY_CODE_IC;
+ case 0x25:
+ return COUNTRY_CODE_ETSI;
case 0x32:
return COUNTRY_CODE_TELEC_NETGEAR;
case 0x41:
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 935866fe5..a8b6949a8 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -217,6 +217,8 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
return rc;
if (cmd_rc < 0)
return cmd_rc;
+
+ nvdimm_clear_from_poison_list(nvdimm_bus, phys, len);
return clear_err.cleared;
}
EXPORT_SYMBOL_GPL(nvdimm_clear_poison);
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 4d7bbd2df..7ceba0877 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -547,11 +547,12 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region,
}
EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
-static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
+static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length,
+ gfp_t flags)
{
struct nd_poison *pl;
- pl = kzalloc(sizeof(*pl), GFP_KERNEL);
+ pl = kzalloc(sizeof(*pl), flags);
if (!pl)
return -ENOMEM;
@@ -567,7 +568,7 @@ static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
struct nd_poison *pl;
if (list_empty(&nvdimm_bus->poison_list))
- return add_poison(nvdimm_bus, addr, length);
+ return add_poison(nvdimm_bus, addr, length, GFP_KERNEL);
/*
* There is a chance this is a duplicate, check for those first.
@@ -587,7 +588,7 @@ static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
* as any overlapping ranges will get resolved when the list is consumed
* and converted to badblocks
*/
- return add_poison(nvdimm_bus, addr, length);
+ return add_poison(nvdimm_bus, addr, length, GFP_KERNEL);
}
int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
@@ -602,6 +603,70 @@ int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison);
+void nvdimm_clear_from_poison_list(struct nvdimm_bus *nvdimm_bus,
+ phys_addr_t start, unsigned int len)
+{
+ struct list_head *poison_list = &nvdimm_bus->poison_list;
+ u64 clr_end = start + len - 1;
+ struct nd_poison *pl, *next;
+
+ nvdimm_bus_lock(&nvdimm_bus->dev);
+ WARN_ON_ONCE(list_empty(poison_list));
+
+ /*
+ * [start, clr_end] is the poison interval being cleared.
+ * [pl->start, pl_end] is the poison_list entry we're comparing
+ * the above interval against. The poison list entry may need
+ * to be modified (update either start or length), deleted, or
+ * split into two based on the overlap characteristics
+ */
+
+ list_for_each_entry_safe(pl, next, poison_list, list) {
+ u64 pl_end = pl->start + pl->length - 1;
+
+ /* Skip intervals with no intersection */
+ if (pl_end < start)
+ continue;
+ if (pl->start > clr_end)
+ continue;
+ /* Delete completely overlapped poison entries */
+ if ((pl->start >= start) && (pl_end <= clr_end)) {
+ list_del(&pl->list);
+ kfree(pl);
+ continue;
+ }
+ /* Adjust start point of partially cleared entries */
+ if ((start <= pl->start) && (clr_end > pl->start)) {
+ pl->length -= clr_end - pl->start + 1;
+ pl->start = clr_end + 1;
+ continue;
+ }
+ /* Adjust pl->length for partial clearing at the tail end */
+ if ((pl->start < start) && (pl_end <= clr_end)) {
+ /* pl->start remains the same */
+ pl->length = start - pl->start;
+ continue;
+ }
+ /*
+ * If clearing in the middle of an entry, we split it into
+ * two by modifying the current entry to represent one half of
+ * the split, and adding a new entry for the second half.
+ */
+ if ((pl->start < start) && (pl_end > clr_end)) {
+ u64 new_start = clr_end + 1;
+ u64 new_len = pl_end - new_start + 1;
+
+ /* Add new entry covering the right half */
+ add_poison(nvdimm_bus, new_start, new_len, GFP_NOIO);
+ /* Adjust this entry to cover the left half */
+ pl->length = start - pl->start;
+ continue;
+ }
+ }
+ nvdimm_bus_unlock(&nvdimm_bus->dev);
+}
+EXPORT_SYMBOL_GPL(nvdimm_clear_from_poison_list);
+
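nvdimm_clear_from_poison_list() is interval subtraction over a list: each entry either misses the cleared range entirely, is deleted, is clipped at its head or tail, or is split in two. A user-space sketch of the four overlap cases for a single entry (types and names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct ival { uint64_t start, len; };

/* clear [start, start+len-1] from pl; returns surviving pieces in out[] */
static int clear_range(struct ival pl, uint64_t start, uint64_t len,
		       struct ival out[2])
{
	uint64_t pl_end = pl.start + pl.len - 1;
	uint64_t clr_end = start + len - 1;

	if (pl_end < start || pl.start > clr_end) {	/* no intersection */
		out[0] = pl;
		return 1;
	}
	if (pl.start >= start && pl_end <= clr_end)	/* fully covered */
		return 0;
	if (start <= pl.start) {			/* head clipped */
		out[0].start = clr_end + 1;
		out[0].len = pl_end - clr_end;
		return 1;
	}
	if (pl_end <= clr_end) {			/* tail clipped */
		out[0].start = pl.start;
		out[0].len = start - pl.start;
		return 1;
	}
	/* cleared in the middle: split into two */
	out[0].start = pl.start;
	out[0].len = start - pl.start;
	out[1].start = clr_end + 1;
	out[1].len = pl_end - clr_end;
	return 2;
}

int main(void)
{
	struct ival out[2];
	int n = clear_range((struct ival){ 0, 100 }, 40, 20, out);

	for (int i = 0; i < n; i++)
		printf("[%llu, len %llu]\n",
		       (unsigned long long)out[i].start,
		       (unsigned long long)out[i].len);
	return 0;	/* prints [0, len 40] and [60, len 40] */
}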
#ifdef CONFIG_BLK_DEV_INTEGRITY
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index ef9893fa3..4f5e567fd 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -848,7 +848,7 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
int err, res_valid = 0;
struct device *dev = &pcie->pdev->dev;
struct device_node *np = dev->of_node;
- struct resource_entry *win;
+ struct resource_entry *win, *tmp;
resource_size_t iobase;
INIT_LIST_HEAD(&pcie->resources);
@@ -862,7 +862,7 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
if (err)
goto out_release_res;
- resource_list_for_each_entry(win, &pcie->resources) {
+ resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
struct resource *res = win->res;
switch (resource_type(res)) {
@@ -874,9 +874,11 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
lower_32_bits(res->start),
OB_PCIE_IO);
err = pci_remap_iospace(res, iobase);
- if (err)
+ if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
+ resource_list_destroy_entry(win);
+ }
break;
case IORESOURCE_MEM:
advk_pcie_set_ob_win(pcie, 0,
diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c
index 9d9d34e95..61eb4d46e 100644
--- a/drivers/pci/host/pci-host-common.c
+++ b/drivers/pci/host/pci-host-common.c
@@ -29,7 +29,7 @@ static int gen_pci_parse_request_of_pci_ranges(struct device *dev,
int err, res_valid = 0;
struct device_node *np = dev->of_node;
resource_size_t iobase;
- struct resource_entry *win;
+ struct resource_entry *win, *tmp;
err = of_pci_get_host_bridge_resources(np, 0, 0xff, resources, &iobase);
if (err)
@@ -39,15 +39,17 @@ static int gen_pci_parse_request_of_pci_ranges(struct device *dev,
if (err)
return err;
- resource_list_for_each_entry(win, resources) {
+ resource_list_for_each_entry_safe(win, tmp, resources) {
struct resource *res = win->res;
switch (resource_type(res)) {
case IORESOURCE_IO:
err = pci_remap_iospace(res, iobase);
- if (err)
+ if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
+ resource_list_destroy_entry(win);
+ }
break;
case IORESOURCE_MEM:
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
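The switch to resource_list_for_each_entry_safe() across these host drivers is the standard delete-while-iterating fix: the plain iterator would dereference a freed node after resource_list_destroy_entry(). The same idea on a plain singly-linked list (an illustrative sketch, not the kernel list API):

#include <stdio.h>
#include <stdlib.h>

struct node { int val; struct node *next; };

static void delete_odd(struct node **head)
{
	struct node **pp = head;
	struct node *n, *tmp;

	for (n = *head; n; n = tmp) {
		tmp = n->next;		/* save next before a possible free */
		if (n->val & 1) {
			*pp = tmp;	/* unlink the current node */
			free(n);
		} else {
			pp = &n->next;
		}
	}
}

int main(void)
{
	struct node *head = NULL, **pp = &head;

	for (int i = 0; i < 6; i++) {
		*pp = calloc(1, sizeof(**pp));
		(*pp)->val = i;
		pp = &(*pp)->next;
	}
	delete_odd(&head);
	for (struct node *n = head; n; n = n->next)
		printf("%d ", n->val);	/* prints: 0 2 4 */
	printf("\n");
	return 0;
}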
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 6de0757b1..7ec1e8000 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -621,7 +621,11 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
if (err < 0)
return err;
- pci_add_resource_offset(&sys->resources, &pcie->pio, sys->io_offset);
+ err = pci_remap_iospace(&pcie->pio, pcie->io.start);
+ if (!err)
+ pci_add_resource_offset(&sys->resources, &pcie->pio,
+ sys->io_offset);
+
pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
pci_add_resource_offset(&sys->resources, &pcie->prefetch,
sys->mem_offset);
@@ -631,7 +635,6 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
if (err < 0)
return err;
- pci_remap_iospace(&pcie->pio, pcie->io.start);
return 1;
}
@@ -856,7 +859,7 @@ static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
/* override IDDQ */
value = pads_readl(pcie, PADS_CTL);
value |= PADS_CTL_IDDQ_1L;
- pads_writel(pcie, PADS_CTL, value);
+ pads_writel(pcie, value, PADS_CTL);
/* reset PLL */
value = pads_readl(pcie, soc->pads_pll_ctl);
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
index f23440577..b7dc07002 100644
--- a/drivers/pci/host/pci-versatile.c
+++ b/drivers/pci/host/pci-versatile.c
@@ -74,7 +74,7 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
int err, mem = 1, res_valid = 0;
struct device_node *np = dev->of_node;
resource_size_t iobase;
- struct resource_entry *win;
+ struct resource_entry *win, *tmp;
err = of_pci_get_host_bridge_resources(np, 0, 0xff, res, &iobase);
if (err)
@@ -84,15 +84,17 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
if (err)
goto out_release_res;
- resource_list_for_each_entry(win, res) {
+ resource_list_for_each_entry_safe(win, tmp, res) {
struct resource *res = win->res;
switch (resource_type(res)) {
case IORESOURCE_IO:
err = pci_remap_iospace(res, iobase);
- if (err)
+ if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
+ resource_list_destroy_entry(win);
+ }
break;
case IORESOURCE_MEM:
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 12afce198..2a500f270 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -436,7 +436,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
struct resource *cfg_res;
int i, ret;
LIST_HEAD(res);
- struct resource_entry *win;
+ struct resource_entry *win, *tmp;
cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (cfg_res) {
@@ -457,17 +457,20 @@ int dw_pcie_host_init(struct pcie_port *pp)
goto error;
/* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry(win, &res) {
+ resource_list_for_each_entry_safe(win, tmp, &res) {
switch (resource_type(win->res)) {
case IORESOURCE_IO:
- pp->io = win->res;
- pp->io->name = "I/O";
- pp->io_size = resource_size(pp->io);
- pp->io_bus_addr = pp->io->start - win->offset;
- ret = pci_remap_iospace(pp->io, pp->io_base);
- if (ret)
+ ret = pci_remap_iospace(win->res, pp->io_base);
+ if (ret) {
dev_warn(pp->dev, "error %d: failed to map resource %pR\n",
- ret, pp->io);
+ ret, win->res);
+ resource_list_destroy_entry(win);
+ } else {
+ pp->io = win->res;
+ pp->io->name = "I/O";
+ pp->io_size = resource_size(pp->io);
+ pp->io_bus_addr = pp->io->start - win->offset;
+ }
break;
case IORESOURCE_MEM:
pp->mem = win->res;
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 65db7a221..5f7fcc971 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -945,7 +945,7 @@ static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
struct device *dev = pci->dev;
struct device_node *np = dev->of_node;
resource_size_t iobase;
- struct resource_entry *win;
+ struct resource_entry *win, *tmp;
err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources, &iobase);
if (err)
@@ -955,14 +955,17 @@ static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
if (err)
goto out_release_res;
- resource_list_for_each_entry(win, &pci->resources) {
+ resource_list_for_each_entry_safe(win, tmp, &pci->resources) {
struct resource *res = win->res;
if (resource_type(res) == IORESOURCE_IO) {
err = pci_remap_iospace(res, iobase);
- if (err)
+ if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
+
+ resource_list_destroy_entry(win);
+ }
}
}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 44e0ff374..4bf1a88d7 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3198,6 +3198,7 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
static void quirk_no_pm_reset(struct pci_dev *dev)
{
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index d22a9fe2e..71bbeb932 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1808,6 +1808,8 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
return PTR_ERR(vg->pctl_dev);
}
+ raw_spin_lock_init(&vg->lock);
+
ret = byt_gpio_probe(vg);
if (ret) {
pinctrl_unregister(vg->pctl_dev);
@@ -1815,7 +1817,6 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, vg);
- raw_spin_lock_init(&vg->lock);
pm_runtime_enable(&pdev->dev);
return 0;
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 257cab129..2b5b20bf7 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -19,6 +19,7 @@
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include "../core.h"
#include "pinctrl-intel.h"
/* Offset from regs */
@@ -1079,6 +1080,26 @@ int intel_pinctrl_remove(struct platform_device *pdev)
EXPORT_SYMBOL_GPL(intel_pinctrl_remove);
#ifdef CONFIG_PM_SLEEP
+static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned pin)
+{
+ const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin);
+
+ if (!pd || !intel_pad_usable(pctrl, pin))
+ return false;
+
+ /*
+ * Only restore the pin if it is actually in use by the kernel (or
+ * by userspace). It is possible that some pins are used by the
+ * BIOS during resume and those are not always locked down so leave
+ * them alone.
+ */
+ if (pd->mux_owner || pd->gpio_owner ||
+ gpiochip_line_is_irq(&pctrl->chip, pin))
+ return true;
+
+ return false;
+}
+
int intel_pinctrl_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -1092,7 +1113,7 @@ int intel_pinctrl_suspend(struct device *dev)
const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
u32 val;
- if (!intel_pad_usable(pctrl, desc->number))
+ if (!intel_pinctrl_should_save(pctrl, desc->number))
continue;
val = readl(intel_get_padcfg(pctrl, desc->number, PADCFG0));
@@ -1153,7 +1174,7 @@ int intel_pinctrl_resume(struct device *dev)
void __iomem *padcfg;
u32 val;
- if (!intel_pad_usable(pctrl, desc->number))
+ if (!intel_pinctrl_should_save(pctrl, desc->number))
continue;
padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG0);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 51c42d746..775c88303 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -156,7 +156,7 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->ctl_reg);
- val &= mask;
+ val &= ~mask;
val |= i << g->mux_bit;
writel(val, pctrl->regs + g->ctl_reg);
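The one-character fix above restores the classic read-modify-write sequence for a register bit-field: with val &= mask, everything outside the field was wiped while the old field contents survived; the field must instead be cleared with the inverted mask before the new value is ORed in. A tiny demonstration (mask and shift values are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t val = 0xffffffff;	/* pretend register contents */
	uint32_t mask = 0x7 << 2;	/* 3-bit mux field at bit 2 */
	uint32_t func = 5;

	val &= ~mask;			/* clear the mux field */
	val |= func << 2;		/* program the new function */
	printf("0x%08x\n", (unsigned)val);	/* 0xfffffff7 */
	return 0;
}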
diff --git a/drivers/power/bq24257_charger.c b/drivers/power/bq24257_charger.c
index 1fea2c7ef..6fc31bdc6 100644
--- a/drivers/power/bq24257_charger.c
+++ b/drivers/power/bq24257_charger.c
@@ -1068,6 +1068,12 @@ static int bq24257_probe(struct i2c_client *client,
return ret;
}
+ ret = bq24257_power_supply_init(bq);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register power supply\n");
+ return ret;
+ }
+
ret = devm_request_threaded_irq(dev, client->irq, NULL,
bq24257_irq_handler_thread,
IRQF_TRIGGER_FALLING |
@@ -1078,12 +1084,6 @@ static int bq24257_probe(struct i2c_client *client,
return ret;
}
- ret = bq24257_power_supply_init(bq);
- if (ret < 0) {
- dev_err(dev, "Failed to register power supply\n");
- return ret;
- }
-
ret = sysfs_create_group(&bq->charger->dev.kobj, &bq24257_attr_group);
if (ret < 0) {
dev_err(dev, "Can't create sysfs entries\n");
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index fb991ec76..696116ebd 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -1111,6 +1111,12 @@ static int tps65910_probe(struct platform_device *pdev)
pmic->num_regulators = ARRAY_SIZE(tps65910_regs);
pmic->ext_sleep_control = tps65910_ext_sleep_control;
info = tps65910_regs;
+ /* Work around silicon erratum SWCZ010: the programmed output
+ * voltage can go higher than expected, or the device can crash.
+ * Workaround: disable synchronization of the DCDC clocks.
+ */
+ tps65910_reg_clear_bits(pmic->mfd, TPS65910_DCDCCTRL,
+ DCDCCTRL_DCDCCKSYNC_MASK);
break;
case TPS65911:
pmic->get_ctrl_reg = &tps65911_get_ctrl_register;
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 6b1577c73..285b4006f 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -124,7 +124,12 @@ con3270_create_status(struct con3270 *cp)
static void
con3270_update_string(struct con3270 *cp, struct string *s, int nr)
{
- if (s->len >= cp->view.cols - 5)
+ if (s->len < 4) {
+ /* This indicates a bug, but printing a warning would
+ * cause a deadlock. */
+ return;
+ }
+ if (s->string[s->len - 4] != TO_RA)
return;
raw3270_buffer_address(cp->view.dev, s->string + s->len - 3,
cp->view.cols * (nr + 1));
@@ -460,11 +465,11 @@ con3270_cline_end(struct con3270 *cp)
cp->cline->len + 4 : cp->view.cols;
s = con3270_alloc_string(cp, size);
memcpy(s->string, cp->cline->string, cp->cline->len);
- if (s->len < cp->view.cols - 5) {
+ if (cp->cline->len < cp->view.cols - 5) {
s->string[s->len - 4] = TO_RA;
s->string[s->len - 1] = 0;
} else {
- while (--size > cp->cline->len)
+ while (--size >= cp->cline->len)
s->string[size] = cp->view.ascebc[' '];
}
/* Replace cline with allocated line s and reset cline. */
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 940e725bd..11674698b 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -95,12 +95,13 @@ struct chsc_ssd_area {
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
struct chsc_ssd_area *ssd_area;
+ unsigned long flags;
int ccode;
int ret;
int i;
int mask;
- spin_lock_irq(&chsc_page_lock);
+ spin_lock_irqsave(&chsc_page_lock, flags);
memset(chsc_page, 0, PAGE_SIZE);
ssd_area = chsc_page;
ssd_area->request.length = 0x0010;
@@ -144,7 +145,7 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
ssd->fla[i] = ssd_area->fla[i];
}
out:
- spin_unlock_irq(&chsc_page_lock);
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
}
@@ -832,9 +833,10 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable)
u32 fmt : 4;
u32 : 16;
} __attribute__ ((packed)) *secm_area;
+ unsigned long flags;
int ret, ccode;
- spin_lock_irq(&chsc_page_lock);
+ spin_lock_irqsave(&chsc_page_lock, flags);
memset(chsc_page, 0, PAGE_SIZE);
secm_area = chsc_page;
secm_area->request.length = 0x0050;
@@ -864,7 +866,7 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable)
CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
secm_area->response.code);
out:
- spin_unlock_irq(&chsc_page_lock);
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
}
@@ -992,6 +994,7 @@ chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
+ unsigned long flags;
int ccode, ret;
struct {
@@ -1021,7 +1024,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)
return -EINVAL;
- spin_lock_irq(&chsc_page_lock);
+ spin_lock_irqsave(&chsc_page_lock, flags);
memset(chsc_page, 0, PAGE_SIZE);
scmc_area = chsc_page;
scmc_area->request.length = 0x0010;
@@ -1053,7 +1056,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
(struct cmg_chars *) &scmc_area->data);
out:
- spin_unlock_irq(&chsc_page_lock);
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
}
@@ -1134,6 +1137,7 @@ struct css_chsc_char css_chsc_characteristics;
int __init
chsc_determine_css_characteristics(void)
{
+ unsigned long flags;
int result;
struct {
struct chsc_header request;
@@ -1146,7 +1150,7 @@ chsc_determine_css_characteristics(void)
u32 chsc_char[508];
} __attribute__ ((packed)) *scsc_area;
- spin_lock_irq(&chsc_page_lock);
+ spin_lock_irqsave(&chsc_page_lock, flags);
memset(chsc_page, 0, PAGE_SIZE);
scsc_area = chsc_page;
scsc_area->request.length = 0x0010;
@@ -1168,7 +1172,7 @@ chsc_determine_css_characteristics(void)
CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
scsc_area->response.code);
exit:
- spin_unlock_irq(&chsc_page_lock);
+ spin_unlock_irqrestore(&chsc_page_lock, flags);
return result;
}
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 5d7fbe4e9..581001989 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,7 +3,7 @@
*
* Debug traces for zfcp.
*
- * Copyright IBM Corp. 2002, 2013
+ * Copyright IBM Corp. 2002, 2016
*/
#define KMSG_COMPONENT "zfcp"
@@ -65,7 +65,7 @@ void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
* @tag: tag indicating which kind of unsolicited status has been received
* @req: request for which a response was received
*/
-void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
+void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
{
struct zfcp_dbf *dbf = req->adapter->dbf;
struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
@@ -85,6 +85,8 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
rec->u.res.req_issued = req->issued;
rec->u.res.prot_status = q_pref->prot_status;
rec->u.res.fsf_status = q_head->fsf_status;
+ rec->u.res.port_handle = q_head->port_handle;
+ rec->u.res.lun_handle = q_head->lun_handle;
memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
FSF_PROT_STATUS_QUAL_SIZE);
@@ -97,7 +99,7 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
rec->pl_len, "fsf_res", req->req_id);
}
- debug_event(dbf->hba, 1, rec, sizeof(*rec));
+ debug_event(dbf->hba, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
@@ -241,7 +243,8 @@ static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
if (sdev) {
rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
rec->lun = zfcp_scsi_dev_lun(sdev);
- }
+ } else
+ rec->lun = ZFCP_DBF_INVALID_LUN;
}
/**
@@ -320,13 +323,48 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
+/**
+ * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
+ * @tag: identifier for event
+ * @wka_port: well known address port
+ * @req_id: request ID to correlate with potential HBA trace record
+ */
+void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
+ u64 req_id)
+{
+ struct zfcp_dbf *dbf = wka_port->adapter->dbf;
+ struct zfcp_dbf_rec *rec = &dbf->rec_buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dbf->rec_lock, flags);
+ memset(rec, 0, sizeof(*rec));
+
+ rec->id = ZFCP_DBF_REC_RUN;
+ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+ rec->port_status = wka_port->status;
+ rec->d_id = wka_port->d_id;
+ rec->lun = ZFCP_DBF_INVALID_LUN;
+
+ rec->u.run.fsf_req_id = req_id;
+ rec->u.run.rec_status = ~0;
+ rec->u.run.rec_step = ~0;
+ rec->u.run.rec_action = ~0;
+ rec->u.run.rec_count = ~0;
+
+ debug_event(dbf->rec, 1, rec, sizeof(*rec));
+ spin_unlock_irqrestore(&dbf->rec_lock, flags);
+}
+
static inline
-void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
- u64 req_id, u32 d_id)
+void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
+ char *paytag, struct scatterlist *sg, u8 id, u16 len,
+ u64 req_id, u32 d_id, u16 cap_len)
{
struct zfcp_dbf_san *rec = &dbf->san_buf;
u16 rec_len;
unsigned long flags;
+ struct zfcp_dbf_pay *payload = &dbf->pay_buf;
+ u16 pay_sum = 0;
spin_lock_irqsave(&dbf->san_lock, flags);
memset(rec, 0, sizeof(*rec));
@@ -334,10 +372,41 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
rec->id = id;
rec->fsf_req_id = req_id;
rec->d_id = d_id;
- rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD);
- memcpy(rec->payload, data, rec_len);
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+ rec->pl_len = len; /* full length even if we cap pay below */
+ if (!sg)
+ goto out;
+ rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
+ memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
+ if (len <= rec_len)
+ goto out; /* skip pay record if full content in rec->payload */
+
+ /* if (len > rec_len):
+ * dump data up to cap_len ignoring small duplicate in rec->payload
+ */
+ spin_lock(&dbf->pay_lock);
+ memset(payload, 0, sizeof(*payload));
+ memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
+ payload->fsf_req_id = req_id;
+ payload->counter = 0;
+ for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
+ u16 pay_len, offset = 0;
+
+ while (offset < sg->length && pay_sum < cap_len) {
+ pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
+ (u16)(sg->length - offset));
+ /* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
+ memcpy(payload->data, sg_virt(sg) + offset, pay_len);
+ debug_event(dbf->pay, 1, payload,
+ zfcp_dbf_plen(pay_len));
+ payload->counter++;
+ offset += pay_len;
+ pay_sum += pay_len;
+ }
+ }
+ spin_unlock(&dbf->pay_lock);
+out:
debug_event(dbf->san, 1, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->san_lock, flags);
}
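The loop above dumps a payload that exceeds the inline record in fixed-size trace records, capped at cap_len bytes. A user-space sketch of that chunking (record size and cap values are made up):

#include <stdio.h>
#include <string.h>

#define REC_MAX 16u	/* stands in for ZFCP_DBF_PAY_MAX_REC */

static void dump_capped(const unsigned char *buf, size_t len, size_t cap)
{
	size_t off = 0, counter = 0;

	if (cap < len)
		len = cap;		/* cap the dump, not the data */
	while (off < len) {
		size_t chunk = len - off;

		if (chunk > REC_MAX)
			chunk = REC_MAX;
		printf("rec %zu: %zu bytes at offset %zu\n",
		       counter++, chunk, off);
		off += chunk;
	}
}

int main(void)
{
	unsigned char buf[100];

	memset(buf, 0xab, sizeof(buf));
	dump_capped(buf, sizeof(buf), 40);	/* 3 records: 16 + 16 + 8 */
	return 0;
}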
@@ -354,9 +423,62 @@ void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
struct zfcp_fsf_ct_els *ct_els = fsf->data;
u16 length;
- length = (u16)(ct_els->req->length + FC_CT_HDR_LEN);
- zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length,
- fsf->req_id, d_id);
+ length = (u16)zfcp_qdio_real_bytes(ct_els->req);
+ zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
+ length, fsf->req_id, d_id, length);
+}
+
+static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
+ struct zfcp_fsf_req *fsf,
+ u16 len)
+{
+ struct zfcp_fsf_ct_els *ct_els = fsf->data;
+ struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
+ struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
+ struct scatterlist *resp_entry = ct_els->resp;
+ struct fc_gpn_ft_resp *acc;
+ int max_entries, x, last = 0;
+
+ if (!(memcmp(tag, "fsscth2", 7) == 0
+ && ct_els->d_id == FC_FID_DIR_SERV
+ && reqh->ct_rev == FC_CT_REV
+ && reqh->ct_in_id[0] == 0
+ && reqh->ct_in_id[1] == 0
+ && reqh->ct_in_id[2] == 0
+ && reqh->ct_fs_type == FC_FST_DIR
+ && reqh->ct_fs_subtype == FC_NS_SUBTYPE
+ && reqh->ct_options == 0
+ && reqh->_ct_resvd1 == 0
+ && reqh->ct_cmd == FC_NS_GPN_FT
+ /* reqh->ct_mr_size can vary so do not match but read below */
+ && reqh->_ct_resvd2 == 0
+ && reqh->ct_reason == 0
+ && reqh->ct_explan == 0
+ && reqh->ct_vendor == 0
+ && reqn->fn_resvd == 0
+ && reqn->fn_domain_id_scope == 0
+ && reqn->fn_area_id_scope == 0
+ && reqn->fn_fc4_type == FC_TYPE_FCP))
+ return len; /* not GPN_FT response so do not cap */
+
+ acc = sg_virt(resp_entry);
+ max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
+ + 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
+ * to account for header as 1st pseudo "entry" */;
+
+ /* the basic CT_IU preamble is the same size as one entry in the GPN_FT
+ * response, allowing us to skip special handling for it - just skip it
+ */
+ for (x = 1; x < max_entries && !last; x++) {
+ if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
+ acc++;
+ else
+ acc = sg_virt(++resp_entry);
+
+ last = acc->fp_flags & FC_NS_FID_LAST;
+ }
+ len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
+ return len; /* cap after last entry */
}
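The helper above walks fixed-size GPN_FT accept entries packed into scatterlist pages, stepping within a page until the per-page count is exhausted, then hopping to the next page, and stopping at the entry flagged FC_NS_FID_LAST. A simplified sketch of that paged walk (sizes and types are made up; the last flag must be set within the supplied pages):

#include <stdio.h>

#define ENTRIES_PER_PAGE 4	/* stands in for ZFCP_FC_GPN_FT_ENT_PAGE */

struct entry { int last; };

static int count_entries(struct entry *pages[], int max_entries)
{
	struct entry *e = pages[0];
	int x, page = 0, last = 0;

	for (x = 1; x < max_entries && !last; x++) {
		if (x % (ENTRIES_PER_PAGE + 1))
			e++;			/* next entry in this page */
		else
			e = pages[++page];	/* first entry of next page */
		last = e->last;
	}
	return x;	/* entries consumed, including the last one */
}

int main(void)
{
	struct entry p0[ENTRIES_PER_PAGE + 1] = {{0}};
	struct entry p1[ENTRIES_PER_PAGE + 1] = {{0}};
	struct entry *pages[] = { p0, p1 };

	p1[1].last = 1;			/* mark an entry on the second page */
	printf("entries: %d\n", count_entries(pages, 32));	/* 7 */
	return 0;
}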
/**
@@ -370,9 +492,10 @@ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
struct zfcp_fsf_ct_els *ct_els = fsf->data;
u16 length;
- length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN);
- zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length,
- fsf->req_id, 0);
+ length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
+ zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
+ length, fsf->req_id, ct_els->d_id,
+ zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
}
/**
@@ -386,11 +509,13 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
struct fsf_status_read_buffer *srb =
(struct fsf_status_read_buffer *) fsf->data;
u16 length;
+ struct scatterlist sg;
length = (u16)(srb->length -
offsetof(struct fsf_status_read_buffer, payload));
- zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length,
- fsf->req_id, ntoh24(srb->d_id));
+ sg_init_one(&sg, srb->payload.data, length);
+ zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
+ fsf->req_id, ntoh24(srb->d_id), length);
}
/**
@@ -399,7 +524,8 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
* @sc: pointer to struct scsi_cmnd
* @fsf: pointer to struct zfcp_fsf_req
*/
-void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
+void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
+ struct zfcp_fsf_req *fsf)
{
struct zfcp_adapter *adapter =
(struct zfcp_adapter *) sc->device->host->hostdata[0];
@@ -442,7 +568,7 @@ void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
}
}
- debug_event(dbf->scsi, 1, rec, sizeof(*rec));
+ debug_event(dbf->scsi, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 0be3d4868..36d075842 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -2,7 +2,7 @@
* zfcp device driver
* debug feature declarations
*
- * Copyright IBM Corp. 2008, 2010
+ * Copyright IBM Corp. 2008, 2015
*/
#ifndef ZFCP_DBF_H
@@ -17,6 +17,11 @@
#define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull
+enum zfcp_dbf_pseudo_erp_act_type {
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD = 0xff,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL = 0xfe,
+};
+
/**
* struct zfcp_dbf_rec_trigger - trace record for triggered recovery action
* @ready: number of ready recovery actions
@@ -110,6 +115,7 @@ struct zfcp_dbf_san {
u32 d_id;
#define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32)
char payload[ZFCP_DBF_SAN_MAX_PAYLOAD];
+ u16 pl_len;
} __packed;
/**
@@ -126,6 +132,8 @@ struct zfcp_dbf_hba_res {
u8 prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
u32 fsf_status;
u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
+ u32 port_handle;
+ u32 lun_handle;
} __packed;
/**
@@ -279,7 +287,7 @@ static inline
void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
{
if (debug_level_enabled(req->adapter->dbf->hba, level))
- zfcp_dbf_hba_fsf_res(tag, req);
+ zfcp_dbf_hba_fsf_res(tag, level, req);
}
/**
@@ -318,7 +326,7 @@ void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
scmd->device->host->hostdata[0];
if (debug_level_enabled(adapter->dbf->scsi, level))
- zfcp_dbf_scsi(tag, scmd, req);
+ zfcp_dbf_scsi(tag, level, scmd, req);
}
/**
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 3fb410977..a59d67812 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -3,7 +3,7 @@
*
* Error Recovery Procedures (ERP).
*
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2015
*/
#define KMSG_COMPONENT "zfcp"
@@ -1217,8 +1217,14 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
break;
case ZFCP_ERP_ACTION_REOPEN_PORT:
- if (result == ZFCP_ERP_SUCCEEDED)
- zfcp_scsi_schedule_rport_register(port);
+ /* This switch case might also happen after a forced reopen
+ * completed successfully and was then superseded by a new
+ * non-forced reopen at `ersfs_2'. In this case, we must not
+ * do the clean-up of the non-forced version.
+ */
+ if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
+ if (result == ZFCP_ERP_SUCCEEDED)
+ zfcp_scsi_schedule_rport_register(port);
/* fall through */
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
put_device(&port->dev);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 5b5006525..c8fed9fa1 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -3,7 +3,7 @@
*
* External function declarations.
*
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2015
*/
#ifndef ZFCP_EXT_H
@@ -35,8 +35,9 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
struct zfcp_port *, struct scsi_device *, u8, u8);
extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
+extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
-extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
@@ -44,7 +45,8 @@ extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
-extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *,
+ struct zfcp_fsf_req *);
/* zfcp_erp.c */
extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 522a633c8..75f820ca1 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -3,7 +3,7 @@
*
* Implementation of FSF commands.
*
- * Copyright IBM Corp. 2002, 2013
+ * Copyright IBM Corp. 2002, 2015
*/
#define KMSG_COMPONENT "zfcp"
@@ -508,7 +508,10 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
fc_host_port_type(shost) = FC_PORTTYPE_PTP;
break;
case FSF_TOPO_FABRIC:
- fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+ if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
+ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+ else
+ fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
break;
case FSF_TOPO_AL:
fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
@@ -613,7 +616,6 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
fc_host_permanent_port_name(shost) = bottom->wwpn;
- fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
} else
fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
@@ -982,8 +984,12 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
if (zfcp_adapter_multi_buffer_active(adapter)) {
if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
return -EIO;
+ qtcb->bottom.support.req_buf_length =
+ zfcp_qdio_real_bytes(sg_req);
if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
return -EIO;
+ qtcb->bottom.support.resp_buf_length =
+ zfcp_qdio_real_bytes(sg_resp);
zfcp_qdio_set_data_div(qdio, &req->qdio_req,
zfcp_qdio_sbale_count(sg_req));
@@ -1073,6 +1079,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
req->handler = zfcp_fsf_send_ct_handler;
req->qtcb->header.port_handle = wka_port->handle;
+ ct->d_id = wka_port->d_id;
req->data = ct;
zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
@@ -1169,6 +1176,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
hton24(req->qtcb->bottom.support.d_id, d_id);
req->handler = zfcp_fsf_send_els_handler;
+ els->d_id = d_id;
req->data = els;
zfcp_dbf_san_req("fssels1", req, d_id);
@@ -1575,7 +1583,7 @@ out:
int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
- struct zfcp_fsf_req *req;
+ struct zfcp_fsf_req *req = NULL;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
@@ -1604,6 +1612,8 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
zfcp_fsf_req_free(req);
out:
spin_unlock_irq(&qdio->req_q_lock);
+ if (req && !IS_ERR(req))
+ zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
return retval;
}
@@ -1628,7 +1638,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
- struct zfcp_fsf_req *req;
+ struct zfcp_fsf_req *req = NULL;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
@@ -1657,6 +1667,8 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
zfcp_fsf_req_free(req);
out:
spin_unlock_irq(&qdio->req_q_lock);
+ if (req && !IS_ERR(req))
+ zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
return retval;
}
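Initializing req to NULL in the two WKA-port functions above is what makes the new trace call safe: the early exits (busy request queue, failed zfcp_fsf_req_create()) reach the post-unlock code without ever assigning req, and the trace must fire only when a request was really built. A minimal userspace sketch of the pattern — all names here are invented, and is_err() merely mimics the kernel's IS_ERR():

#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

/* Stand-in for IS_ERR(): error pointers occupy the top MAX_ERRNO
 * values of the address space. */
static int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct req { unsigned long req_id; };

static struct req *try_build_req(int fail)
{
	struct req *r;

	if (fail)
		return (struct req *)(unsigned long)-12; /* like ERR_PTR(-ENOMEM) */
	r = malloc(sizeof(*r));
	if (r)
		r->req_id = 42;
	return r;
}

static void open_port(int lock_busy, int build_fail)
{
	struct req *req = NULL; /* must start NULL: early exits skip assignment */

	if (!lock_busy)
		req = try_build_req(build_fail);

	/* after unlock: trace only if a request really exists */
	if (req && !is_err(req)) {
		printf("trace req_id=%lu\n", req->req_id);
		free(req);
	} else {
		printf("no trace\n");
	}
}

int main(void)
{
	open_port(0, 0); /* normal path: trace fires */
	open_port(1, 0); /* queue busy: req stays NULL, no trace */
	open_port(0, 1); /* create failed: error pointer, no trace */
	return 0;
}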
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 57ae3ae10..be1c04b33 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -3,7 +3,7 @@
*
* Interface to the FSF support functions.
*
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2015
*/
#ifndef FSF_H
@@ -436,6 +436,7 @@ struct zfcp_blk_drv_data {
* @handler_data: data passed to handler function
* @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
* @status: used to pass error status to calling function
+ * @d_id: Destination ID of either open WKA port for CT or of D_ID for ELS
*/
struct zfcp_fsf_ct_els {
struct scatterlist *req;
@@ -444,6 +445,7 @@ struct zfcp_fsf_ct_els {
void *handler_data;
struct zfcp_port *port;
int status;
+ u32 d_id;
};
#endif /* FSF_H */
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index b3c6ff491..9069f98a1 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corp. 2002, 2013
+ * Copyright IBM Corp. 2002, 2015
*/
#define KMSG_COMPONENT "zfcp"
@@ -556,6 +556,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
ids.port_id = port->d_id;
ids.roles = FC_RPORT_ROLE_FCP_TARGET;
+ zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
if (!rport) {
dev_err(&port->adapter->ccw_device->dev,
@@ -577,6 +580,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
struct fc_rport *rport = port->rport;
if (rport) {
+ zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
fc_remote_port_delete(rport);
port->rport = NULL;
}
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 661bb94e2..228b99ee0 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -823,17 +823,6 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
}
/**
- * cxlflash_shutdown() - shutdown handler
- * @pdev: PCI device associated with the host.
- */
-static void cxlflash_shutdown(struct pci_dev *pdev)
-{
- struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
-
- notify_shutdown(cfg, false);
-}
-
-/**
* cxlflash_remove() - PCI entry point to tear down host
* @pdev: PCI device associated with the host.
*
@@ -844,6 +833,11 @@ static void cxlflash_remove(struct pci_dev *pdev)
struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
ulong lock_flags;
+ if (!pci_is_enabled(pdev)) {
+ pr_debug("%s: Device is disabled\n", __func__);
+ return;
+ }
+
/* If a Task Management Function is active, wait for it to complete
* before continuing with remove.
*/
@@ -2685,7 +2679,7 @@ static struct pci_driver cxlflash_driver = {
.id_table = cxlflash_pci_table,
.probe = cxlflash_probe,
.remove = cxlflash_remove,
- .shutdown = cxlflash_shutdown,
+ .shutdown = cxlflash_remove,
.err_handler = &cxlflash_err_handler,
};
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index cd91a684c..4cb79902e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -4701,7 +4701,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
le16_to_cpu(mpi_reply->DevHandle));
mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
- if (!(ioc->logging_level & MPT_DEBUG_REPLY) &&
+ if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
(scmd->sense_buffer[2] == MEDIUM_ERROR) ||
(scmd->sense_buffer[2] == HARDWARE_ERROR)))
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index e0a78f53d..bac8cdf9f 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1472,12 +1472,12 @@ retry:
out_err:
kfree(lun_data);
out:
- scsi_device_put(sdev);
if (scsi_device_created(sdev))
/*
* the sdev we used didn't appear in the report luns scan
*/
__scsi_remove_device(sdev);
+ scsi_device_put(sdev);
return ret;
}
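Reordering scsi_device_put() after __scsi_remove_device() matters because the put may drop the final reference and free the sdev that the remove is about to touch; with the old order, the remove could run on freed memory. A toy refcount model of why the fixed order is safe (names invented):

#include <stdio.h>
#include <stdlib.h>

struct sdev {
	int refcount;
	int created;
};

static void sdev_put(struct sdev *s)
{
	if (--s->refcount == 0) {
		printf("freeing sdev\n");
		free(s);
	}
}

static void sdev_remove(struct sdev *s)
{
	/* touches *s -- a use-after-free if the last put already ran */
	printf("removing sdev, created=%d\n", s->created);
}

int main(void)
{
	struct sdev *s = calloc(1, sizeof(*s));

	s->refcount = 1;	/* the scan path holds the only reference */
	s->created = 1;

	/* fixed order: remove while the reference is still held */
	sdev_remove(s);
	sdev_put(s);
	return 0;
}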
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
index 333eb2215..0aaf429f3 100644
--- a/drivers/soc/fsl/qe/gpio.c
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -41,7 +41,8 @@ struct qe_gpio_chip {
static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
{
- struct qe_gpio_chip *qe_gc = gpiochip_get_data(&mm_gc->gc);
+ struct qe_gpio_chip *qe_gc =
+ container_of(mm_gc, struct qe_gpio_chip, mm_gc);
struct qe_pio_regs __iomem *regs = mm_gc->regs;
qe_gc->cpdata = in_be32(&regs->cpdata);
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
index 41eff805a..104e68d9b 100644
--- a/drivers/soc/fsl/qe/qe_common.c
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -70,6 +70,11 @@ int cpm_muram_init(void)
}
muram_pool = gen_pool_create(0, -1);
+ if (!muram_pool) {
+ pr_err("Cannot allocate memory pool for CPM/QE muram");
+ ret = -ENOMEM;
+ goto out_muram;
+ }
muram_pbase = of_translate_address(np, zero);
if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
pr_err("Cannot translate zero through CPM muram node");
@@ -116,6 +121,9 @@ static unsigned long cpm_muram_alloc_common(unsigned long size,
struct muram_block *entry;
unsigned long start;
+ if (!muram_pool && cpm_muram_init())
+ goto out2;
+
start = gen_pool_alloc_algo(muram_pool, size, algo, data);
if (!start)
goto out2;
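The guard added to cpm_muram_alloc_common() makes pool creation lazy: if cpm_muram_init() never ran, or its gen_pool_create() failed, allocation retries initialization once before bailing out. A toy version of the guard with userspace stand-ins for the pool:

#include <stdio.h>

static void *muram_pool;

static int muram_init(void)
{
	muram_pool = "pool";	/* pretend gen_pool_create() succeeded */
	printf("initialized pool on demand\n");
	return 0;		/* nonzero would mean init failed */
}

static long muram_alloc(unsigned long size)
{
	if (!muram_pool && muram_init())
		return -1;	/* init failed: bail, like the goto out2 */
	printf("allocating %lu bytes\n", size);
	return 0;
}

int main(void)
{
	muram_alloc(64);	/* first call triggers initialization */
	muram_alloc(64);	/* pool already exists */
	return 0;
}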
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 9e9dadb52..eec5e3f6e 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -760,7 +760,6 @@ static int dspi_remove(struct platform_device *pdev)
/* Disconnect from the SPI framework */
clk_disable_unprepare(dspi->clk);
spi_unregister_master(dspi->master);
- spi_master_put(dspi->master);
return 0;
}
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index 19c1572f1..800245eac 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -36,6 +36,7 @@ config ION_TEGRA
config ION_HISI
tristate "Ion for Hisilicon"
depends on ARCH_HISI && ION
+ select ION_OF
help
Choose this option if you wish to use ion on Hisilicon Platform.
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index a8822fe2b..f4cee811c 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -69,16 +69,20 @@ inline u32 get_DWORD(struct ks_wlan_private *priv)
return data;
}
-void ks_wlan_hw_wakeup_task(struct work_struct *work)
+static void ks_wlan_hw_wakeup_task(struct work_struct *work)
{
struct ks_wlan_private *priv =
container_of(work, struct ks_wlan_private, ks_wlan_wakeup_task);
int ps_status = atomic_read(&priv->psstatus.status);
+ long time_left;
if (ps_status == PS_SNOOZE) {
ks_wlan_hw_wakeup_request(priv);
- if (!wait_for_completion_interruptible_timeout(&priv->psstatus.wakeup_wait, HZ / 50)) { /* 20ms timeout */
- DPRINTK(1, "wake up timeout !!!\n");
+ time_left = wait_for_completion_interruptible_timeout(
+ &priv->psstatus.wakeup_wait,
+ msecs_to_jiffies(20));
+ if (time_left <= 0) {
+ DPRINTK(1, "wake up timeout or interrupted !!!\n");
schedule_work(&priv->ks_wlan_wakeup_task);
return;
}
@@ -1505,7 +1509,7 @@ void hostif_infrastructure_set_request(struct ks_wlan_private *priv)
ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
}
-void hostif_infrastructure_set2_request(struct ks_wlan_private *priv)
+static void hostif_infrastructure_set2_request(struct ks_wlan_private *priv)
{
struct hostif_infrastructure_set2_request_t *pp;
uint16_t capability;
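The ks_hostif wakeup fix above works because wait_for_completion_interruptible_timeout() returns a negative errno when a signal interrupts the wait, 0 on timeout, and the remaining jiffies (>0) on success; the old !ret test misread an interruption as success. A userspace sketch of the three return classes — mock_wait() and its outcomes are invented for illustration:

#include <stdio.h>

/* Mock of wait_for_completion_interruptible_timeout(): <0 when a
 * signal interrupts the wait, 0 on timeout, remaining jiffies on
 * success. */
static long mock_wait(int outcome)
{
	switch (outcome) {
	case 0: return -512;	/* -ERESTARTSYS: interrupted */
	case 1: return 0;	/* timed out */
	default: return 3;	/* completed with 3 jiffies left */
	}
}

int main(void)
{
	for (int outcome = 0; outcome < 3; outcome++) {
		long time_left = mock_wait(outcome);

		/* The fixed test: retry on timeout OR interruption. */
		if (time_left <= 0)
			printf("outcome %d: retry (timeout or signal)\n",
			       outcome);
		else
			printf("outcome %d: woke up, %ld jiffies left\n",
			       outcome, time_left);
	}
	return 0;
}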
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index 77485235c..32d3a9c07 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -670,13 +670,13 @@ u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
u8 res = _SUCCESS;
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
if (!ph2c) {
res = _FAIL;
goto exit;
}
- paddbareq_parm = kzalloc(sizeof(struct addBaReq_parm), GFP_KERNEL);
+ paddbareq_parm = kzalloc(sizeof(struct addBaReq_parm), GFP_ATOMIC);
if (!paddbareq_parm) {
kfree(ph2c);
res = _FAIL;
diff --git a/drivers/staging/sm750fb/ddk750_mode.c b/drivers/staging/sm750fb/ddk750_mode.c
index ccb4e0676..e29d4bd5d 100644
--- a/drivers/staging/sm750fb/ddk750_mode.c
+++ b/drivers/staging/sm750fb/ddk750_mode.c
@@ -63,7 +63,7 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam,
dispControl |= (CRT_DISPLAY_CTRL_CRTSELECT | CRT_DISPLAY_CTRL_RGBBIT);
/* Set bit 14 of display controller */
- dispControl = DISPLAY_CTRL_CLOCK_PHASE;
+ dispControl |= DISPLAY_CTRL_CLOCK_PHASE;
POKE32(CRT_DISPLAY_CTRL, dispControl);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 6094a6bed..e825d580c 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -754,15 +754,7 @@ EXPORT_SYMBOL(target_complete_cmd);
void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
- if (scsi_status != SAM_STAT_GOOD) {
- return;
- }
-
- /*
- * Calculate new residual count based upon length of SCSI data
- * transferred.
- */
- if (length < cmd->data_length) {
+ if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
cmd->residual_count += cmd->data_length - length;
} else {
@@ -771,12 +763,6 @@ void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int len
}
cmd->data_length = length;
- } else if (length > cmd->data_length) {
- cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
- cmd->residual_count = length - cmd->data_length;
- } else {
- cmd->se_cmd_flags &= ~(SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT);
- cmd->residual_count = 0;
}
target_complete_cmd(cmd, scsi_status);
@@ -1706,6 +1692,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
+ case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
break;
case TCM_OUT_OF_RESOURCES:
sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -2547,8 +2534,10 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
* fabric acknowledgement that requires two target_put_sess_cmd()
* invocations before se_cmd descriptor release.
*/
- if (ack_kref)
+ if (ack_kref) {
kref_get(&se_cmd->cmd_kref);
+ se_cmd->se_cmd_flags |= SCF_ACK_KREF;
+ }
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
if (se_sess->sess_tearing_down) {
@@ -2871,6 +2860,12 @@ static const struct sense_info sense_info_table[] = {
.ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
.add_sector_info = true,
},
+ [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
+ .key = COPY_ABORTED,
+ .asc = 0x0d,
+ .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
+
+ },
[TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
/*
* Returning ILLEGAL REQUEST would cause immediate IO errors on
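The target_complete_cmd_with_length() rewrite above keeps only the underflow path, and only for SAM_STAT_GOOD. A standalone sketch of the residual arithmetic — the struct fields loosely mirror se_cmd, but this is not the kernel code itself:

#include <stdio.h>

#define SAM_STAT_GOOD 0
#define SCF_UNDERFLOW_BIT 0x1

struct cmd {
	unsigned flags;
	unsigned residual_count;
	unsigned data_length;
};

/* Only a GOOD status with a short transfer shrinks data_length and
 * grows the residual, matching the simplified function above. */
static void complete_with_length(struct cmd *c, int status, unsigned length)
{
	if (status == SAM_STAT_GOOD && length < c->data_length) {
		if (c->flags & SCF_UNDERFLOW_BIT)
			c->residual_count += c->data_length - length;
		else {
			c->flags |= SCF_UNDERFLOW_BIT;
			c->residual_count = c->data_length - length;
		}
		c->data_length = length;
	}
}

int main(void)
{
	struct cmd c = { .data_length = 4096 };

	complete_with_length(&c, SAM_STAT_GOOD, 512);
	printf("residual=%u data_length=%u\n", c.residual_count,
	       c.data_length);
	/* prints: residual=3584 data_length=512 */
	return 0;
}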
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 75cd85426..094a1440e 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -104,7 +104,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
}
mutex_unlock(&g_device_mutex);
- pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
+ pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
return -EINVAL;
}
@@ -185,7 +185,7 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
struct xcopy_op *xop, unsigned char *p,
- unsigned short tdll)
+ unsigned short tdll, sense_reason_t *sense_ret)
{
struct se_device *local_dev = se_cmd->se_dev;
unsigned char *desc = p;
@@ -193,6 +193,8 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
unsigned short start = 0;
bool src = true;
+ *sense_ret = TCM_INVALID_PARAMETER_LIST;
+
if (offset != 0) {
pr_err("XCOPY target descriptor list length is not"
" multiple of %d\n", XCOPY_TARGET_DESC_LEN);
@@ -243,9 +245,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
else
rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);
-
- if (rc < 0)
+ /*
+ * If a matching IEEE NAA 0x83 descriptor for the requested device
+ * is not located on this node, return COPY_ABORTED with ASC/ASCQ

+ * 0x0d/0x02 - COPY_TARGET_DEVICE_NOT_REACHABLE to request the
+ * initiator to fall back to normal copy method.
+ */
+ if (rc < 0) {
+ *sense_ret = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
goto out;
+ }
pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
xop->src_dev, &xop->src_tid_wwn[0]);
@@ -653,6 +662,7 @@ static int target_xcopy_read_source(
rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
remote_port, true);
if (rc < 0) {
+ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
transport_generic_free_cmd(se_cmd, 0);
return rc;
}
@@ -664,6 +674,7 @@ static int target_xcopy_read_source(
rc = target_xcopy_issue_pt_cmd(xpt_cmd);
if (rc < 0) {
+ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
transport_generic_free_cmd(se_cmd, 0);
return rc;
}
@@ -714,6 +725,7 @@ static int target_xcopy_write_destination(
remote_port, false);
if (rc < 0) {
struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
+ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
/*
* If the failure happened before the t_mem_list hand-off in
* target_xcopy_setup_pt_cmd(), Reset memory + clear flag so that
@@ -729,6 +741,7 @@ static int target_xcopy_write_destination(
rc = target_xcopy_issue_pt_cmd(xpt_cmd);
if (rc < 0) {
+ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
transport_generic_free_cmd(se_cmd, 0);
return rc;
@@ -815,9 +828,14 @@ static void target_xcopy_do_work(struct work_struct *work)
out:
xcopy_pt_undepend_remotedev(xop);
kfree(xop);
-
- pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n");
- ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ /*
+ * Don't override an error scsi status if it has already been set
+ */
+ if (ec_cmd->scsi_status == SAM_STAT_GOOD) {
+ pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY"
+ " CHECK_CONDITION -> sending response\n", rc);
+ ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ }
target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
}
@@ -875,7 +893,7 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
" tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
tdll, sdll, inline_dl);
- rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll);
+ rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
if (rc <= 0)
goto out;
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 216e18cc9..9a874a899 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -572,7 +572,7 @@ static void ft_send_work(struct work_struct *work)
if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
&cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
ntohl(fcp->fc_dl), task_attr, data_dir,
- TARGET_SCF_ACK_KREF))
+ TARGET_SCF_ACK_KREF | TARGET_SCF_USE_CPUID))
goto err;
pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index 915facbf5..e1134a4d9 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -229,7 +229,7 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
++uiomem;
}
- priv->dmem_region_start = i;
+ priv->dmem_region_start = uiomem - &uioinfo->mem[0];
priv->num_dmem_regions = pdata->num_dynamic_regions;
for (i = 0; i < pdata->num_dynamic_regions; ++i) {
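The uio_dmem_genirq fix computes the first dynamic region's index by pointer difference rather than reusing a loop counter that had counted all platform resources, not just memory regions. A minimal demonstration of the pointer arithmetic (the struct is illustrative):

#include <stdio.h>

struct mem_region { unsigned long addr, size; };

int main(void)
{
	struct mem_region mem[8];
	struct mem_region *uiomem = &mem[0];

	/* the probe loop advances uiomem once per static region */
	uiomem += 3;

	/* pointer difference recovers the index of the first dynamic
	 * region; the stale counter could overshoot it */
	printf("dmem_region_start = %td\n", uiomem - &mem[0]); /* 3 */
	return 0;
}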
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 924bad45c..37a37c4d0 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -50,9 +50,9 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 1;
if (regno < 16) {
- red >>= 8;
- green >>= 8;
- blue >>= 8;
+ red >>= 16 - info->var.red.length;
+ green >>= 16 - info->var.green.length;
+ blue >>= 16 - info->var.blue.length;
((u32 *)(info->pseudo_palette))[regno] =
(red << info->var.red.offset) |
(green << info->var.green.offset) |
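The efifb change shifts each 16-bit color component down by 16 minus the channel width instead of a fixed 8 bits, so narrow channels (e.g. a 5:6:5 visual) are not overflowed. A worked example under an assumed RGB565 layout (lengths 5/6/5, offsets 11/5/0):

#include <stdio.h>

int main(void)
{
	unsigned red = 0xffff, green = 0x8000, blue = 0x1234;
	unsigned rl = 5, gl = 6, bl = 5;	/* channel widths */
	unsigned ro = 11, go = 5, bo = 0;	/* channel offsets */

	red   >>= 16 - rl;	/* 0xffff -> 0x1f (5 bits kept) */
	green >>= 16 - gl;	/* 0x8000 -> 0x20 (6 bits kept) */
	blue  >>= 16 - bl;	/* 0x1234 -> 0x02 */

	printf("pixel = 0x%04x\n", (red << ro) | (green << go) | (blue << bo));
	/* With the old fixed >>= 8, red would stay 0xff and overflow its
	 * 5-bit field, corrupting the neighbouring channels. */
	return 0;
}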
diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c
index 4a2290f90..d5735c120 100644
--- a/drivers/watchdog/mt7621_wdt.c
+++ b/drivers/watchdog/mt7621_wdt.c
@@ -139,7 +139,6 @@ static int mt7621_wdt_probe(struct platform_device *pdev)
if (!IS_ERR(mt7621_wdt_reset))
reset_control_deassert(mt7621_wdt_reset);
- mt7621_wdt_dev.dev = &pdev->dev;
mt7621_wdt_dev.bootstatus = mt7621_wdt_bootcause();
watchdog_init_timeout(&mt7621_wdt_dev, mt7621_wdt_dev.max_timeout,
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
index 1967919ae..14b4fd428 100644
--- a/drivers/watchdog/rt2880_wdt.c
+++ b/drivers/watchdog/rt2880_wdt.c
@@ -158,7 +158,6 @@ static int rt288x_wdt_probe(struct platform_device *pdev)
rt288x_wdt_freq = clk_get_rate(rt288x_wdt_clk) / RALINK_WDT_PRESCALE;
- rt288x_wdt_dev.dev = &pdev->dev;
rt288x_wdt_dev.bootstatus = rt288x_wdt_bootcause();
rt288x_wdt_dev.max_timeout = (0xfffful / rt288x_wdt_freq);
rt288x_wdt_dev.parent = &pdev->dev;
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index 5b6a1743e..b3c2cc79c 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -276,32 +276,26 @@ static int v9fs_xattr_set_acl(const struct xattr_handler *handler,
switch (handler->flags) {
case ACL_TYPE_ACCESS:
if (acl) {
- umode_t mode = inode->i_mode;
- retval = posix_acl_equiv_mode(acl, &mode);
- if (retval < 0)
+ struct iattr iattr;
+
+ retval = posix_acl_update_mode(inode, &iattr.ia_mode, &acl);
+ if (retval)
goto err_out;
- else {
- struct iattr iattr;
- if (retval == 0) {
- /*
- * ACL can be represented
- * by the mode bits. So don't
- * update ACL.
- */
- acl = NULL;
- value = NULL;
- size = 0;
- }
- /* Updte the mode bits */
- iattr.ia_mode = ((mode & S_IALLUGO) |
- (inode->i_mode & ~S_IALLUGO));
- iattr.ia_valid = ATTR_MODE;
- /* FIXME should we update ctime ?
- * What is the following setxattr update the
- * mode ?
+ if (!acl) {
+ /*
+ * ACL can be represented
+ * by the mode bits. So don't
+ * update ACL.
*/
- v9fs_vfs_setattr_dotl(dentry, &iattr);
+ value = NULL;
+ size = 0;
}
+ iattr.ia_valid = ATTR_MODE;
+ /* FIXME should we update ctime ?
+ * What is the following setxattr update the
+ * mode ?
+ */
+ v9fs_vfs_setattr_dotl(dentry, &iattr);
}
break;
case ACL_TYPE_DEFAULT:
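Every ACL hunk from here down (9p, btrfs, ceph, ext2, ext4, f2fs, gfs2, hfsplus, jffs2, jfs) replaces the same open-coded posix_acl_equiv_mode() sequence with the common posix_acl_update_mode() helper, which additionally strips S_ISGID when the caller is neither in the owning group nor otherwise privileged — the CVE-2016-7097 class of setgid leak. For reference, a sketch of the helper as it was added upstream in fs/posix_acl.c:

int posix_acl_update_mode(struct inode *inode, umode_t *mode_p,
			  struct posix_acl **acl)
{
	umode_t mode = inode->i_mode;
	int error;

	error = posix_acl_equiv_mode(*acl, &mode);
	if (error < 0)
		return error;
	if (error == 0)
		*acl = NULL;		/* ACL fully expressible as mode bits */
	if (!in_group_p(inode->i_gid) &&
	    !capable_wrt_inode_uidgid(inode, CAP_FSETID))
		mode &= ~S_ISGID;	/* don't let setgid survive the update */
	*mode_p = mode;
	return 0;
}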
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 53bb7af4e..247b8dfaf 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -79,11 +79,9 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans,
case ACL_TYPE_ACCESS:
name = XATTR_NAME_POSIX_ACL_ACCESS;
if (acl) {
- ret = posix_acl_equiv_mode(acl, &inode->i_mode);
- if (ret < 0)
+ ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+ if (ret)
return ret;
- if (ret == 0)
- acl = NULL;
}
ret = 0;
break;
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index 4f67227f6..d0b6b342d 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -95,11 +95,9 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type)
case ACL_TYPE_ACCESS:
name = XATTR_NAME_POSIX_ACL_ACCESS;
if (acl) {
- ret = posix_acl_equiv_mode(acl, &new_mode);
- if (ret < 0)
+ ret = posix_acl_update_mode(inode, &new_mode, &acl);
+ if (ret)
goto out;
- if (ret == 0)
- acl = NULL;
}
break;
case ACL_TYPE_DEFAULT:
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 0f5375d8e..eede975e8 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1272,7 +1272,8 @@ again:
statret = __ceph_do_getattr(inode, page,
CEPH_STAT_CAP_INLINE_DATA, !!page);
if (statret < 0) {
- __free_page(page);
+ if (page)
+ __free_page(page);
if (statret == -ENODATA) {
BUG_ON(retry_op != READ_INLINE);
goto again;
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 6c58e13fe..3d03e48a9 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -152,6 +152,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
list_for_each(tmp1, &cifs_tcp_ses_list) {
server = list_entry(tmp1, struct TCP_Server_Info,
tcp_ses_list);
+ seq_printf(m, "\nNumber of credits: %d", server->credits);
i++;
list_for_each(tmp2, &server->smb_ses_list) {
ses = list_entry(tmp2, struct cifs_ses,
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 14ae4b8e1..8c68d03a6 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -271,7 +271,7 @@ cifs_alloc_inode(struct super_block *sb)
cifs_inode->createtime = 0;
cifs_inode->epoch = 0;
#ifdef CONFIG_CIFS_SMB2
- get_random_bytes(cifs_inode->lease_key, SMB2_LEASE_KEY_SIZE);
+ generate_random_uuid(cifs_inode->lease_key);
#endif
/*
* Can not set i_flags here - they get immediately overwritten to zero
@@ -1271,7 +1271,6 @@ init_cifs(void)
GlobalTotalActiveXid = 0;
GlobalMaxActiveXid = 0;
spin_lock_init(&cifs_tcp_ses_lock);
- spin_lock_init(&cifs_file_list_lock);
spin_lock_init(&GlobalMid_Lock);
get_random_bytes(&cifs_lock_secret, sizeof(cifs_lock_secret));
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 8f1d8c1e7..65f78b7a9 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -833,6 +833,7 @@ struct cifs_tcon {
struct list_head tcon_list;
int tc_count;
struct list_head openFileList;
+ spinlock_t open_file_lock; /* protects list above */
struct cifs_ses *ses; /* pointer to session associated with */
char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */
char *nativeFileSystem;
@@ -889,7 +890,7 @@ struct cifs_tcon {
#endif /* CONFIG_CIFS_STATS2 */
__u64 bytes_read;
__u64 bytes_written;
- spinlock_t stat_lock;
+ spinlock_t stat_lock; /* protects the two fields above */
#endif /* CONFIG_CIFS_STATS */
FILE_SYSTEM_DEVICE_INFO fsDevInfo;
FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */
@@ -1040,8 +1041,10 @@ struct cifs_fid_locks {
};
struct cifsFileInfo {
+ /* following two lists are protected by tcon->open_file_lock */
struct list_head tlist; /* pointer to next fid owned by tcon */
struct list_head flist; /* next fid (file instance) for this inode */
+ /* lock list below protected by cifsi->lock_sem */
struct cifs_fid_locks *llist; /* brlocks held by this fid */
kuid_t uid; /* allows finding which FileInfo structure */
__u32 pid; /* process id who opened file */
@@ -1049,11 +1052,12 @@ struct cifsFileInfo {
/* BB add lock scope info here if needed */ ;
/* lock scope id (0 if none) */
struct dentry *dentry;
- unsigned int f_flags;
struct tcon_link *tlink;
+ unsigned int f_flags;
bool invalidHandle:1; /* file closed via session abend */
bool oplock_break_cancelled:1;
- int count; /* refcount protected by cifs_file_list_lock */
+ int count;
+ spinlock_t file_info_lock; /* protects four flag/count fields above */
struct mutex fh_mutex; /* prevents reopen race after dead ses*/
struct cifs_search_info srch_inf;
struct work_struct oplock_break; /* work for oplock breaks */
@@ -1120,7 +1124,7 @@ struct cifs_writedata {
/*
* Take a reference on the file private data. Must be called with
- * cifs_file_list_lock held.
+ * cfile->file_info_lock held.
*/
static inline void
cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
@@ -1514,8 +1518,10 @@ require use of the stronger protocol */
* GlobalMid_Lock protects:
* list operations on pending_mid_q and oplockQ
* updates to XID counters, multiplex id and SMB sequence numbers
- * cifs_file_list_lock protects:
- * list operations on tcp and SMB session lists and tCon lists
+ * tcp_ses_lock protects:
+ * list operations on tcp and SMB session lists
+ * tcon->open_file_lock protects the list of open files hanging off the tcon
+ * cfile->file_info_lock protects counters and fields in cifs file struct
* f_owner.lock protects certain per file struct operations
* mapping->page_lock protects certain per page operations
*
@@ -1547,18 +1553,12 @@ GLOBAL_EXTERN struct list_head cifs_tcp_ses_list;
* tcp session, and the list of tcon's per smb session. It also protects
* the reference counters for the server, smb session, and tcon. Finally,
* changes to the tcon->tidStatus should be done while holding this lock.
+ * generally the locks should be taken in order tcp_ses_lock before
+ * tcon->open_file_lock and that before file->file_info_lock since the
+ * structure order is cifs_socket-->cifs_ses-->cifs_tcon-->cifs_file
*/
GLOBAL_EXTERN spinlock_t cifs_tcp_ses_lock;
-/*
- * This lock protects the cifs_file->llist and cifs_file->flist
- * list operations, and updates to some flags (cifs_file->invalidHandle)
- * It will be moved to either use the tcon->stat_lock or equivalent later.
- * If cifs_tcp_ses_lock and the lock below are both needed to be held, then
- * the cifs_tcp_ses_lock must be grabbed first and released last.
- */
-GLOBAL_EXTERN spinlock_t cifs_file_list_lock;
-
#ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
/* Outstanding dir notify requests */
GLOBAL_EXTERN struct list_head GlobalDnotifyReqList;
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index d47197ea4..78046051b 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -98,13 +98,13 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
struct list_head *tmp1;
/* list all files open on tree connection and mark them invalid */
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&tcon->open_file_lock);
list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
open_file = list_entry(tmp, struct cifsFileInfo, tlist);
open_file->invalidHandle = true;
open_file->oplock_break_cancelled = true;
}
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
/*
* BB Add call to invalidate_inodes(sb) for all superblocks mounted
* to this tcon.
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 2e4f4bad8..7b6717952 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2163,7 +2163,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr,
sizeof(tcp_ses->dstaddr));
#ifdef CONFIG_CIFS_SMB2
- get_random_bytes(tcp_ses->client_guid, SMB2_CLIENT_GUID_SIZE);
+ generate_random_uuid(tcp_ses->client_guid);
#endif
/*
* at this point we are the only ones with the pointer
@@ -3688,14 +3688,16 @@ remote_path_check:
goto mount_fail_check;
}
- rc = cifs_are_all_path_components_accessible(server,
+ if (rc != -EREMOTE) {
+ rc = cifs_are_all_path_components_accessible(server,
xid, tcon, cifs_sb,
full_path);
- if (rc != 0) {
- cifs_dbg(VFS, "cannot query dirs between root and final path, "
- "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
- rc = 0;
+ if (rc != 0) {
+ cifs_dbg(VFS, "cannot query dirs between root and final path, "
+ "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
+ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+ rc = 0;
+ }
}
kfree(full_path);
}
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 579e41b35..605438afe 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -305,6 +305,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
cfile->tlink = cifs_get_tlink(tlink);
INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
mutex_init(&cfile->fh_mutex);
+ spin_lock_init(&cfile->file_info_lock);
cifs_sb_active(inode->i_sb);
@@ -317,7 +318,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
oplock = 0;
}
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&tcon->open_file_lock);
if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
oplock = fid->pending_open->oplock;
list_del(&fid->pending_open->olist);
@@ -326,12 +327,13 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
server->ops->set_fid(cfile, fid, oplock);
list_add(&cfile->tlist, &tcon->openFileList);
+
/* if readable file instance put first in list*/
if (file->f_mode & FMODE_READ)
list_add(&cfile->flist, &cinode->openFileList);
else
list_add_tail(&cfile->flist, &cinode->openFileList);
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
if (fid->purge_cache)
cifs_zap_mapping(inode);
@@ -343,16 +345,16 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&cifs_file->file_info_lock);
cifsFileInfo_get_locked(cifs_file);
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&cifs_file->file_info_lock);
return cifs_file;
}
/*
* Release a reference on the file private data. This may involve closing
* the filehandle out on the server. Must be called without holding
- * cifs_file_list_lock.
+ * tcon->open_file_lock and cifs_file->file_info_lock.
*/
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
@@ -367,11 +369,15 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
struct cifs_pending_open open;
bool oplock_break_cancelled;
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&tcon->open_file_lock);
+
+ spin_lock(&cifs_file->file_info_lock);
if (--cifs_file->count > 0) {
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&cifs_file->file_info_lock);
+ spin_unlock(&tcon->open_file_lock);
return;
}
+ spin_unlock(&cifs_file->file_info_lock);
if (server->ops->get_lease_key)
server->ops->get_lease_key(inode, &fid);
@@ -395,7 +401,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
cifs_set_oplock_level(cifsi, 0);
}
- spin_unlock(&cifs_file_list_lock);
+
+ spin_unlock(&tcon->open_file_lock);
oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
@@ -772,10 +779,10 @@ int cifs_closedir(struct inode *inode, struct file *file)
server = tcon->ses->server;
cifs_dbg(FYI, "Freeing private data in close dir\n");
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&cfile->file_info_lock);
if (server->ops->dir_needs_close(cfile)) {
cfile->invalidHandle = true;
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&cfile->file_info_lock);
if (server->ops->close_dir)
rc = server->ops->close_dir(xid, tcon, &cfile->fid);
else
@@ -784,7 +791,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
/* not much we can do if it fails anyway, ignore rc */
rc = 0;
} else
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&cfile->file_info_lock);
buf = cfile->srch_inf.ntwrk_buf_start;
if (buf) {
@@ -1728,12 +1735,13 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
{
struct cifsFileInfo *open_file = NULL;
struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
/* only filter by fsuid on multiuser mounts */
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
fsuid_only = false;
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&tcon->open_file_lock);
/* we could simply get the first_list_entry since write-only entries
are always at the end of the list but since the first entry might
have a close pending, we go through the whole list */
@@ -1744,8 +1752,8 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
if (!open_file->invalidHandle) {
/* found a good file */
/* lock it so it will not be closed on us */
- cifsFileInfo_get_locked(open_file);
- spin_unlock(&cifs_file_list_lock);
+ cifsFileInfo_get(open_file);
+ spin_unlock(&tcon->open_file_lock);
return open_file;
} /* else might as well continue, and look for
another, or simply have the caller reopen it
@@ -1753,7 +1761,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
} else /* write only file */
break; /* write only files are last so must be done */
}
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
return NULL;
}
@@ -1762,6 +1770,7 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
{
struct cifsFileInfo *open_file, *inv_file = NULL;
struct cifs_sb_info *cifs_sb;
+ struct cifs_tcon *tcon;
bool any_available = false;
int rc;
unsigned int refind = 0;
@@ -1777,15 +1786,16 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
}
cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
+ tcon = cifs_sb_master_tcon(cifs_sb);
/* only filter by fsuid on multiuser mounts */
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
fsuid_only = false;
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&tcon->open_file_lock);
refind_writable:
if (refind > MAX_REOPEN_ATT) {
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
return NULL;
}
list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
@@ -1796,8 +1806,8 @@ refind_writable:
if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
if (!open_file->invalidHandle) {
/* found a good writable file */
- cifsFileInfo_get_locked(open_file);
- spin_unlock(&cifs_file_list_lock);
+ cifsFileInfo_get(open_file);
+ spin_unlock(&tcon->open_file_lock);
return open_file;
} else {
if (!inv_file)
@@ -1813,24 +1823,24 @@ refind_writable:
if (inv_file) {
any_available = false;
- cifsFileInfo_get_locked(inv_file);
+ cifsFileInfo_get(inv_file);
}
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
if (inv_file) {
rc = cifs_reopen_file(inv_file, false);
if (!rc)
return inv_file;
else {
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&tcon->open_file_lock);
list_move_tail(&inv_file->flist,
&cifs_inode->openFileList);
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
cifsFileInfo_put(inv_file);
- spin_lock(&cifs_file_list_lock);
++refind;
inv_file = NULL;
+ spin_lock(&tcon->open_file_lock);
goto refind_writable;
}
}
@@ -3618,15 +3628,17 @@ static int cifs_readpage(struct file *file, struct page *page)
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
struct cifsFileInfo *open_file;
+ struct cifs_tcon *tcon =
+ cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&tcon->open_file_lock);
list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
return 1;
}
}
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
return 0;
}
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 813fe13c2..c6729156f 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -120,6 +120,7 @@ tconInfoAlloc(void)
++ret_buf->tc_count;
INIT_LIST_HEAD(&ret_buf->openFileList);
INIT_LIST_HEAD(&ret_buf->tcon_list);
+ spin_lock_init(&ret_buf->open_file_lock);
#ifdef CONFIG_CIFS_STATS
spin_lock_init(&ret_buf->stat_lock);
#endif
@@ -465,7 +466,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
continue;
cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&tcon->open_file_lock);
list_for_each(tmp2, &tcon->openFileList) {
netfile = list_entry(tmp2, struct cifsFileInfo,
tlist);
@@ -495,11 +496,11 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
&netfile->oplock_break);
netfile->oplock_break_cancelled = false;
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
spin_unlock(&cifs_tcp_ses_lock);
return true;
}
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
spin_unlock(&cifs_tcp_ses_lock);
cifs_dbg(FYI, "No matching file for oplock break\n");
return true;
@@ -613,9 +614,9 @@ backup_cred(struct cifs_sb_info *cifs_sb)
void
cifs_del_pending_open(struct cifs_pending_open *open)
{
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
list_del(&open->olist);
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}
void
@@ -635,7 +636,7 @@ void
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
struct cifs_pending_open *open)
{
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&tlink_tcon(tlink)->open_file_lock);
cifs_add_pending_open_locked(fid, tlink, open);
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 65cf85dcd..8f6a2a586 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -597,14 +597,14 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
is_dir_changed(file)) || (index_to_find < first_entry_in_buffer)) {
/* close and restart search */
cifs_dbg(FYI, "search backing up - close and restart search\n");
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&cfile->file_info_lock);
if (server->ops->dir_needs_close(cfile)) {
cfile->invalidHandle = true;
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&cfile->file_info_lock);
if (server->ops->close_dir)
server->ops->close_dir(xid, tcon, &cfile->fid);
} else
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&cfile->file_info_lock);
if (cfile->srch_inf.ntwrk_buf_start) {
cifs_dbg(FYI, "freeing SMB ff cache buf on search rewind\n");
if (cfile->srch_inf.smallBuf)
diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
index 0ffa18094..238759c14 100644
--- a/fs/cifs/smb2glob.h
+++ b/fs/cifs/smb2glob.h
@@ -61,4 +61,14 @@
/* Maximum buffer size value we can send with 1 credit */
#define SMB2_MAX_BUFFER_SIZE 65536
+/*
+ * Maximum number of credits to keep available.
+ * This value is chosen somewhat arbitrarily. The Windows client
+ * defaults to 128 credits, the Windows server allows clients up to
+ * 512 credits, and the NetApp server does not limit clients at all.
+ * Choose a high enough value such that the client shouldn't limit
+ * performance.
+ */
+#define SMB2_MAX_CREDITS_AVAILABLE 32000
+
#endif /* _SMB2_GLOB_H */
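The smb2pdu.c hunk further below uses this cap to clamp the per-request credit ask: request 2 credits normally, fewer when close to the limit, and none at or above it. A userspace check of that arithmetic — credit_request() is a stand-in for the logic in smb2_hdr_assemble():

#include <stdio.h>

#define SMB2_MAX_CREDITS_AVAILABLE 32000

/* Mirror of the clamping done in smb2_hdr_assemble() below. */
static unsigned short credit_request(int held)
{
	if (held >= SMB2_MAX_CREDITS_AVAILABLE)
		return 0;
	return SMB2_MAX_CREDITS_AVAILABLE - held < 2 ?
	       SMB2_MAX_CREDITS_AVAILABLE - held : 2;
}

int main(void)
{
	int samples[] = { 0, 31998, 31999, 32000, 40000 };

	for (int i = 0; i < 5; i++)
		printf("held=%d -> request %u\n",
		       samples[i], credit_request(samples[i]));
	/* held=0 -> 2, held=31999 -> 1, held>=32000 -> 0 */
	return 0;
}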
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 4f0231e68..1238cd355 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -266,9 +266,15 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
struct tcon_link *tlink;
int rc;
+ if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
+ (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) &&
+ (buf->Attributes == 0))
+ return 0; /* would be a no op, no sense sending this */
+
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
return PTR_ERR(tlink);
+
rc = smb2_open_op_close(xid, tlink_tcon(tlink), cifs_sb, full_path,
FILE_WRITE_ATTRIBUTES, FILE_OPEN, 0, buf,
SMB2_OP_SET_INFO);
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 389fb9f8c..3d383489b 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -549,19 +549,19 @@ smb2_is_valid_lease_break(char *buffer)
list_for_each(tmp1, &server->smb_ses_list) {
ses = list_entry(tmp1, struct cifs_ses, smb_ses_list);
- spin_lock(&cifs_file_list_lock);
list_for_each(tmp2, &ses->tcon_list) {
tcon = list_entry(tmp2, struct cifs_tcon,
tcon_list);
+ spin_lock(&tcon->open_file_lock);
cifs_stats_inc(
&tcon->stats.cifs_stats.num_oplock_brks);
if (smb2_tcon_has_lease(tcon, rsp, lw)) {
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
spin_unlock(&cifs_tcp_ses_lock);
return true;
}
+ spin_unlock(&tcon->open_file_lock);
}
- spin_unlock(&cifs_file_list_lock);
}
}
spin_unlock(&cifs_tcp_ses_lock);
@@ -603,7 +603,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&tcon->open_file_lock);
list_for_each(tmp2, &tcon->openFileList) {
cfile = list_entry(tmp2, struct cifsFileInfo,
tlist);
@@ -615,7 +615,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
cifs_dbg(FYI, "file id match, oplock break\n");
cinode = CIFS_I(d_inode(cfile->dentry));
-
+ spin_lock(&cfile->file_info_lock);
if (!CIFS_CACHE_WRITE(cinode) &&
rsp->OplockLevel == SMB2_OPLOCK_LEVEL_NONE)
cfile->oplock_break_cancelled = true;
@@ -637,14 +637,14 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
clear_bit(
CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
&cinode->flags);
-
+ spin_unlock(&cfile->file_info_lock);
queue_work(cifsiod_wq, &cfile->oplock_break);
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
spin_unlock(&cifs_tcp_ses_lock);
return true;
}
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
spin_unlock(&cifs_tcp_ses_lock);
cifs_dbg(FYI, "No matching file for oplock break\n");
return true;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index d203c0329..0e73cefca 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -287,7 +287,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
cifs_dbg(FYI, "Link Speed %lld\n",
le64_to_cpu(out_buf->LinkSpeed));
}
-
+ kfree(out_buf);
return rc;
}
#endif /* STATS2 */
@@ -541,6 +541,7 @@ smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
server->ops->set_oplock_level(cinode, oplock, fid->epoch,
&fid->purge_cache);
cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
+ memcpy(cfile->fid.create_guid, fid->create_guid, 16);
}
static void
@@ -699,6 +700,7 @@ smb2_clone_range(const unsigned int xid,
cchunk_out:
kfree(pcchunk);
+ kfree(retbuf);
return rc;
}
@@ -823,7 +825,6 @@ smb2_duplicate_extents(const unsigned int xid,
{
int rc;
unsigned int ret_data_len;
- char *retbuf = NULL;
struct duplicate_extents_to_file dup_ext_buf;
struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
@@ -849,7 +850,7 @@ smb2_duplicate_extents(const unsigned int xid,
FSCTL_DUPLICATE_EXTENTS_TO_FILE,
true /* is_fsctl */, (char *)&dup_ext_buf,
sizeof(struct duplicate_extents_to_file),
- (char **)&retbuf,
+ NULL,
&ret_data_len);
if (ret_data_len > 0)
@@ -872,7 +873,6 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
struct cifsFileInfo *cfile)
{
struct fsctl_set_integrity_information_req integr_info;
- char *retbuf = NULL;
unsigned int ret_data_len;
integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
@@ -884,7 +884,7 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
FSCTL_SET_INTEGRITY_INFORMATION,
true /* is_fsctl */, (char *)&integr_info,
sizeof(struct fsctl_set_integrity_information_req),
- (char **)&retbuf,
+ NULL,
&ret_data_len);
}
@@ -1041,7 +1041,7 @@ smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
static void
smb2_new_lease_key(struct cifs_fid *fid)
{
- get_random_bytes(fid->lease_key, SMB2_LEASE_KEY_SIZE);
+ generate_random_uuid(fid->lease_key);
}
#define SMB2_SYMLINK_STRUCT_SIZE \
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 29e06db5f..3eec96ca8 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -100,7 +100,21 @@ smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
hdr->ProtocolId = SMB2_PROTO_NUMBER;
hdr->StructureSize = cpu_to_le16(64);
hdr->Command = smb2_cmd;
- hdr->CreditRequest = cpu_to_le16(2); /* BB make this dynamic */
+ if (tcon && tcon->ses && tcon->ses->server) {
+ struct TCP_Server_Info *server = tcon->ses->server;
+
+ spin_lock(&server->req_lock);
+ /* Request up to 2 credits but don't go over the limit. */
+ if (server->credits >= SMB2_MAX_CREDITS_AVAILABLE)
+ hdr->CreditRequest = cpu_to_le16(0);
+ else
+ hdr->CreditRequest = cpu_to_le16(
+ min_t(int, SMB2_MAX_CREDITS_AVAILABLE -
+ server->credits, 2));
+ spin_unlock(&server->req_lock);
+ } else {
+ hdr->CreditRequest = cpu_to_le16(2);
+ }
hdr->ProcessId = cpu_to_le32((__u16)current->tgid);
if (!tcon)
@@ -590,6 +604,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
char *security_blob = NULL;
unsigned char *ntlmssp_blob = NULL;
bool use_spnego = false; /* else use raw ntlmssp */
+ u64 previous_session = ses->Suid;
cifs_dbg(FYI, "Session Setup\n");
@@ -627,6 +642,10 @@ ssetup_ntlmssp_authenticate:
return rc;
req->hdr.SessionId = 0; /* First session, not a reauthenticate */
+
+ /* if reconnect, we need to send previous sess id, otherwise it is 0 */
+ req->PreviousSessionId = previous_session;
+
req->Flags = 0; /* MBZ */
/* to enable echos and oplocks */
req->hdr.CreditRequest = cpu_to_le16(3);
@@ -1164,7 +1183,7 @@ create_durable_v2_buf(struct cifs_fid *pfid)
buf->dcontext.Timeout = 0; /* Should this be configurable by workload */
buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
- get_random_bytes(buf->dcontext.CreateGuid, 16);
+ generate_random_uuid(buf->dcontext.CreateGuid);
memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
/* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */
@@ -2057,6 +2076,7 @@ smb2_async_readv(struct cifs_readdata *rdata)
if (rdata->credits) {
buf->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
SMB2_MAX_BUFFER_SIZE));
+ buf->CreditRequest = buf->CreditCharge;
spin_lock(&server->req_lock);
server->credits += rdata->credits -
le16_to_cpu(buf->CreditCharge);
@@ -2243,6 +2263,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
if (wdata->credits) {
req->hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
SMB2_MAX_BUFFER_SIZE));
+ req->hdr.CreditRequest = req->hdr.CreditCharge;
spin_lock(&server->req_lock);
server->credits += wdata->credits -
le16_to_cpu(req->hdr.CreditCharge);
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index ff88d9feb..fd3709e8d 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -276,7 +276,7 @@ struct smb2_sess_setup_req {
__le32 Channel;
__le16 SecurityBufferOffset;
__le16 SecurityBufferLength;
- __le64 PreviousSessionId;
+ __u64 PreviousSessionId;
__u8 Buffer[1]; /* variable length GSS security buffer */
} __packed;
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index c502c1169..55d64fba1 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -152,7 +152,10 @@ static int do_page_crypto(struct inode *inode,
struct page *src_page, struct page *dest_page,
gfp_t gfp_flags)
{
- u8 xts_tweak[FS_XTS_TWEAK_SIZE];
+ struct {
+ __le64 index;
+ u8 padding[FS_XTS_TWEAK_SIZE - sizeof(__le64)];
+ } xts_tweak;
struct skcipher_request *req = NULL;
DECLARE_FS_COMPLETION_RESULT(ecr);
struct scatterlist dst, src;
@@ -172,17 +175,15 @@ static int do_page_crypto(struct inode *inode,
req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
fscrypt_complete, &ecr);
- BUILD_BUG_ON(FS_XTS_TWEAK_SIZE < sizeof(index));
- memcpy(xts_tweak, &index, sizeof(index));
- memset(&xts_tweak[sizeof(index)], 0,
- FS_XTS_TWEAK_SIZE - sizeof(index));
+ BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE);
+ xts_tweak.index = cpu_to_le64(index);
+ memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding));
sg_init_table(&dst, 1);
sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
sg_init_table(&src, 1);
sg_set_page(&src, src_page, PAGE_SIZE, 0);
- skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
- xts_tweak);
+ skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, &xts_tweak);
if (rw == FS_DECRYPT)
res = crypto_skcipher_decrypt(req);
else
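The new tweak struct in fs/crypto above also fixes an endianness portability problem: memcpy of a native-endian index produced a different tweak for the same page on big-endian hosts, while cpu_to_le64() pins the byte layout. A userspace illustration of the resulting 16-byte tweak for page index 1 — cpu_to_le64 is open-coded here for the sketch:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define FS_XTS_TWEAK_SIZE 16

/* Open-coded cpu_to_le64(): produce a value whose in-memory bytes are
 * little-endian regardless of host byte order. */
static uint64_t cpu_to_le64(uint64_t v)
{
	uint8_t b[8];
	uint64_t out;
	int i;

	for (i = 0; i < 8; i++)
		b[i] = v >> (8 * i);
	memcpy(&out, b, 8);
	return out;
}

int main(void)
{
	struct {
		uint64_t index;	/* __le64 in the kernel struct */
		uint8_t padding[FS_XTS_TWEAK_SIZE - sizeof(uint64_t)];
	} tweak;
	const uint8_t *p = (const uint8_t *)&tweak;
	int i;

	tweak.index = cpu_to_le64(1);
	memset(tweak.padding, 0, sizeof(tweak.padding));

	for (i = 0; i < FS_XTS_TWEAK_SIZE; i++)
		printf("%02x ", p[i]);	/* 01 00 00 ... 00 on any host */
	printf("\n");
	return 0;
}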
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index ed115acb5..6865663aa 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -109,6 +109,8 @@ int fscrypt_process_policy(struct file *filp,
if (ret)
return ret;
+ inode_lock(inode);
+
if (!inode_has_encryption_context(inode)) {
if (!S_ISDIR(inode->i_mode))
ret = -EINVAL;
@@ -127,6 +129,8 @@ int fscrypt_process_policy(struct file *filp,
ret = -EINVAL;
}
+ inode_unlock(inode);
+
mnt_drop_write_file(filp);
return ret;
}
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index 42f1d1814..e725aa089 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -190,15 +190,11 @@ ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
case ACL_TYPE_ACCESS:
name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS;
if (acl) {
- error = posix_acl_equiv_mode(acl, &inode->i_mode);
- if (error < 0)
+ error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+ if (error)
return error;
- else {
- inode->i_ctime = CURRENT_TIME_SEC;
- mark_inode_dirty(inode);
- if (error == 0)
- acl = NULL;
- }
+ inode->i_ctime = CURRENT_TIME_SEC;
+ mark_inode_dirty(inode);
}
break;
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index c6601a476..dfa519979 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -193,15 +193,11 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type,
case ACL_TYPE_ACCESS:
name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
if (acl) {
- error = posix_acl_equiv_mode(acl, &inode->i_mode);
- if (error < 0)
+ error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+ if (error)
return error;
- else {
- inode->i_ctime = ext4_current_time(inode);
- ext4_mark_inode_dirty(handle, inode);
- if (error == 0)
- acl = NULL;
- }
+ inode->i_ctime = ext4_current_time(inode);
+ ext4_mark_inode_dirty(handle, inode);
}
break;
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 73bcfd41f..42145be5c 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -223,14 +223,18 @@ static struct attribute *ext4_attrs[] = {
EXT4_ATTR_FEATURE(lazy_itable_init);
EXT4_ATTR_FEATURE(batched_discard);
EXT4_ATTR_FEATURE(meta_bg_resize);
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
EXT4_ATTR_FEATURE(encryption);
+#endif
EXT4_ATTR_FEATURE(metadata_csum_seed);
static struct attribute *ext4_feat_attrs[] = {
ATTR_LIST(lazy_itable_init),
ATTR_LIST(batched_discard),
ATTR_LIST(meta_bg_resize),
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
ATTR_LIST(encryption),
+#endif
ATTR_LIST(metadata_csum_seed),
NULL,
};
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index 4dcc9e28d..31344247c 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -210,12 +210,10 @@ static int __f2fs_set_acl(struct inode *inode, int type,
case ACL_TYPE_ACCESS:
name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
if (acl) {
- error = posix_acl_equiv_mode(acl, &inode->i_mode);
- if (error < 0)
+ error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+ if (error)
return error;
set_acl_inode(inode, inode->i_mode);
- if (error == 0)
- acl = NULL;
}
break;
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 363ba9e9d..2524807ee 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -92,17 +92,11 @@ int __gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
if (type == ACL_TYPE_ACCESS) {
umode_t mode = inode->i_mode;
- error = posix_acl_equiv_mode(acl, &mode);
- if (error < 0)
+ error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+ if (error)
return error;
-
- if (error == 0)
- acl = NULL;
-
- if (mode != inode->i_mode) {
- inode->i_mode = mode;
+ if (mode != inode->i_mode)
mark_inode_dirty(inode);
- }
}
if (acl) {
diff --git a/fs/hfsplus/posix_acl.c b/fs/hfsplus/posix_acl.c
index ab7ea2506..9b92058a1 100644
--- a/fs/hfsplus/posix_acl.c
+++ b/fs/hfsplus/posix_acl.c
@@ -65,8 +65,8 @@ int hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl,
case ACL_TYPE_ACCESS:
xattr_name = XATTR_NAME_POSIX_ACL_ACCESS;
if (acl) {
- err = posix_acl_equiv_mode(acl, &inode->i_mode);
- if (err < 0)
+ err = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+ if (err)
return err;
}
err = 0;
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index ad0c745eb..871c8b392 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -687,6 +687,11 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
pri_bh = NULL;
root_found:
+ /* We don't support read-write mounts */
+ if (!(s->s_flags & MS_RDONLY)) {
+ error = -EACCES;
+ goto out_freebh;
+ }
if (joliet_level && (pri == NULL || !opt.rock)) {
/* This is the case of Joliet with the norock mount flag.
@@ -1501,9 +1506,6 @@ struct inode *__isofs_iget(struct super_block *sb,
static struct dentry *isofs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
- /* We don't support read-write mounts */
- if (!(flags & MS_RDONLY))
- return ERR_PTR(-EACCES);
return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
}
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 3d8246a9f..e1652665b 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1149,6 +1149,7 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
JBUFFER_TRACE(jh, "file as BJ_Reserved");
spin_lock(&journal->j_list_lock);
__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
+ spin_unlock(&journal->j_list_lock);
} else if (jh->b_transaction == journal->j_committing_transaction) {
/* first access by this transaction */
jh->b_modified = 0;
@@ -1156,8 +1157,8 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
JBUFFER_TRACE(jh, "set next transaction");
spin_lock(&journal->j_list_lock);
jh->b_next_transaction = transaction;
+ spin_unlock(&journal->j_list_lock);
}
- spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
/*
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index bc2693d56..2a0f2a104 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -233,9 +233,10 @@ int jffs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
case ACL_TYPE_ACCESS:
xprefix = JFFS2_XPREFIX_ACL_ACCESS;
if (acl) {
- umode_t mode = inode->i_mode;
- rc = posix_acl_equiv_mode(acl, &mode);
- if (rc < 0)
+ umode_t mode;
+
+ rc = posix_acl_update_mode(inode, &mode, &acl);
+ if (rc)
return rc;
if (inode->i_mode != mode) {
struct iattr attr;
@@ -247,8 +248,6 @@ int jffs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
if (rc < 0)
return rc;
}
- if (rc == 0)
- acl = NULL;
}
break;
case ACL_TYPE_DEFAULT:
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index 21fa92ba2..3a1e1554a 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -78,13 +78,11 @@ static int __jfs_set_acl(tid_t tid, struct inode *inode, int type,
case ACL_TYPE_ACCESS:
ea_name = XATTR_NAME_POSIX_ACL_ACCESS;
if (acl) {
- rc = posix_acl_equiv_mode(acl, &inode->i_mode);
- if (rc < 0)
+ rc = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+ if (rc)
return rc;
inode->i_ctime = CURRENT_TIME;
mark_inode_dirty(inode);
- if (rc == 0)
- acl = NULL;
}
break;
case ACL_TYPE_DEFAULT:
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 217847679..2905479f2 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -344,9 +344,10 @@ static void bl_write_cleanup(struct work_struct *work)
u64 start = hdr->args.offset & (loff_t)PAGE_MASK;
u64 end = (hdr->args.offset + hdr->args.count +
PAGE_SIZE - 1) & (loff_t)PAGE_MASK;
+ u64 lwb = hdr->args.offset + hdr->args.count;
ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
- (end - start) >> SECTOR_SHIFT, end);
+ (end - start) >> SECTOR_SHIFT, lwb);
}
pnfs_ld_write_done(hdr);
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 322c2585b..b9c65421e 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -41,6 +41,17 @@ void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
}
+static bool
+nfs4_is_valid_delegation(const struct nfs_delegation *delegation,
+ fmode_t flags)
+{
+ if (delegation != NULL && (delegation->type & flags) == flags &&
+ !test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) &&
+ !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
+ return true;
+ return false;
+}
+
static int
nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)
{
@@ -50,8 +61,7 @@ nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)
flags &= FMODE_READ|FMODE_WRITE;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
- if (delegation != NULL && (delegation->type & flags) == flags &&
- !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
+ if (nfs4_is_valid_delegation(delegation, flags)) {
if (mark)
nfs_mark_delegation_referenced(delegation);
ret = 1;
@@ -893,7 +903,7 @@ bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags,
flags &= FMODE_READ|FMODE_WRITE;
rcu_read_lock();
delegation = rcu_dereference(nfsi->delegation);
- ret = (delegation != NULL && (delegation->type & flags) == flags);
+ ret = nfs4_is_valid_delegation(delegation, flags);
if (ret) {
nfs4_stateid_copy(dst, &delegation->stateid);
nfs_mark_delegation_referenced(delegation);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 177fefb26..6bc5a68e3 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -435,11 +435,11 @@ int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry)
return 0;
nfsi = NFS_I(inode);
- if (entry->fattr->fileid == nfsi->fileid)
- return 1;
- if (nfs_compare_fh(entry->fh, &nfsi->fh) == 0)
- return 1;
- return 0;
+ if (entry->fattr->fileid != nfsi->fileid)
+ return 0;
+ if (entry->fh->size && nfs_compare_fh(entry->fh, &nfsi->fh) != 0)
+ return 0;
+ return 1;
}
static
@@ -517,6 +517,8 @@ again:
&entry->fattr->fsid))
goto out;
if (nfs_same_file(dentry, entry)) {
+ if (!entry->fh->size)
+ goto out;
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
status = nfs_refresh_inode(d_inode(dentry), entry->fattr);
if (!status)
@@ -529,6 +531,10 @@ again:
goto again;
}
}
+ if (!entry->fh->size) {
+ d_lookup_done(dentry);
+ goto out;
+ }
inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr, entry->label);
alias = d_splice_alias(inode, dentry);
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 64b43b4ad..608501971 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -443,6 +443,7 @@ int nfs42_proc_layoutstats_generic(struct nfs_server *server,
task = rpc_run_task(&task_setup);
if (IS_ERR(task))
return PTR_ERR(task);
+ rpc_put_task(task);
return 0;
}
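rpc_run_task() hands back a task the caller holds a reference on, so returning without rpc_put_task() leaked the task on every layoutstats call even though the RPC itself ran to completion. A user-space sketch of the caller-owns-one-reference rule the added line restores (illustrative names, not the sunrpc API):

#include <stdatomic.h>
#include <stdlib.h>

struct task { atomic_int refs; };

static struct task *run_task(void)
{
        struct task *t = malloc(sizeof(*t));

        if (!t)
                return NULL;
        atomic_init(&t->refs, 2);  /* one ref for the runner, one for the caller */
        /* ... start asynchronous work; the runner drops its ref on completion ... */
        return t;
}

static void put_task(struct task *t)
{
        if (atomic_fetch_sub(&t->refs, 1) == 1)  /* dropped the last reference */
                free(t);
}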
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index cada00aa5..8353f33f0 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1498,6 +1498,9 @@ restart:
__func__, status);
case -ENOENT:
case -ENOMEM:
+ case -EACCES:
+ case -EROFS:
+ case -EIO:
case -ESTALE:
/* Open state on this file cannot be recovered */
nfs4_state_mark_recovery_failed(state, status);
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 45007acaf..a2b65fc56 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -366,14 +366,21 @@ static struct notifier_block nfsd_inet6addr_notifier = {
};
#endif
+/* Only used under nfsd_mutex, so this atomic may be overkill: */
+static atomic_t nfsd_notifier_refcount = ATOMIC_INIT(0);
+
static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- unregister_inetaddr_notifier(&nfsd_inetaddr_notifier);
+ /* check if the notifier still has clients */
+ if (atomic_dec_return(&nfsd_notifier_refcount) == 0) {
+ unregister_inetaddr_notifier(&nfsd_inetaddr_notifier);
#if IS_ENABLED(CONFIG_IPV6)
- unregister_inet6addr_notifier(&nfsd_inet6addr_notifier);
+ unregister_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
+ }
+
/*
* write_ports can create the server without actually starting
* any threads--if we get shut down before any threads are
@@ -488,10 +495,13 @@ int nfsd_create_serv(struct net *net)
}
set_max_drc();
- register_inetaddr_notifier(&nfsd_inetaddr_notifier);
+ /* check if the notifier is already set */
+ if (atomic_inc_return(&nfsd_notifier_refcount) == 1) {
+ register_inetaddr_notifier(&nfsd_inetaddr_notifier);
#if IS_ENABLED(CONFIG_IPV6)
- register_inet6addr_notifier(&nfsd_inet6addr_notifier);
+ register_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
+ }
do_gettimeofday(&nn->nfssvc_boot); /* record boot time */
return 0;
}
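With per-netns nfsd instances sharing one global notifier block, shutting down the server in one namespace could unregister a notifier other namespaces still relied on. The refcount makes registration happen only on the 0 to 1 transition and removal only on the 1 to 0 transition. A compilable sketch of that gating (stand-in names):

#include <stdatomic.h>

static atomic_int notifier_refcount = 0;

static void register_notifier(void)   { /* ... */ }
static void unregister_notifier(void) { /* ... */ }

static void notifier_get(void)
{
        if (atomic_fetch_add(&notifier_refcount, 1) == 0)  /* 0 -> 1 */
                register_notifier();
}

static void notifier_put(void)
{
        if (atomic_fetch_sub(&notifier_refcount, 1) == 1)  /* 1 -> 0 */
                unregister_notifier();
}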
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 216243472..164307b99 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -241,13 +241,11 @@ int ocfs2_set_acl(handle_t *handle,
case ACL_TYPE_ACCESS:
name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS;
if (acl) {
- umode_t mode = inode->i_mode;
- ret = posix_acl_equiv_mode(acl, &mode);
- if (ret < 0)
- return ret;
+ umode_t mode;
- if (ret == 0)
- acl = NULL;
+ ret = posix_acl_update_mode(inode, &mode, &acl);
+ if (ret)
+ return ret;
ret = ocfs2_acl_set_mode(inode, di_bh,
handle, mode);
diff --git a/fs/orangefs/acl.c b/fs/orangefs/acl.c
index 28f2195cd..7a3754488 100644
--- a/fs/orangefs/acl.c
+++ b/fs/orangefs/acl.c
@@ -73,14 +73,11 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
case ACL_TYPE_ACCESS:
name = XATTR_NAME_POSIX_ACL_ACCESS;
if (acl) {
- umode_t mode = inode->i_mode;
- /*
- * can we represent this with the traditional file
- * mode permission bits?
- */
- error = posix_acl_equiv_mode(acl, &mode);
- if (error < 0) {
- gossip_err("%s: posix_acl_equiv_mode err: %d\n",
+ umode_t mode;
+
+ error = posix_acl_update_mode(inode, &mode, &acl);
+ if (error) {
+ gossip_err("%s: posix_acl_update_mode err: %d\n",
__func__,
error);
return error;
@@ -90,8 +87,6 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
SetModeFlag(orangefs_inode);
inode->i_mode = mode;
mark_inode_dirty_sync(inode);
- if (error == 0)
- acl = NULL;
}
break;
case ACL_TYPE_DEFAULT:
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 43fdc2765..abadbc30e 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -57,6 +57,7 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
ssize_t list_size, size, value_size = 0;
char *buf, *name, *value = NULL;
int uninitialized_var(error);
+ size_t slen;
if (!old->d_inode->i_op->getxattr ||
!new->d_inode->i_op->getxattr)
@@ -79,7 +80,16 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
goto out;
}
- for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
+ for (name = buf; list_size; name += slen) {
+ slen = strnlen(name, list_size) + 1;
+
+ /* underlying fs providing us with a broken xattr list? */
+ if (WARN_ON(slen > list_size)) {
+ error = -EIO;
+ break;
+ }
+ list_size -= slen;
+
if (ovl_is_private_xattr(name))
continue;
retry:
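The rewritten loop trusts list_size rather than the string terminators: strnlen() never reads past the bytes remaining, so an xattr list whose final name lost its NUL is reported as -EIO instead of being walked past the end of the buffer. A compilable user-space analogue:

#include <stdio.h>
#include <string.h>

static int walk_names(const char *buf, size_t list_size)
{
        const char *name;
        size_t slen;

        for (name = buf; list_size; name += slen) {
                slen = strnlen(name, list_size) + 1;
                if (slen > list_size)  /* unterminated final entry: bail out */
                        return -1;
                list_size -= slen;
                printf("name: %s\n", name);
        }
        return 0;
}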
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 1560fdc09..74e696426 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -14,6 +14,7 @@
#include <linux/cred.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
+#include <linux/atomic.h>
#include "overlayfs.h"
void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
@@ -37,8 +38,10 @@ struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry)
{
struct dentry *temp;
char name[20];
+ static atomic_t temp_id = ATOMIC_INIT(0);
- snprintf(name, sizeof(name), "#%lx", (unsigned long) dentry);
+ /* counter is allowed to wrap, since temp dentries are ephemeral */
+ snprintf(name, sizeof(name), "#%x", atomic_inc_return(&temp_id));
temp = lookup_one_len(name, workdir, strlen(name));
if (!IS_ERR(temp) && temp->d_inode) {
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 59d47ab07..bfc3ec388 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -626,6 +626,37 @@ no_mem:
}
EXPORT_SYMBOL_GPL(posix_acl_create);
+/**
+ * posix_acl_update_mode - update mode in set_acl
+ *
+ * Update the file mode when setting an ACL: compute the new file permission
+ * bits based on the ACL. In addition, if the ACL is equivalent to the new
+ * file mode, set *acl to NULL to indicate that no ACL should be set.
+ *
+ * As with chmod, clear the setgid bit if the caller is not in the owning group
+ * or capable of CAP_FSETID (see inode_change_ok).
+ *
+ * Called from set_acl inode operations.
+ */
+int posix_acl_update_mode(struct inode *inode, umode_t *mode_p,
+ struct posix_acl **acl)
+{
+ umode_t mode = inode->i_mode;
+ int error;
+
+ error = posix_acl_equiv_mode(*acl, &mode);
+ if (error < 0)
+ return error;
+ if (error == 0)
+ *acl = NULL;
+ if (!in_group_p(inode->i_gid) &&
+ !capable_wrt_inode_uidgid(inode, CAP_FSETID))
+ mode &= ~S_ISGID;
+ *mode_p = mode;
+ return 0;
+}
+EXPORT_SYMBOL(posix_acl_update_mode);
+
/*
* Fix up the uids and gids in posix acl extended attributes in place.
*/
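Every set_acl conversion in this series (jffs2, jfs, ocfs2, orangefs, reiserfs and xfs above) collapses to the same call shape around the new helper; a sketch of it, with the filesystem-specific bookkeeping elided:

/* inside a filesystem's set_acl, ACL_TYPE_ACCESS case */
umode_t mode;
int error;

if (acl) {
        error = posix_acl_update_mode(inode, &mode, &acl);
        if (error)
                return error;
        /* acl may now be NULL if it was equivalent to the mode bits */
        inode->i_mode = mode;
        mark_inode_dirty(inode);
}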
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 7a034d62c..2340262a7 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -377,13 +377,14 @@ static void ramoops_free_przs(struct ramoops_context *cxt)
{
int i;
- cxt->max_dump_cnt = 0;
if (!cxt->przs)
return;
- for (i = 0; !IS_ERR_OR_NULL(cxt->przs[i]); i++)
+ for (i = 0; i < cxt->max_dump_cnt; i++)
persistent_ram_free(cxt->przs[i]);
+
kfree(cxt->przs);
+ cxt->max_dump_cnt = 0;
}
static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
@@ -408,7 +409,7 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
GFP_KERNEL);
if (!cxt->przs) {
dev_err(dev, "failed to initialize a prz array for dumps\n");
- goto fail_prz;
+ goto fail_mem;
}
for (i = 0; i < cxt->max_dump_cnt; i++) {
@@ -419,6 +420,11 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
err = PTR_ERR(cxt->przs[i]);
dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
cxt->record_size, (unsigned long long)*paddr, err);
+
+ while (i > 0) {
+ i--;
+ persistent_ram_free(cxt->przs[i]);
+ }
goto fail_prz;
}
*paddr += cxt->record_size;
@@ -426,7 +432,9 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
return 0;
fail_prz:
- ramoops_free_przs(cxt);
+ kfree(cxt->przs);
+fail_mem:
+ cxt->max_dump_cnt = 0;
return err;
}
@@ -659,7 +667,6 @@ static int ramoops_remove(struct platform_device *pdev)
struct ramoops_context *cxt = &oops_cxt;
pstore_unregister(&cxt->pstore);
- cxt->max_dump_cnt = 0;
kfree(cxt->pstore.buf);
cxt->pstore.bufsize = 0;
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 76c3f80ef..364d2dffe 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -47,43 +47,10 @@ static inline size_t buffer_start(struct persistent_ram_zone *prz)
return atomic_read(&prz->buffer->start);
}
-/* increase and wrap the start pointer, returning the old value */
-static size_t buffer_start_add_atomic(struct persistent_ram_zone *prz, size_t a)
-{
- int old;
- int new;
-
- do {
- old = atomic_read(&prz->buffer->start);
- new = old + a;
- while (unlikely(new >= prz->buffer_size))
- new -= prz->buffer_size;
- } while (atomic_cmpxchg(&prz->buffer->start, old, new) != old);
-
- return old;
-}
-
-/* increase the size counter until it hits the max size */
-static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a)
-{
- size_t old;
- size_t new;
-
- if (atomic_read(&prz->buffer->size) == prz->buffer_size)
- return;
-
- do {
- old = atomic_read(&prz->buffer->size);
- new = old + a;
- if (new > prz->buffer_size)
- new = prz->buffer_size;
- } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
-}
-
static DEFINE_RAW_SPINLOCK(buffer_lock);
/* increase and wrap the start pointer, returning the old value */
-static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
+static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
{
int old;
int new;
@@ -103,7 +70,7 @@ static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
}
/* increase the size counter until it hits the max size */
-static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a)
+static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
{
size_t old;
size_t new;
@@ -124,9 +91,6 @@ exit:
raw_spin_unlock_irqrestore(&buffer_lock, flags);
}
-static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic;
-static void (*buffer_size_add)(struct persistent_ram_zone *, size_t) = buffer_size_add_atomic;
-
static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
uint8_t *data, size_t len, uint8_t *ecc)
{
@@ -299,7 +263,7 @@ static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
const void *s, unsigned int start, unsigned int count)
{
struct persistent_ram_buffer *buffer = prz->buffer;
- memcpy(buffer->data + start, s, count);
+ memcpy_toio(buffer->data + start, s, count);
persistent_ram_update_ecc(prz, start, count);
}
@@ -322,8 +286,8 @@ void persistent_ram_save_old(struct persistent_ram_zone *prz)
}
prz->old_log_size = size;
- memcpy(prz->old_log, &buffer->data[start], size - start);
- memcpy(prz->old_log + size - start, &buffer->data[0], start);
+ memcpy_fromio(prz->old_log, &buffer->data[start], size - start);
+ memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start);
}
int notrace persistent_ram_write(struct persistent_ram_zone *prz,
@@ -426,9 +390,6 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size,
return NULL;
}
- buffer_start_add = buffer_start_add_locked;
- buffer_size_add = buffer_size_add_locked;
-
if (memtype)
va = ioremap(start, size);
else
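Because the persistent RAM buffer may come from ioremap(), plain memcpy() on it is not architecturally safe; the change moves all buffer traffic through the __iomem accessors. A minimal sketch of that discipline (helper names illustrative):

#include <linux/io.h>

static void copy_record_out(void __iomem *dst, const void *src, size_t len)
{
        memcpy_toio(dst, src, len);      /* store into the iomapped buffer */
}

static void copy_record_in(void *dst, const void __iomem *src, size_t len)
{
        memcpy_fromio(dst, src, len);    /* load from the iomapped buffer */
}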
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index dbed42f75..27376681c 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -242,13 +242,9 @@ __reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
case ACL_TYPE_ACCESS:
name = XATTR_NAME_POSIX_ACL_ACCESS;
if (acl) {
- error = posix_acl_equiv_mode(acl, &inode->i_mode);
- if (error < 0)
+ error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+ if (error)
return error;
- else {
- if (error == 0)
- acl = NULL;
- }
}
break;
case ACL_TYPE_DEFAULT:
diff --git a/fs/super.c b/fs/super.c
index c2ff475c1..47d11e046 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -1379,8 +1379,8 @@ int freeze_super(struct super_block *sb)
}
}
/*
- * This is just for debugging purposes so that fs can warn if it
- * sees write activity when frozen is set to SB_FREEZE_COMPLETE.
+ * For debugging purposes so that fs can warn if it sees write activity
+ * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
*/
sb->s_writers.frozen = SB_FREEZE_COMPLETE;
up_write(&sb->s_umount);
@@ -1399,7 +1399,7 @@ int thaw_super(struct super_block *sb)
int error;
down_write(&sb->s_umount);
- if (sb->s_writers.frozen == SB_UNFROZEN) {
+ if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
up_write(&sb->s_umount);
return -EINVAL;
}
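thaw_super() now refuses anything short of SB_FREEZE_COMPLETE, so a thaw racing with an in-progress freeze (which steps through the intermediate levels) gets -EINVAL instead of unwinding a half-taken freeze. A compilable sketch of the state check, mirroring the kernel's freeze-level ordering:

#include <stdio.h>

enum { UNFROZEN, FREEZE_WRITE, FREEZE_PAGEFAULT, FREEZE_FS, FREEZE_COMPLETE };

static int thaw_allowed(int frozen)
{
        return frozen == FREEZE_COMPLETE;  /* was: frozen != UNFROZEN */
}

int main(void)
{
        printf("%d %d\n", thaw_allowed(FREEZE_WRITE),
               thaw_allowed(FREEZE_COMPLETE));  /* prints 0 1 */
        return 0;
}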
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 11a004114..c9ee6f6ef 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -172,6 +172,7 @@ out_cancel:
host_ui->xattr_cnt -= 1;
host_ui->xattr_size -= CALC_DENT_SIZE(nm->len);
host_ui->xattr_size -= CALC_XATTR_BYTES(size);
+ host_ui->xattr_names -= nm->len;
mutex_unlock(&host_ui->ui_mutex);
out_free:
make_bad_inode(inode);
@@ -476,6 +477,7 @@ out_cancel:
host_ui->xattr_cnt += 1;
host_ui->xattr_size += CALC_DENT_SIZE(nm->len);
host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
+ host_ui->xattr_names += nm->len;
mutex_unlock(&host_ui->ui_mutex);
ubifs_release_budget(c, &req);
make_bad_inode(inode);
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index b6e527b8e..8a0dec89c 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -257,16 +257,11 @@ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
return error;
if (type == ACL_TYPE_ACCESS) {
- umode_t mode = inode->i_mode;
- error = posix_acl_equiv_mode(acl, &mode);
-
- if (error <= 0) {
- acl = NULL;
-
- if (error < 0)
- return error;
- }
+ umode_t mode;
+ error = posix_acl_update_mode(inode, &mode, &acl);
+ if (error)
+ return error;
error = xfs_set_mode(inode, mode);
if (error)
return error;
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index d3778652e..988903a59 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -938,7 +938,8 @@ static inline int drm_debugfs_remove_files(const struct drm_info_list *files,
#endif
extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj, int flags);
+ struct drm_gem_object *obj,
+ int flags);
extern int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle, uint32_t flags,
int *prime_fd);
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h
index 29050337d..da59fd9cd 100644
--- a/include/dt-bindings/clock/imx6qdl-clock.h
+++ b/include/dt-bindings/clock/imx6qdl-clock.h
@@ -269,6 +269,8 @@
#define IMX6QDL_CLK_PRG0_APB 256
#define IMX6QDL_CLK_PRG1_APB 257
#define IMX6QDL_CLK_PRE_AXI 258
-#define IMX6QDL_CLK_END 259
+#define IMX6QDL_CLK_MLB_SEL 259
+#define IMX6QDL_CLK_MLB_PODF 260
+#define IMX6QDL_CLK_END 261
#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 631ba33bb..32dc0cbd5 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -639,19 +639,19 @@ static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpufreq_frequency_table *table = policy->freq_table;
+ struct cpufreq_frequency_table *pos, *best = table - 1;
unsigned int freq;
- int i, best = -1;
- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
- freq = table[i].frequency;
+ cpufreq_for_each_valid_entry(pos, table) {
+ freq = pos->frequency;
if (freq >= target_freq)
- return i;
+ return pos - table;
- best = i;
+ best = pos;
}
- return best;
+ return best - table;
}
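The rewritten helpers iterate with cpufreq_for_each_valid_entry(), which skips invalid slots, and return pos - table as the index; initialising best to table - 1 preserves the old -1 "nothing found" result, since best - table is then -1. A compilable analogue of the pointer-difference idiom (it deliberately forms the same one-before-the-array sentinel the kernel code relies on):

#include <stdio.h>

struct entry { unsigned int frequency; };

int main(void)
{
        struct entry table[] = { {400}, {800}, {1200} };
        struct entry *pos, *best = table - 1;  /* sentinel, as in the kernel code */

        for (pos = table; pos < table + 3; pos++) {
                if (pos->frequency >= 700) {
                        best = pos;
                        break;
                }
        }
        printf("index = %td\n", best - table);  /* prints 1 */
        return 0;
}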
/* Find lowest freq at or above target in a table in descending order */
@@ -659,28 +659,28 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpufreq_frequency_table *table = policy->freq_table;
+ struct cpufreq_frequency_table *pos, *best = table - 1;
unsigned int freq;
- int i, best = -1;
- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
- freq = table[i].frequency;
+ cpufreq_for_each_valid_entry(pos, table) {
+ freq = pos->frequency;
if (freq == target_freq)
- return i;
+ return pos - table;
if (freq > target_freq) {
- best = i;
+ best = pos;
continue;
}
/* No freq found above target_freq */
- if (best == -1)
- return i;
+ if (best == table - 1)
+ return pos - table;
- return best;
+ return best - table;
}
- return best;
+ return best - table;
}
/* Works only on sorted freq-tables */
@@ -700,28 +700,28 @@ static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpufreq_frequency_table *table = policy->freq_table;
+ struct cpufreq_frequency_table *pos, *best = table - 1;
unsigned int freq;
- int i, best = -1;
- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
- freq = table[i].frequency;
+ cpufreq_for_each_valid_entry(pos, table) {
+ freq = pos->frequency;
if (freq == target_freq)
- return i;
+ return pos - table;
if (freq < target_freq) {
- best = i;
+ best = pos;
continue;
}
/* No freq found below target_freq */
- if (best == -1)
- return i;
+ if (best == table - 1)
+ return pos - table;
- return best;
+ return best - table;
}
- return best;
+ return best - table;
}
/* Find highest freq at or below target in a table in descending order */
@@ -729,19 +729,19 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpufreq_frequency_table *table = policy->freq_table;
+ struct cpufreq_frequency_table *pos, *best = table - 1;
unsigned int freq;
- int i, best = -1;
- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
- freq = table[i].frequency;
+ cpufreq_for_each_valid_entry(pos, table) {
+ freq = pos->frequency;
if (freq <= target_freq)
- return i;
+ return pos - table;
- best = i;
+ best = pos;
}
- return best;
+ return best - table;
}
/* Works only on sorted freq-tables */
@@ -761,32 +761,32 @@ static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpufreq_frequency_table *table = policy->freq_table;
+ struct cpufreq_frequency_table *pos, *best = table - 1;
unsigned int freq;
- int i, best = -1;
- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
- freq = table[i].frequency;
+ cpufreq_for_each_valid_entry(pos, table) {
+ freq = pos->frequency;
if (freq == target_freq)
- return i;
+ return pos - table;
if (freq < target_freq) {
- best = i;
+ best = pos;
continue;
}
/* No freq found below target_freq */
- if (best == -1)
- return i;
+ if (best == table - 1)
+ return pos - table;
/* Choose the closest freq */
- if (target_freq - table[best].frequency > freq - target_freq)
- return i;
+ if (target_freq - best->frequency > freq - target_freq)
+ return pos - table;
- return best;
+ return best - table;
}
- return best;
+ return best - table;
}
/* Find closest freq to target in a table in descending order */
@@ -794,32 +794,32 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpufreq_frequency_table *table = policy->freq_table;
+ struct cpufreq_frequency_table *pos, *best = table - 1;
unsigned int freq;
- int i, best = -1;
- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
- freq = table[i].frequency;
+ cpufreq_for_each_valid_entry(pos, table) {
+ freq = pos->frequency;
if (freq == target_freq)
- return i;
+ return pos - table;
if (freq > target_freq) {
- best = i;
+ best = pos;
continue;
}
/* No freq found above target_freq */
- if (best == -1)
- return i;
+ if (best == table - 1)
+ return pos - table;
/* Choose the closest freq */
- if (table[best].frequency - target_freq > target_freq - freq)
- return i;
+ if (best->frequency - target_freq > target_freq - freq)
+ return pos - table;
- return best;
+ return best - table;
}
- return best;
+ return best - table;
}
/* Works only on sorted freq-tables */
diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h
index 0a83a1e64..4db00b02c 100644
--- a/include/linux/devfreq-event.h
+++ b/include/linux/devfreq-event.h
@@ -148,11 +148,6 @@ static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev)
return -EINVAL;
}
-static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
-{
- return ERR_PTR(-EINVAL);
-}
-
static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
struct device *dev, int index)
{
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index c26d4638f..fe99e6f95 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -450,8 +450,8 @@ static inline pgoff_t basepage_index(struct page *page)
return __basepage_index(page);
}
-extern void dissolve_free_huge_pages(unsigned long start_pfn,
- unsigned long end_pfn);
+extern int dissolve_free_huge_pages(unsigned long start_pfn,
+ unsigned long end_pfn);
static inline bool hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
@@ -518,7 +518,7 @@ static inline pgoff_t basepage_index(struct page *page)
{
return page->index;
}
-#define dissolve_free_huge_pages(s, e) do {} while (0)
+#define dissolve_free_huge_pages(s, e) 0
#define hugepage_migration_supported(h) false
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 99ac022ed..3a8610ea6 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -290,7 +290,7 @@
#define GITS_BASER_TYPE_SHIFT (56)
#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
-#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
+#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
#define GITS_BASER_SHAREABILITY_SHIFT (10)
#define GITS_BASER_InnerShareable \
GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
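The ENTRY_SIZE field of GITS_BASER is only five bits wide, bits [52:48], so masking the shifted value with 0xff also captured the three bits above the field and could wildly inflate the computed entry size. A compilable demonstration with an illustrative register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* entry-size field = 0x10, with unrelated bits set at [55:53] */
        uint64_t r = ((uint64_t)0x5 << 53) | ((uint64_t)0x10 << 48);
        unsigned int right = (unsigned int)((r >> 48) & 0x1f) + 1;  /* 17  */
        unsigned int wrong = (unsigned int)((r >> 48) & 0xff) + 1;  /* 177 */

        printf("right=%u wrong=%u\n", right, wrong);
        return 0;
}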
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index b519e137b..bbfce62a0 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -129,6 +129,8 @@ static inline struct nd_blk_region_desc *to_blk_region_desc(
}
int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length);
+void nvdimm_clear_from_poison_list(struct nvdimm_bus *nvdimm_bus,
+ phys_addr_t start, unsigned int len);
struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
struct nvdimm_bus_descriptor *nfit_desc);
void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus);
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index d5d3d741f..bf1046d03 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -93,6 +93,7 @@ extern int set_posix_acl(struct inode *, int, struct posix_acl *);
extern int posix_acl_chmod(struct inode *, umode_t);
extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **,
struct posix_acl **);
+extern int posix_acl_update_mode(struct inode *, umode_t *, struct posix_acl **);
extern int simple_set_acl(struct inode *, struct posix_acl *, int);
extern int simple_acl_create(struct inode *, struct inode *);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index fb8e3b6fe..c21190089 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -177,6 +177,7 @@ enum tcm_sense_reason_table {
TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED = R(0x15),
TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED = R(0x16),
TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED = R(0x17),
+ TCM_COPY_TARGET_DEVICE_NOT_REACHABLE = R(0x18),
#undef R
};
diff --git a/init/Kconfig b/init/Kconfig
index 31dbc2b01..e442b914c 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -30,6 +30,7 @@ menu "General setup"
config SCHED_MUQSS
bool "MuQSS cpu scheduler"
+ select HIGH_RES_TIMERS
default n
---help---
The Multiple Queue Skiplist Scheduler for excellent interactivity and
@@ -379,7 +380,7 @@ choice
# Kind of a stub config for the pure tick based cputime accounting
config TICK_CPU_ACCOUNTING
bool "Simple tick based cputime accounting"
- depends on !S390 && !NO_HZ_FULL && !SCHED_MUQSS
+ depends on !S390 && !NO_HZ_FULL
help
This is the basic tick based cputime accounting that maintains
statistics about user, system and idle time spent on per jiffies
@@ -404,7 +405,6 @@ config VIRT_CPU_ACCOUNTING_GEN
bool "Full dynticks CPU time accounting"
depends on HAVE_CONTEXT_TRACKING
depends on HAVE_VIRT_CPU_ACCOUNTING_GEN
- depends on !SCHED_MUQSS
select VIRT_CPU_ACCOUNTING
select CONTEXT_TRACKING
help
@@ -582,7 +582,7 @@ config CONTEXT_TRACKING
config CONTEXT_TRACKING_FORCE
bool "Force context tracking"
depends on CONTEXT_TRACKING
- default y if !NO_HZ_FULL
+ default y if !NO_HZ_FULL && !SCHED_MUQSS
help
The major pre-requirement for full dynticks to work is to
support the context tracking subsystem. But there are also
@@ -740,7 +740,6 @@ config RCU_NOCB_CPU
bool "Offload RCU callback processing from boot-selected CPUs"
depends on TREE_RCU || PREEMPT_RCU
depends on RCU_EXPERT || NO_HZ_FULL
- depends on !SCHED_MUQSS
default n
help
Use this option to reduce OS jitter for aggressive HPC or
diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
index 2a202a846..ecde22d15 100644
--- a/kernel/Kconfig.hz
+++ b/kernel/Kconfig.hz
@@ -5,6 +5,7 @@
choice
prompt "Timer frequency"
default HZ_250
+ default HZ_100 if SCHED_MUQSS
help
Allows the configuration of the timer frequency. It is customary
to have the timer interrupt run at 1000 Hz but 100 Hz may be more
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index abd286afb..a4775f345 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -411,8 +411,29 @@ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
}
EXPORT_SYMBOL_GPL(irq_map_generic_chip);
+static void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq)
+{
+ struct irq_data *data = irq_domain_get_irq_data(d, virq);
+ struct irq_domain_chip_generic *dgc = d->gc;
+ unsigned int hw_irq = data->hwirq;
+ struct irq_chip_generic *gc;
+ int irq_idx;
+
+ gc = irq_get_domain_generic_chip(d, hw_irq);
+ if (!gc)
+ return;
+
+ irq_idx = hw_irq % dgc->irqs_per_chip;
+
+ clear_bit(irq_idx, &gc->installed);
+ irq_domain_set_info(d, virq, hw_irq, &no_irq_chip, NULL, NULL, NULL,
+ NULL);
+
+}
+
struct irq_domain_ops irq_generic_chip_ops = {
.map = irq_map_generic_chip,
+ .unmap = irq_unmap_generic_chip,
.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_generic_chip_ops);
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index a787aa942..77bdf9807 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -18,14 +18,14 @@ endif
ifdef CONFIG_SCHED_MUQSS
obj-y += MuQSS.o clock.o
else
-obj-y += core.o loadavg.o clock.o cputime.o
+obj-y += core.o loadavg.o clock.o
obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
obj-$(CONFIG_SMP) += cpudeadline.o
obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
obj-$(CONFIG_SCHED_DEBUG) += debug.o
obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
endif
-obj-y += wait.o swait.o completion.o idle.o
+obj-y += wait.o swait.o completion.o idle.o cputime.o
obj-$(CONFIG_SMP) += cpupri.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
obj-$(CONFIG_CPU_FREQ) += cpufreq.o
diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
index bd9b1f13f..cf7a95286 100644
--- a/kernel/sched/MuQSS.c
+++ b/kernel/sched/MuQSS.c
@@ -123,6 +123,7 @@
*/
#define JIFFIES_TO_NS(TIME) ((TIME) * (1073741824 / HZ))
#define JIFFY_NS (1073741824 / HZ)
+#define JIFFY_US (1048576 / HZ)
#define NS_TO_JIFFIES(TIME) ((TIME) / JIFFY_NS)
#define HALF_JIFFY_NS (1073741824 / HZ / 2)
#define HALF_JIFFY_US (1048576 / HZ / 2)
@@ -130,12 +131,13 @@
#define MS_TO_US(TIME) ((TIME) << 10)
#define NS_TO_MS(TIME) ((TIME) >> 20)
#define NS_TO_US(TIME) ((TIME) >> 10)
+#define US_TO_NS(TIME) ((TIME) << 10)
#define RESCHED_US (100) /* Reschedule if less than this many μs left */
void print_scheduler_version(void)
{
- printk(KERN_INFO "MuQSS CPU scheduler v0.115 by Con Kolivas.\n");
+ printk(KERN_INFO "MuQSS CPU scheduler v0.120 by Con Kolivas.\n");
}
/*
@@ -179,9 +181,26 @@ static inline int timeslice(void)
return MS_TO_US(rr_interval);
}
+static bool sched_smp_initialized __read_mostly;
+
+/*
+ * The global runqueue data that all CPUs work off. Contains atomic
+ * variables and a cpu bitmap that is set atomically.
+ */
+struct global_rq {
#ifdef CONFIG_SMP
-static cpumask_t cpu_idle_map ____cacheline_aligned_in_smp;
+ atomic_t nr_running ____cacheline_aligned_in_smp;
+ atomic_t nr_uninterruptible ____cacheline_aligned_in_smp;
+ atomic64_t nr_switches ____cacheline_aligned_in_smp;
+ cpumask_t cpu_idle_map ____cacheline_aligned_in_smp;
+#else
+ atomic_t nr_running ____cacheline_aligned;
+ atomic_t nr_uninterruptible ____cacheline_aligned;
+ atomic64_t nr_switches ____cacheline_aligned;
+#endif
+};
+#ifdef CONFIG_SMP
/*
* We add the notion of a root-domain which will be used to define per-domain
* variables. Each exclusive cpuset essentially defines an island domain by
@@ -213,6 +232,13 @@ static struct root_domain def_root_domain;
#endif /* CONFIG_SMP */
+/* There can be only one */
+#ifdef CONFIG_SMP
+static struct global_rq grq ____cacheline_aligned_in_smp;
+#else
+static struct global_rq grq ____cacheline_aligned;
+#endif
+
static DEFINE_MUTEX(sched_hotcpu_mutex);
/* cpus with isolated domains */
@@ -768,7 +794,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
*/
if (unlikely(task_on_rq_migrating(prev))) {
sched_info_dequeued(rq, prev);
- rq->nr_running--;
/*
* We move the ownership of prev to the new cpu now. ttwu can't
* activate prev to the wrong cpu since it has to grab this
@@ -779,7 +804,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
raw_spin_lock(&prev->pi_lock);
rq = __task_rq_lock(prev);
- rq->nr_running++;
/* Check that someone else hasn't already queued prev */
if (likely(!task_queued(prev))) {
enqueue_task(rq, prev, 0);
@@ -834,7 +858,7 @@ static inline int ms_longest_deadline_diff(void)
static inline int rq_load(struct rq *rq)
{
- return rq->nr_running;
+ return rq->sl->entries + !rq_idle(rq);
}
static inline bool rq_local(struct rq *rq);
@@ -848,20 +872,24 @@ static inline bool rq_local(struct rq *rq);
*/
static void update_load_avg(struct rq *rq)
{
- /* rq clock can go backwards so skip update if that happens */
- if (likely(rq->clock > rq->load_update)) {
- unsigned long us_interval = (rq->clock - rq->load_update) >> 10;
- long load, curload = rq_load(rq);
+ unsigned long us_interval;
+ long load, curload;
- load = rq->load_avg - (rq->load_avg * us_interval * 5 / 262144);
- if (unlikely(load < 0))
- load = 0;
- load += curload * curload * SCHED_CAPACITY_SCALE * us_interval * 5 / 262144;
- rq->load_avg = load;
- } else
+ if (unlikely(rq->niffies <= rq->load_update))
return;
- rq->load_update = rq->clock;
+ us_interval = NS_TO_US(rq->niffies - rq->load_update);
+ curload = rq_load(rq);
+ load = rq->load_avg - (rq->load_avg * us_interval * 5 / 262144);
+ if (unlikely(load < 0))
+ load = 0;
+ load += curload * curload * SCHED_CAPACITY_SCALE * us_interval * 5 / 262144;
+ /* If this CPU has all the load, make it ramp up quickly */
+ if (curload > load && curload >= atomic_read(&grq.nr_running))
+ load = curload;
+ rq->load_avg = load;
+
+ rq->load_update = rq->niffies;
if (likely(rq_local(rq)))
cpufreq_trigger(rq->niffies, rq->load_avg);
}
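For scale: the decay term in update_load_avg() above removes load * us_interval * 5 / 262144 per update, i.e. a fraction 5/262144 of the load per microsecond. Treated as an exponential decay that is a time constant of roughly 52 ms, with a half-life near 36 ms; a quick check of those numbers (link with -lm):

#include <math.h>
#include <stdio.h>

int main(void)
{
        double tau_us = 262144.0 / 5.0;          /* ~52429 us */

        printf("tau ~= %.1f ms, half-life ~= %.1f ms\n",
               tau_us / 1000.0, tau_us * log(2.0) / 1000.0);
        return 0;
}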
@@ -1085,7 +1113,7 @@ static inline void atomic_set_cpu(int cpu, cpumask_t *cpumask)
static inline void set_cpuidle_map(int cpu)
{
if (likely(cpu_online(cpu)))
- atomic_set_cpu(cpu, &cpu_idle_map);
+ atomic_set_cpu(cpu, &grq.cpu_idle_map);
}
static inline void atomic_clear_cpu(int cpu, cpumask_t *cpumask)
@@ -1095,12 +1123,12 @@ static inline void atomic_clear_cpu(int cpu, cpumask_t *cpumask)
static inline void clear_cpuidle_map(int cpu)
{
- atomic_clear_cpu(cpu, &cpu_idle_map);
+ atomic_clear_cpu(cpu, &grq.cpu_idle_map);
}
static bool suitable_idle_cpus(struct task_struct *p)
{
- return (cpumask_intersects(&p->cpus_allowed, &cpu_idle_map));
+ return (cpumask_intersects(&p->cpus_allowed, &grq.cpu_idle_map));
}
/*
@@ -1231,7 +1259,7 @@ static struct rq *resched_best_idle(struct task_struct *p, int cpu)
struct rq *rq;
int best_cpu;
- cpumask_and(&tmpmask, &p->cpus_allowed, &cpu_idle_map);
+ cpumask_and(&tmpmask, &p->cpus_allowed, &grq.cpu_idle_map);
best_cpu = best_mask_cpu(cpu, task_rq(p), &tmpmask);
rq = cpu_rq(best_cpu);
if (!smt_schedule(p, rq))
@@ -1343,11 +1371,11 @@ static void activate_task(struct task_struct *p, struct rq *rq)
p->prio = effective_prio(p);
if (task_contributes_to_load(p))
- rq->nr_uninterruptible--;
+ atomic_dec(&grq.nr_uninterruptible);
enqueue_task(rq, p, 0);
p->on_rq = TASK_ON_RQ_QUEUED;
- rq->nr_running++;
+ atomic_inc(&grq.nr_running);
}
/*
@@ -1357,10 +1385,10 @@ static void activate_task(struct task_struct *p, struct rq *rq)
static inline void deactivate_task(struct task_struct *p, struct rq *rq)
{
if (task_contributes_to_load(p))
- rq->nr_uninterruptible++;
+ atomic_inc(&grq.nr_uninterruptible);
p->on_rq = 0;
- rq->nr_running--;
+ atomic_dec(&grq.nr_running);
sched_info_dequeued(rq, p);
}
@@ -1381,7 +1409,7 @@ void set_task_cpu(struct task_struct *p, unsigned int cpu)
WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
lockdep_is_held(&task_rq(p)->lock)));
#endif
- if (p->wake_cpu == cpu)
+ if (task_cpu(p) == cpu)
return;
trace_sched_migrate_task(p, cpu);
perf_event_task_migrate(p);
@@ -1428,9 +1456,7 @@ static inline void take_task(struct rq *rq, int cpu, struct task_struct *p)
dequeue_task(p_rq, p, DEQUEUE_SAVE);
if (p_rq != rq) {
- p_rq->nr_running--;
sched_info_dequeued(p_rq, p);
- rq->nr_running++;
sched_info_queued(rq, p);
}
set_task_cpu(p, cpu);
@@ -1767,7 +1793,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
#ifdef CONFIG_SMP
if (p->sched_contributes_to_load)
- rq->nr_uninterruptible--;
+ atomic_dec(&grq.nr_uninterruptible);
#endif
ttwu_activate(rq, p);
@@ -1796,8 +1822,6 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
}
#ifdef CONFIG_SMP
-static bool sched_smp_initialized __read_mostly;
-
void sched_ttwu_pending(void)
{
struct rq *rq = this_rq();
@@ -2312,6 +2336,16 @@ int sysctl_schedstats(struct ctl_table *table, int write,
static inline void init_schedstats(void) {}
#endif /* CONFIG_SCHEDSTATS */
+static void update_cpu_clock_switch(struct rq *rq, struct task_struct *p);
+
+static void account_task_cpu(struct rq *rq, struct task_struct *p)
+{
+ update_clocks(rq);
+ /* This isn't really a context switch but accounting is the same */
+ update_cpu_clock_switch(rq, p);
+ p->last_ran = rq->niffies;
+}
+
/*
* wake_up_new_task - wake up a newly created task for the first time.
*
@@ -2337,7 +2371,6 @@ void wake_up_new_task(struct task_struct *p)
}
double_rq_lock(rq, new_rq);
- update_clocks(rq);
rq_curr = rq->curr;
/*
@@ -2345,7 +2378,6 @@ void wake_up_new_task(struct task_struct *p)
*/
p->prio = rq_curr->normal_prio;
- activate_task(p, rq);
trace_sched_wakeup_new(p);
/*
@@ -2356,17 +2388,17 @@ void wake_up_new_task(struct task_struct *p)
* modified within schedule() so it is always equal to
* current->deadline.
*/
+ account_task_cpu(rq, rq_curr);
p->last_ran = rq_curr->last_ran;
if (likely(rq_curr->policy != SCHED_FIFO)) {
rq_curr->time_slice /= 2;
- if (unlikely(rq_curr->time_slice < RESCHED_US)) {
+ if (rq_curr->time_slice < RESCHED_US) {
/*
* Forking task has run out of timeslice. Reschedule it and
* start its child with a new time slice and deadline. The
* child will end up running first because its deadline will
* be slightly earlier.
*/
- rq_curr->time_slice = 0;
__set_tsk_resched(rq_curr);
time_slice_expired(p, new_rq);
if (suitable_idle_cpus(p))
@@ -2389,6 +2421,7 @@ void wake_up_new_task(struct task_struct *p)
time_slice_expired(p, new_rq);
try_preempt(p, new_rq);
}
+ activate_task(p, new_rq);
double_rq_unlock(rq, new_rq);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}
@@ -2651,6 +2684,22 @@ context_switch(struct rq *rq, struct task_struct *prev,
}
/*
+ * nr_running, nr_uninterruptible and nr_context_switches:
+ *
+ * externally visible scheduler statistics: current number of runnable
+ * threads, total number of context switches performed since bootup.
+ */
+unsigned long nr_running(void)
+{
+ return atomic_read(&grq.nr_running);
+}
+
+static unsigned long nr_uninterruptible(void)
+{
+ return atomic_read(&grq.nr_uninterruptible);
+}
+
+/*
* Check if only the current task is running on the cpu.
*
* Caution: this function does not check that the caller has disabled
@@ -2674,31 +2723,9 @@ bool single_task_running(void)
}
EXPORT_SYMBOL(single_task_running);
-/*
- * nr_running, nr_uninterruptible and nr_context_switches:
- *
- * externally visible scheduler statistics: current number of runnable
- * threads, total number of context switches performed since bootup.
- */
unsigned long long nr_context_switches(void)
{
- long long sum = 0;
- int i;
-
- for_each_possible_cpu(i)
- sum += cpu_rq(i)->nr_switches;
-
- return sum;
-}
-
-unsigned long nr_running(void)
-{
- long i, sum = 0;
-
- for_each_online_cpu(i)
- sum += cpu_rq(i)->nr_running;
-
- return sum;
+ return (unsigned long long)atomic64_read(&grq.nr_switches);
}
unsigned long nr_iowait(void)
@@ -2719,14 +2746,7 @@ unsigned long nr_iowait_cpu(int cpu)
unsigned long nr_active(void)
{
- long i, sum = 0;
-
- for_each_online_cpu(i) {
- sum += cpu_rq(i)->nr_running;
- sum += cpu_rq(i)->nr_uninterruptible;
- }
-
- return sum;
+ return nr_running() + nr_uninterruptible();
}
/*
@@ -2797,116 +2817,6 @@ DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
EXPORT_PER_CPU_SYMBOL(kstat);
EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
-#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-
-/*
- * There are no locks covering percpu hardirq/softirq time.
- * They are only modified in account_system_vtime, on corresponding CPU
- * with interrupts disabled. So, writes are safe.
- * They are read and saved off onto struct rq in update_rq_clock().
- * This may result in other CPU reading this CPU's irq time and can
- * race with irq/account_system_vtime on this CPU. We would either get old
- * or new value with a side effect of accounting a slice of irq time to wrong
- * task when irq is in progress while we read rq->clock. That is a worthy
- * compromise in place of having locks on each irq in account_system_time.
- */
-static DEFINE_PER_CPU(u64, cpu_hardirq_time);
-static DEFINE_PER_CPU(u64, cpu_softirq_time);
-
-static DEFINE_PER_CPU(u64, irq_start_time);
-static int sched_clock_irqtime;
-
-void enable_sched_clock_irqtime(void)
-{
- sched_clock_irqtime = 1;
-}
-
-void disable_sched_clock_irqtime(void)
-{
- sched_clock_irqtime = 0;
-}
-
-#ifndef CONFIG_64BIT
-static DEFINE_PER_CPU(seqcount_t, irq_time_seq);
-
-static inline void irq_time_write_begin(void)
-{
- __this_cpu_inc(irq_time_seq.sequence);
- smp_wmb();
-}
-
-static inline void irq_time_write_end(void)
-{
- smp_wmb();
- __this_cpu_inc(irq_time_seq.sequence);
-}
-
-static inline u64 irq_time_read(int cpu)
-{
- u64 irq_time;
- unsigned seq;
-
- do {
- seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
- irq_time = per_cpu(cpu_softirq_time, cpu) +
- per_cpu(cpu_hardirq_time, cpu);
- } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
-
- return irq_time;
-}
-#else /* CONFIG_64BIT */
-static inline void irq_time_write_begin(void)
-{
-}
-
-static inline void irq_time_write_end(void)
-{
-}
-
-static inline u64 irq_time_read(int cpu)
-{
- return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
-}
-#endif /* CONFIG_64BIT */
-
-/*
- * Called before incrementing preempt_count on {soft,}irq_enter
- * and before decrementing preempt_count on {soft,}irq_exit.
- */
-void irqtime_account_irq(struct task_struct *curr)
-{
- unsigned long flags;
- s64 delta;
- int cpu;
-
- if (!sched_clock_irqtime)
- return;
-
- local_irq_save(flags);
-
- cpu = smp_processor_id();
- delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
- __this_cpu_add(irq_start_time, delta);
-
- irq_time_write_begin();
- /*
- * We do not account for softirq time from ksoftirqd here.
- * We want to continue accounting softirq time to ksoftirqd thread
- * in that case, so as not to confuse scheduler with a special task
- * that do not consume any time, but still wants to run.
- */
- if (hardirq_count())
- __this_cpu_add(cpu_hardirq_time, delta);
- else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
- __this_cpu_add(cpu_softirq_time, delta);
-
- irq_time_write_end();
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(irqtime_account_irq);
-
-#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
-
#ifdef CONFIG_PARAVIRT
static inline u64 steal_ticks(u64 steal)
{
@@ -2968,89 +2878,6 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
#endif
-#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-static void irqtime_account_hi_si(void)
-{
- u64 *cpustat = kcpustat_this_cpu->cpustat;
- u64 latest_ns;
-
- latest_ns = nsecs_to_cputime64(this_cpu_read(cpu_hardirq_time));
- if (latest_ns > cpustat[CPUTIME_IRQ])
- cpustat[CPUTIME_IRQ] += (__force u64)cputime_one_jiffy;
-
- latest_ns = nsecs_to_cputime64(this_cpu_read(cpu_softirq_time));
- if (latest_ns > cpustat[CPUTIME_SOFTIRQ])
- cpustat[CPUTIME_SOFTIRQ] += (__force u64)cputime_one_jiffy;
-}
-#else /* CONFIG_IRQ_TIME_ACCOUNTING */
-
-#define sched_clock_irqtime (0)
-
-static inline void irqtime_account_hi_si(void)
-{
-}
-#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
-
-static __always_inline bool steal_account_process_tick(void)
-{
-#ifdef CONFIG_PARAVIRT
- if (static_key_false(&paravirt_steal_enabled)) {
- u64 steal;
- cputime_t steal_ct;
-
- steal = paravirt_steal_clock(smp_processor_id());
- steal -= this_rq()->prev_steal_time;
-
- /*
- * cputime_t may be less precise than nsecs (eg: if it's
- * based on jiffies). Lets cast the result to cputime
- * granularity and account the rest on the next rounds.
- */
- steal_ct = nsecs_to_cputime(steal);
- this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct);
-
- account_steal_time(steal_ct);
- return steal_ct;
- }
-#endif
- return false;
-}
-
-/*
- * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
- * tasks (sum on group iteration) belonging to @tsk's group.
- */
-void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
-{
- struct signal_struct *sig = tsk->signal;
- cputime_t utime, stime;
- struct task_struct *t;
- unsigned int seq, nextseq;
- unsigned long flags;
-
- rcu_read_lock();
- /* Attempt a lockless read on the first round. */
- nextseq = 0;
- do {
- seq = nextseq;
- flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
- times->utime = sig->utime;
- times->stime = sig->stime;
- times->sum_exec_runtime = sig->sum_sched_runtime;
-
- for_each_thread(tsk, t) {
- task_cputime(t, &utime, &stime);
- times->utime += utime;
- times->stime += stime;
- times->sum_exec_runtime += task_sched_runtime(t);
- }
- /* If lockless access failed, take the lock. */
- nextseq = 1;
- } while (need_seqretry(&sig->stats_lock, seq));
- done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
- rcu_read_unlock();
-}
-
/*
* On each tick, add the number of nanoseconds to the unbanked variables and
* once one tick's worth has accumulated, account it allowing for accurate
@@ -3175,15 +3002,11 @@ static void pc_user_time(struct rq *rq, struct task_struct *p, unsigned long ns)
* Bank in p->sched_time the ns elapsed since the last tick or switch.
* CPU scheduler quota accounting is also performed here in microseconds.
*/
-static void
-update_cpu_clock_tick(struct rq *rq, struct task_struct *p)
+static void update_cpu_clock_tick(struct rq *rq, struct task_struct *p)
{
s64 account_ns = rq->niffies - p->last_ran;
struct task_struct *idle = rq->idle;
- if (steal_account_process_tick())
- goto ts_account;
-
/* Accurate tick timekeeping */
if (user_mode(get_irq_regs()))
pc_user_time(rq, p, account_ns);
@@ -3192,10 +3015,6 @@ update_cpu_clock_tick(struct rq *rq, struct task_struct *p)
} else
pc_idle_time(rq, idle, account_ns);
- if (sched_clock_irqtime)
- irqtime_account_hi_si();
-
-ts_account:
/* time_slice accounting is done in usecs to avoid overflow on 32bit */
if (p->policy != SCHED_FIFO && p != idle)
p->time_slice -= NS_TO_US(account_ns);
@@ -3208,8 +3027,7 @@ ts_account:
* Bank in p->sched_time the ns elapsed since the last tick or switch.
* CPU scheduler quota accounting is also performed here in microseconds.
*/
-static void
-update_cpu_clock_switch(struct rq *rq, struct task_struct *p)
+static void update_cpu_clock_switch(struct rq *rq, struct task_struct *p)
{
s64 account_ns = rq->niffies - p->last_ran;
struct task_struct *idle = rq->idle;
@@ -3283,133 +3101,86 @@ unsigned long long task_sched_runtime(struct task_struct *p)
return ns;
}
-/* Compatibility crap */
-void account_user_time(struct task_struct *p, cputime_t cputime,
- cputime_t cputime_scaled)
-{
-}
-
-void account_idle_time(cputime_t cputime)
+#ifdef CONFIG_HIGH_RES_TIMERS
+static inline int hrexpiry_enabled(struct rq *rq)
{
+ if (unlikely(!cpu_active(cpu_of(rq)) || !sched_smp_initialized))
+ return 0;
+ return hrtimer_is_hres_active(&rq->hrexpiry_timer);
}
/*
- * Account guest cpu time to a process.
- * @p: the process that the cpu time gets accounted to
- * @cputime: the cpu time spent in virtual machine since the last update
- * @cputime_scaled: cputime scaled by cpu frequency
+ * Use HR-timers to deliver accurate preemption points.
*/
-static void account_guest_time(struct task_struct *p, cputime_t cputime,
- cputime_t cputime_scaled)
+static void hrexpiry_clear(struct rq *rq)
{
- u64 *cpustat = kcpustat_this_cpu->cpustat;
-
- /* Add guest time to process. */
- p->utime += (__force u64)cputime;
- p->utimescaled += (__force u64)cputime_scaled;
- account_group_user_time(p, cputime);
- p->gtime += (__force u64)cputime;
-
- /* Add guest time to cpustat. */
- if (task_nice(p) > 0) {
- cpustat[CPUTIME_NICE] += (__force u64)cputime;
- cpustat[CPUTIME_GUEST_NICE] += (__force u64)cputime;
- } else {
- cpustat[CPUTIME_USER] += (__force u64)cputime;
- cpustat[CPUTIME_GUEST] += (__force u64)cputime;
- }
+ if (!hrexpiry_enabled(rq))
+ return;
+ if (hrtimer_active(&rq->hrexpiry_timer))
+ hrtimer_cancel(&rq->hrexpiry_timer);
}
/*
- * Account system cpu time to a process and desired cpustat field
- * @p: the process that the cpu time gets accounted to
- * @cputime: the cpu time spent in kernel space since the last update
- * @cputime_scaled: cputime scaled by cpu frequency
- * @target_cputime64: pointer to cpustat field that has to be updated
+ * High-resolution time_slice expiry.
+ * Runs from hardirq context with interrupts disabled.
*/
-static inline
-void __account_system_time(struct task_struct *p, cputime_t cputime,
- cputime_t cputime_scaled, cputime64_t *target_cputime64)
+static enum hrtimer_restart hrexpiry(struct hrtimer *timer)
{
- /* Add system time to process. */
- p->stime += (__force u64)cputime;
- p->stimescaled += (__force u64)cputime_scaled;
- account_group_system_time(p, cputime);
-
- /* Add system time to cpustat. */
- *target_cputime64 += (__force u64)cputime;
-
- /* Account for system time used */
- acct_update_integrals(p);
-}
+ struct rq *rq = container_of(timer, struct rq, hrexpiry_timer);
+ struct task_struct *p;
-/*
- * Account system cpu time to a process.
- * @p: the process that the cpu time gets accounted to
- * @hardirq_offset: the offset to subtract from hardirq_count()
- * @cputime: the cpu time spent in kernel space since the last update
- * @cputime_scaled: cputime scaled by cpu frequency
- * This is for guest only now.
- */
-void account_system_time(struct task_struct *p, int hardirq_offset,
- cputime_t cputime, cputime_t cputime_scaled)
-{
+ /* This can happen during CPU hotplug / resume */
+ if (unlikely(cpu_of(rq) != smp_processor_id()))
+ goto out;
- if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0))
- account_guest_time(p, cputime, cputime_scaled);
+ /*
+ * We're doing this without the runqueue lock but this should always
+ * be run on the local CPU. The time slice should run out in __schedule
+ * anyway, but we zero it here in case niffies is running slightly behind.
+ */
+ p = rq->curr;
+ p->time_slice = 0;
+ __set_tsk_resched(p);
+out:
+ return HRTIMER_NORESTART;
}
/*
- * Account for involuntary wait time.
- * @steal: the cpu time spent in involuntary wait
+ * Called to set the hrexpiry timer state.
+ *
+ * Called with irqs disabled, from the local CPU only.
*/
-void account_steal_time(cputime_t cputime)
+static void hrexpiry_start(struct rq *rq, u64 delay)
{
- u64 *cpustat = kcpustat_this_cpu->cpustat;
+ if (!hrexpiry_enabled(rq))
+ return;
- cpustat[CPUTIME_STEAL] += (__force u64)cputime;
+ hrtimer_start(&rq->hrexpiry_timer, ns_to_ktime(delay),
+ HRTIMER_MODE_REL_PINNED);
}
-/*
- * Account for idle time.
- * @cputime: the cpu time spent in idle wait
- */
-static void account_idle_times(cputime_t cputime)
+static void init_rq_hrexpiry(struct rq *rq)
{
- u64 *cpustat = kcpustat_this_cpu->cpustat;
- struct rq *rq = this_rq();
-
- if (atomic_read(&rq->nr_iowait) > 0)
- cpustat[CPUTIME_IOWAIT] += (__force u64)cputime;
- else
- cpustat[CPUTIME_IDLE] += (__force u64)cputime;
+ hrtimer_init(&rq->hrexpiry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ rq->hrexpiry_timer.function = hrexpiry;
}
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-
-void account_process_tick(struct task_struct *p, int user_tick)
+static inline int rq_dither(struct rq *rq)
{
+ if (!hrexpiry_enabled(rq))
+ return HALF_JIFFY_US;
+ return 0;
}
-
-/*
- * Account multiple ticks of steal time.
- * @p: the process from which the cpu time has been stolen
- * @ticks: number of stolen ticks
- */
-void account_steal_ticks(unsigned long ticks)
+#else /* CONFIG_HIGH_RES_TIMERS */
+static inline void init_rq_hrexpiry(struct rq *rq)
{
- account_steal_time(jiffies_to_cputime(ticks));
}
-/*
- * Account multiple ticks of idle time.
- * @ticks: number of stolen ticks
- */
-void account_idle_ticks(unsigned long ticks)
+static inline int rq_dither(struct rq *rq)
{
- account_idle_times(jiffies_to_cputime(ticks));
+ return HALF_JIFFY_US;
}
-#endif
+#endif /* CONFIG_HIGH_RES_TIMERS */
/*
* Functions to test for when SCHED_ISO tasks have used their allocated
@@ -3488,6 +3259,8 @@ static void task_running_tick(struct rq *rq)
* allowed to run into the 2nd half of the next tick if they will
* run out of time slice in the interim. Otherwise, if they have
* less than RESCHED_US μs of time slice left they will be rescheduled.
+ * Dither is used as a backup for when hrexpiry is disabled or high
+ * resolution timers are not configured in.
*/
if (p->time_slice - rq->dither >= RESCHED_US)
return;
@@ -3497,6 +3270,60 @@ out_resched:
rq_unlock(rq);
}
+#ifdef CONFIG_NO_HZ_FULL
+/*
+ * We can stop the timer tick any time highres timers are active since
+ * we rely entirely on highres timeouts for task expiry rescheduling.
+ */
+static void sched_stop_tick(struct rq *rq, int cpu)
+{
+ if (!hrexpiry_enabled(rq))
+ return;
+ if (!tick_nohz_full_enabled())
+ return;
+ if (!tick_nohz_full_cpu(cpu))
+ return;
+ tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
+}
+
+static inline void sched_start_tick(struct rq *rq, int cpu)
+{
+ tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
+}
+
+/**
+ * scheduler_tick_max_deferment
+ *
+ * Keep at least one tick per second when a single
+ * active task is running.
+ *
+ * This makes sure that uptime continues to move forward, even
+ * with a very low granularity.
+ *
+ * Return: Maximum deferment in nanoseconds.
+ */
+u64 scheduler_tick_max_deferment(void)
+{
+ struct rq *rq = this_rq();
+ unsigned long next, now = READ_ONCE(jiffies);
+
+ next = rq->last_jiffy + HZ;
+
+ if (time_before_eq(next, now))
+ return 0;
+
+ return jiffies_to_nsecs(next - now);
+}
+#else
+static inline void sched_stop_tick(struct rq *rq, int cpu)
+{
+}
+
+static inline void sched_start_tick(struct rq *rq, int cpu)
+{
+}
+#endif
+
/*
* This function gets called by the timer code, with HZ frequency.
* We call it with interrupts disabled.
@@ -3507,7 +3334,7 @@ void scheduler_tick(void)
struct rq *rq = cpu_rq(cpu);
sched_clock_tick();
- update_rq_clock(rq);
+ update_clocks(rq);
update_load_avg(rq);
update_cpu_clock_tick(rq, rq->curr);
if (!rq_idle(rq))
@@ -3517,6 +3344,7 @@ void scheduler_tick(void)
rq->last_scheduler_tick = rq->last_jiffy;
rq->last_tick = rq->clock;
perf_event_task_tick();
+ sched_stop_tick(rq, cpu);
}
#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
@@ -3799,6 +3627,17 @@ static inline void schedule_debug(struct task_struct *prev)
*/
static inline void set_rq_task(struct rq *rq, struct task_struct *p)
{
+#ifdef CONFIG_HIGH_RES_TIMERS
+ if (p == rq->idle || p->policy == SCHED_FIFO)
+ hrexpiry_clear(rq);
+ else
+ hrexpiry_start(rq, US_TO_NS(p->time_slice));
+#endif /* CONFIG_HIGH_RES_TIMERS */
+ if (rq->clock - rq->last_tick > HALF_JIFFY_NS)
+ rq->dither = 0;
+ else
+ rq->dither = rq_dither(rq);
+
rq->rq_deadline = p->deadline;
rq->rq_prio = p->prio;
#ifdef CONFIG_SMT_NICE
@@ -3980,10 +3819,6 @@ static void __sched notrace __schedule(bool preempt)
update_clocks(rq);
niffies = rq->niffies;
update_cpu_clock_switch(rq, prev);
- if (rq->clock - rq->last_tick > HALF_JIFFY_NS)
- rq->dither = 0;
- else
- rq->dither = HALF_JIFFY_US;
clear_tsk_need_resched(prev);
clear_preempt_need_resched();
@@ -3994,15 +3829,15 @@ static void __sched notrace __schedule(bool preempt)
}
next = earliest_deadline_task(rq, cpu, idle);
- if (likely(next->prio != PRIO_LIMIT)) {
+ if (likely(next->prio != PRIO_LIMIT))
clear_cpuidle_map(cpu);
- next->last_ran = niffies;
- } else {
+ else {
set_cpuidle_map(cpu);
update_load_avg(rq);
}
set_rq_task(rq, next);
+ next->last_ran = niffies;
if (likely(prev != next)) {
/*
@@ -4014,14 +3849,16 @@ static void __sched notrace __schedule(bool preempt)
check_siblings(rq);
else
wake_siblings(rq);
- rq->nr_switches++;
+ atomic64_inc(&grq.nr_switches);
rq->curr = next;
++*switch_count;
trace_sched_switch(preempt, prev, next);
rq = context_switch(rq, prev, next); /* unlocks the rq */
- } else
+ } else {
+ check_siblings(rq);
rq_unlock_irq(rq);
+ }
}
static inline void sched_submit_work(struct task_struct *tsk)
@@ -5607,8 +5444,12 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
__do_set_cpus_allowed(p, new_mask);
if (needs_other_cpu(p, task_cpu(p))) {
+ struct rq *rq;
+
set_task_cpu(p, valid_task_cpu(p));
+ rq = __task_rq_lock(p);
resched_task(p);
+ __task_rq_unlock(rq);
}
}
@@ -5641,6 +5482,7 @@ void init_idle(struct task_struct *idle, int cpu)
raw_spin_lock_irqsave(&idle->pi_lock, flags);
raw_spin_lock(&rq->lock);
idle->last_ran = rq->niffies;
+ time_slice_expired(idle, rq);
idle->state = TASK_RUNNING;
/* Setting prio to illegal value shouldn't matter when never queued */
idle->prio = PRIO_LIMIT;
@@ -7439,6 +7281,8 @@ int sched_cpu_dying(unsigned int cpu)
}
bind_zero(cpu);
double_rq_unlock(rq, cpu_rq(0));
+ sched_start_tick(rq, cpu);
+ hrexpiry_clear(rq);
local_irq_restore(flags);
return 0;
@@ -7463,7 +7307,7 @@ static const cpumask_t *thread_cpumask(int cpu)
/* All this CPU's SMT siblings are idle */
static bool siblings_cpu_idle(struct rq *rq)
{
- return cpumask_subset(&rq->thread_mask, &cpu_idle_map);
+ return cpumask_subset(&rq->thread_mask, &grq.cpu_idle_map);
}
#endif
#ifdef CONFIG_SCHED_MC
@@ -7474,7 +7318,7 @@ static const cpumask_t *core_cpumask(int cpu)
/* All this CPU's shared cache siblings are idle */
static bool cache_cpu_idle(struct rq *rq)
{
- return cpumask_subset(&rq->core_mask, &cpu_idle_map);
+ return cpumask_subset(&rq->core_mask, &grq.cpu_idle_map);
}
#endif
@@ -7609,6 +7453,7 @@ void __init sched_init_smp(void)
#else
void __init sched_init_smp(void)
{
+ sched_smp_initialized = true;
}
#endif /* CONFIG_SMP */
@@ -7655,11 +7500,14 @@ void __init sched_init(void)
for (i = 1 ; i < NICE_WIDTH ; i++)
prio_ratios[i] = prio_ratios[i - 1] * 11 / 10;
+ atomic_set(&grq.nr_running, 0);
+ atomic_set(&grq.nr_uninterruptible, 0);
+ atomic64_set(&grq.nr_switches, 0);
skiplist_node_init(&init_task.node);
#ifdef CONFIG_SMP
init_defrootdomain();
- cpumask_clear(&cpu_idle_map);
+ cpumask_clear(&grq.cpu_idle_map);
#else
uprq = &per_cpu(runqueues, 0);
#endif
@@ -7673,7 +7521,6 @@ void __init sched_init(void)
#endif /* CONFIG_CGROUP_SCHED */
for_each_possible_cpu(i) {
rq = cpu_rq(i);
- rq->nr_running = rq->nr_uninterruptible = rq->nr_switches = 0;
skiplist_init(&rq->node);
rq->sl = new_skiplist(&rq->node);
raw_spin_lock_init(&rq->lock);
@@ -7692,6 +7539,7 @@ void __init sched_init(void)
rq->cpu = i;
rq_attach_root(rq, &def_root_domain);
#endif
+ init_rq_hrexpiry(rq);
atomic_set(&rq->nr_iowait, 0);
}
@@ -7899,199 +7747,6 @@ void set_curr_task(int cpu, struct task_struct *p)
#endif
-/*
- * Use precise platform statistics if available:
- */
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-
-#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
-void vtime_common_task_switch(struct task_struct *prev)
-{
- if (is_idle_task(prev))
- vtime_account_idle(prev);
- else
- vtime_account_system(prev);
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- vtime_account_user(prev);
-#endif
- arch_vtime_task_switch(prev);
-}
-#endif
-
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
-{
- *ut = p->utime;
- *st = p->stime;
-}
-EXPORT_SYMBOL_GPL(task_cputime_adjusted);
-
-void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
-{
- struct task_cputime cputime;
-
- thread_group_cputime(p, &cputime);
-
- *ut = cputime.utime;
- *st = cputime.stime;
-}
-
-void vtime_account_system_irqsafe(struct task_struct *tsk)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- vtime_account_system(tsk);
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(vtime_account_system_irqsafe);
-
-/*
- * Archs that account the whole time spent in the idle task
- * (outside irq) as idle time can rely on this and just implement
- * vtime_account_system() and vtime_account_idle(). Archs that
- * have other meaning of the idle time (s390 only includes the
- * time spent by the CPU when it's in low power mode) must override
- * vtime_account().
- */
-#ifndef __ARCH_HAS_VTIME_ACCOUNT
-void vtime_account_irq_enter(struct task_struct *tsk)
-{
- if (!in_interrupt() && is_idle_task(tsk))
- vtime_account_idle(tsk);
- else
- vtime_account_system(tsk);
-}
-EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
-#endif /* __ARCH_HAS_VTIME_ACCOUNT */
-
-#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
-/*
- * Perform (stime * rtime) / total, but avoid multiplication overflow by
- * losing precision when the numbers are big.
- */
-static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
-{
- u64 scaled;
-
- for (;;) {
- /* Make sure "rtime" is the bigger of stime/rtime */
- if (stime > rtime) {
- u64 tmp = rtime; rtime = stime; stime = tmp;
- }
-
- /* Make sure 'total' fits in 32 bits */
- if (total >> 32)
- goto drop_precision;
-
- /* Does rtime (and thus stime) fit in 32 bits? */
- if (!(rtime >> 32))
- break;
-
- /* Can we just balance rtime/stime rather than dropping bits? */
- if (stime >> 31)
- goto drop_precision;
-
- /* We can grow stime and shrink rtime and try to make them both fit */
- stime <<= 1;
- rtime >>= 1;
- continue;
-
-drop_precision:
- /* We drop from rtime, it has more bits than stime */
- rtime >>= 1;
- total >>= 1;
- }
-
- /*
- * Make sure gcc understands that this is a 32x32->64 multiply,
- * followed by a 64/32->64 divide.
- */
- scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
- return (__force cputime_t) scaled;
-}
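
The loop above is easier to follow with concrete numbers. A standalone user-space condensation (the inputs are arbitrary; the cputime_t casts are dropped):

#include <stdio.h>
#include <stdint.h>

/* Same balancing/precision-dropping loop as scale_stime() above. */
static uint64_t scale_demo(uint64_t stime, uint64_t rtime, uint64_t total)
{
	for (;;) {
		if (stime > rtime) {	/* keep rtime the bigger one */
			uint64_t tmp = rtime;
			rtime = stime;
			stime = tmp;
		}
		if (total >> 32)
			goto drop_precision;
		if (!(rtime >> 32))
			break;
		if (stime >> 31)
			goto drop_precision;
		stime <<= 1;		/* balance instead of dropping bits */
		rtime >>= 1;
		continue;
drop_precision:
		rtime >>= 1;		/* rtime has more bits than stime */
		total >>= 1;
	}
	return (uint64_t)(uint32_t)stime * (uint32_t)rtime / (uint32_t)total;
}

int main(void)
{
	/* A naive stime * rtime here would need 80 bits. */
	uint64_t stime = 1ULL << 40, rtime = 3ULL << 40, total = 4ULL << 40;

	/* stime is 1/4 of total, so expect ~1/4 of rtime. */
	printf("%llu (expected %llu)\n",
	       (unsigned long long)scale_demo(stime, rtime, total),
	       (unsigned long long)(rtime / 4));
	return 0;
}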
-
-/*
- * Adjust tick based cputime random precision against scheduler
- * runtime accounting.
- */
-static void cputime_adjust(struct task_cputime *curr,
- struct prev_cputime *prev,
- cputime_t *ut, cputime_t *st)
-{
- cputime_t rtime, stime, utime, total;
-
- stime = curr->stime;
- total = stime + curr->utime;
-
- /*
- * Tick based cputime accounting depend on random scheduling
- * timeslices of a task to be interrupted or not by the timer.
- * Depending on these circumstances, the number of these interrupts
- * may be over or under-optimistic, matching the real user and system
- * cputime with a variable precision.
- *
- * Fix this by scaling these tick based values against the total
- * runtime accounted by the CFS scheduler.
- */
- rtime = nsecs_to_cputime(curr->sum_exec_runtime);
-
- /*
- * Update userspace visible utime/stime values only if actual execution
- * time is bigger than already exported. Note that can happen, that we
- * provided bigger values due to scaling inaccuracy on big numbers.
- */
- if (prev->stime + prev->utime >= rtime)
- goto out;
-
- if (total) {
- stime = scale_stime((__force u64)stime,
- (__force u64)rtime, (__force u64)total);
- utime = rtime - stime;
- } else {
- stime = rtime;
- utime = 0;
- }
-
- /*
- * If the tick based count grows faster than the scheduler one,
- * the result of the scaling may go backward.
- * Let's enforce monotonicity.
- */
- prev->stime = max(prev->stime, stime);
- prev->utime = max(prev->utime, utime);
-
-out:
- *ut = prev->utime;
- *st = prev->stime;
-}
-
-void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
-{
- struct task_cputime cputime = {
- .sum_exec_runtime = tsk_seruntime(p),
- };
-
- task_cputime(p, &cputime.utime, &cputime.stime);
- cputime_adjust(&cputime, &p->prev_cputime, ut, st);
-}
-EXPORT_SYMBOL_GPL(task_cputime_adjusted);
-
-/*
- * Must be called with siglock held.
- */
-void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
-{
- struct task_cputime cputime;
-
- thread_group_cputime(p, &cputime);
- cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
-}
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
-
void init_idle_bootup_task(struct task_struct *idle)
{}
diff --git a/kernel/sched/MuQSS.h b/kernel/sched/MuQSS.h
index 10a12b335..f9510d739 100644
--- a/kernel/sched/MuQSS.h
+++ b/kernel/sched/MuQSS.h
@@ -2,6 +2,7 @@
#include <linux/cpuidle.h>
#include <linux/skip_list.h>
#include <linux/stop_machine.h>
+#include "cpuacct.h"
#ifndef MUQSS_SCHED_H
#define MUQSS_SCHED_H
@@ -17,9 +18,6 @@
struct rq {
struct task_struct *curr, *idle, *stop;
struct mm_struct *prev_mm;
- long nr_uninterruptible;
- s64 nr_switches;
- int nr_running;
raw_spinlock_t lock;
@@ -88,6 +86,10 @@ struct rq {
int iso_ticks;
bool iso_refractory;
+#ifdef CONFIG_HIGH_RES_TIMERS
+ struct hrtimer hrexpiry_timer;
+#endif
+
#ifdef CONFIG_SCHEDSTATS
/* latency stats */
@@ -247,6 +249,55 @@ static inline struct cpuidle_state *idle_get_state(struct rq *rq)
}
#endif
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+
+DECLARE_PER_CPU(u64, cpu_hardirq_time);
+DECLARE_PER_CPU(u64, cpu_softirq_time);
+
+#ifndef CONFIG_64BIT
+DECLARE_PER_CPU(seqcount_t, irq_time_seq);
+
+static inline void irq_time_write_begin(void)
+{
+ __this_cpu_inc(irq_time_seq.sequence);
+ smp_wmb();
+}
+
+static inline void irq_time_write_end(void)
+{
+ smp_wmb();
+ __this_cpu_inc(irq_time_seq.sequence);
+}
+
+static inline u64 irq_time_read(int cpu)
+{
+ u64 irq_time;
+ unsigned seq;
+
+ do {
+ seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
+ irq_time = per_cpu(cpu_softirq_time, cpu) +
+ per_cpu(cpu_hardirq_time, cpu);
+ } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
+
+ return irq_time;
+}
+#else /* CONFIG_64BIT */
+static inline void irq_time_write_begin(void)
+{
+}
+
+static inline void irq_time_write_end(void)
+{
+}
+
+static inline u64 irq_time_read(int cpu)
+{
+ return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
+}
+#endif /* CONFIG_64BIT */
+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
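
For illustration, a hypothetical write-side user of the accessors above (the real accounting path is not part of this hunk). The begin/end bracket is what makes a concurrent 32-bit reader retry instead of observing a torn 64-bit value:

/* Assumed to run on the local CPU with interrupts disabled. */
static void demo_account_hardirq(u64 delta)
{
	irq_time_write_begin();
	__this_cpu_add(cpu_hardirq_time, delta);
	irq_time_write_end();
}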
+
#ifdef CONFIG_CPU_FREQ
DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index a846cf89e..f09077a45 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -4,7 +4,12 @@
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h>
+#ifdef CONFIG_SCHED_MUQSS
+#include "MuQSS.h"
+#include "stats.h"
+#else
#include "sched.h"
+#endif
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif
@@ -671,7 +676,7 @@ out:
void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
struct task_cputime cputime = {
- .sum_exec_runtime = p->se.sum_exec_runtime,
+ .sum_exec_runtime = tsk_seruntime(p),
};
task_cputime(p, &cputime.utime, &cputime.stime);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4309c8e76..b728c4117 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -481,17 +481,23 @@ static inline int entity_before(struct sched_entity *a,
static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
+ struct sched_entity *curr = cfs_rq->curr;
+
u64 vruntime = cfs_rq->min_vruntime;
- if (cfs_rq->curr)
- vruntime = cfs_rq->curr->vruntime;
+ if (curr) {
+ if (curr->on_rq)
+ vruntime = curr->vruntime;
+ else
+ curr = NULL;
+ }
if (cfs_rq->rb_leftmost) {
struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
struct sched_entity,
run_node);
- if (!cfs_rq->curr)
+ if (!curr)
vruntime = se->vruntime;
else
vruntime = min_vruntime(vruntime, se->vruntime);
@@ -705,7 +711,14 @@ void init_entity_runnable_average(struct sched_entity *se)
 * will definitely be updated (after enqueue).
*/
sa->period_contrib = 1023;
- sa->load_avg = scale_load_down(se->load.weight);
+ /*
+ * Tasks are initialized with full load to be seen as heavy tasks until
+ * they get a chance to stabilize to their real load level.
+ * Group entities are initialized with zero load to reflect the fact that
+ * nothing has been attached to the task group yet.
+ */
+ if (entity_is_task(se))
+ sa->load_avg = scale_load_down(se->load.weight);
sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
/*
* At this point, util_avg won't be used in select_task_rq_fair anyway
@@ -3484,9 +3497,10 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
account_entity_dequeue(cfs_rq, se);
/*
- * Normalize the entity after updating the min_vruntime because the
- * update can refer to the ->curr item and we need to reflect this
- * movement in our normalized position.
+ * Normalize after update_curr(); which will also have moved
+ * min_vruntime if @se is the one holding it back. But before doing
+ * update_min_vruntime() again, which will discount @se's position and
+ * can move min_vruntime forward still more.
*/
if (!(flags & DEQUEUE_SLEEP))
se->vruntime -= cfs_rq->min_vruntime;
@@ -3494,8 +3508,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
/* return excess runtime on last dequeue */
return_cfs_rq_runtime(cfs_rq);
- update_min_vruntime(cfs_rq);
update_cfs_shares(cfs_rq);
+
+ /*
+ * Now advance min_vruntime if @se was the entity holding it back,
+ * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
+ * put back on, and if we advance min_vruntime, we'll be placed back
+ * further than we started -- ie. we'll be penalized.
+ */
+ if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
+ update_min_vruntime(cfs_rq);
}
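
The guard reads as a truth table: min_vruntime is advanced for every flag combination except SAVE-without-MOVE. A standalone check (the flag values are stand-ins for the kernel's definitions in kernel/sched/sched.h):

#include <stdio.h>

#define DEQUEUE_SAVE 0x02
#define DEQUEUE_MOVE 0x04

int main(void)
{
	int cases[] = { 0, DEQUEUE_SAVE, DEQUEUE_MOVE,
			DEQUEUE_SAVE | DEQUEUE_MOVE };

	for (int i = 0; i < 4; i++)
		printf("flags=%#x -> %s\n", cases[i],
		       (cases[i] & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE
		       ? "advance min_vruntime"
		       : "skip (entity will be re-enqueued in place)");
	return 0;
}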
/*
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 10e18d267..4008d9f95 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -89,7 +89,7 @@ config NO_HZ_IDLE
config NO_HZ_FULL
bool "Full dynticks system (tickless)"
# NO_HZ_COMMON dependency
- depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS && !SCHED_MUQSS
+ depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
# We need at least one periodic CPU for timekeeping
depends on SMP
depends on HAVE_CONTEXT_TRACKING
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 2c5bc77c0..b96deed54 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -198,8 +198,13 @@ int clockevents_tick_resume(struct clock_event_device *dev)
#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
+#ifdef CONFIG_SCHED_MUQSS
+/* Limit min_delta to 100us */
+#define MIN_DELTA_LIMIT (NSEC_PER_SEC / 10000)
+#else
/* Limit min_delta to a jiffie */
#define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ)
+#endif
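
What the two limits work out to for a few common HZ choices, as a standalone arithmetic check:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000UL

int main(void)
{
	unsigned long hz[] = { 100, 250, 1000 };

	printf("MuQSS limit: %lu ns (100 us)\n", NSEC_PER_SEC / 10000);
	for (int i = 0; i < 3; i++)	/* one-jiffie limit per HZ choice */
		printf("HZ=%-4lu limit: %lu ns\n", hz[i], NSEC_PER_SEC / hz[i]);
	return 0;
}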
/**
* clockevents_increase_min_delta - raise minimum delta of a clock event device
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 5b79f9786..6f1b7a77e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1323,7 +1323,7 @@ config RCU_PERF_TEST
config RCU_TORTURE_TEST
tristate "torture tests for RCU"
- depends on DEBUG_KERNEL && !SCHED_MUQSS
+ depends on DEBUG_KERNEL
select TORTURE_TEST
select SRCU
select TASKS_RCU
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 603bdd01e..770d83eb3 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1437,22 +1437,32 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
/*
* Dissolve a given free hugepage into free buddy pages. This function does
- * nothing for in-use (including surplus) hugepages.
+ * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
+ * number of free hugepages would be reduced below the number of reserved
+ * hugepages.
*/
-static void dissolve_free_huge_page(struct page *page)
+static int dissolve_free_huge_page(struct page *page)
{
+ int rc = 0;
+
spin_lock(&hugetlb_lock);
if (PageHuge(page) && !page_count(page)) {
struct page *head = compound_head(page);
struct hstate *h = page_hstate(head);
int nid = page_to_nid(head);
+ if (h->free_huge_pages - h->resv_huge_pages == 0) {
+ rc = -EBUSY;
+ goto out;
+ }
list_del(&head->lru);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
h->max_huge_pages--;
update_and_free_page(h, head);
}
+out:
spin_unlock(&hugetlb_lock);
+ return rc;
}
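
A toy model of the new reservation guard, with made-up counters, showing when -EBUSY fires:

#include <stdio.h>
#include <errno.h>

struct hstate_demo { long free_huge_pages, resv_huge_pages; };

static int try_dissolve(struct hstate_demo *h)
{
	if (h->free_huge_pages - h->resv_huge_pages == 0)
		return -EBUSY;	/* every free page backs a reservation */
	h->free_huge_pages--;
	return 0;
}

int main(void)
{
	struct hstate_demo h = { .free_huge_pages = 4, .resv_huge_pages = 4 };

	printf("%d\n", try_dissolve(&h));	/* -EBUSY: all free pages reserved */
	h.resv_huge_pages = 2;
	printf("%d\n", try_dissolve(&h));	/* 0: a surplus free page is consumed */
	return 0;
}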
/*
@@ -1460,16 +1470,28 @@ static void dissolve_free_huge_page(struct page *page)
* make specified memory blocks removable from the system.
* Note that this will dissolve a free gigantic hugepage completely, if any
* part of it lies within the given range.
+ * Also note that if dissolve_free_huge_page() returns with an error, all
+ * free hugepages that were dissolved before that error are lost.
*/
-void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
+int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
unsigned long pfn;
+ struct page *page;
+ int rc = 0;
if (!hugepages_supported())
- return;
+ return rc;
+
+ for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
+ page = pfn_to_page(pfn);
+ if (PageHuge(page) && !page_count(page)) {
+ rc = dissolve_free_huge_page(page);
+ if (rc)
+ break;
+ }
+ }
- for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
- dissolve_free_huge_page(pfn_to_page(pfn));
+ return rc;
}
/*
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9d29ba0f7..962927309 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1945,7 +1945,9 @@ repeat:
* dissolve free hugepages in the memory block before doing offlining
* actually in order to make hugetlbfs's object counting consistent.
*/
- dissolve_free_huge_pages(start_pfn, end_pfn);
+ ret = dissolve_free_huge_pages(start_pfn, end_pfn);
+ if (ret)
+ goto failed_removal;
/* check again */
offlined_pages = check_pages_isolated(start_pfn, end_pfn);
if (offlined_pages < 0) {
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index bf168838a..e72581da9 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -473,7 +473,16 @@ static int xs_nospace(struct rpc_task *task)
spin_unlock_bh(&xprt->transport_lock);
/* Race breaker in case memory is freed before above code is called */
- sk->sk_write_space(sk);
+ if (ret == -EAGAIN) {
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags);
+ rcu_read_unlock();
+
+ sk->sk_write_space(sk);
+ }
return ret;
}
diff --git a/sound/pci/hda/dell_wmi_helper.c b/sound/pci/hda/dell_wmi_helper.c
index 9c22f9583..19d41da79 100644
--- a/sound/pci/hda/dell_wmi_helper.c
+++ b/sound/pci/hda/dell_wmi_helper.c
@@ -49,7 +49,7 @@ static void alc_fixup_dell_wmi(struct hda_codec *codec,
removefunc = true;
if (dell_led_set_func(DELL_LED_MICMUTE, false) >= 0) {
dell_led_value = 0;
- if (spec->gen.num_adc_nids > 1)
+ if (spec->gen.num_adc_nids > 1 && !spec->gen.dyn_adc_switch)
codec_dbg(codec, "Skipping micmute LED control due to several ADCs");
else {
dell_old_cap_hook = spec->gen.cap_sync_hook;
diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
index f0955fd7a..6a2330229 100644
--- a/sound/pci/hda/thinkpad_helper.c
+++ b/sound/pci/hda/thinkpad_helper.c
@@ -62,7 +62,7 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
removefunc = false;
}
if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) {
- if (spec->num_adc_nids > 1)
+ if (spec->num_adc_nids > 1 && !spec->dyn_adc_switch)
codec_dbg(codec,
"Skipping micmute LED control due to several ADCs");
else {
diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
index 3774b117d..49b65d481 100644
--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
+++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
@@ -255,7 +255,7 @@ static struct snd_soc_ops broxton_da7219_ops = {
/* broxton digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link broxton_dais[] = {
/* Front End DAI links */
- [BXT_DPCM_AUDIO_PB]
+ [BXT_DPCM_AUDIO_PB] =
{
.name = "Bxt Audio Port",
.stream_name = "Audio",
@@ -271,7 +271,7 @@ static struct snd_soc_dai_link broxton_dais[] = {
.dpcm_playback = 1,
.ops = &broxton_da7219_fe_ops,
},
- [BXT_DPCM_AUDIO_CP]
+ [BXT_DPCM_AUDIO_CP] =
{
.name = "Bxt Audio Capture Port",
.stream_name = "Audio Record",
@@ -286,7 +286,7 @@ static struct snd_soc_dai_link broxton_dais[] = {
.dpcm_capture = 1,
.ops = &broxton_da7219_fe_ops,
},
- [BXT_DPCM_AUDIO_REF_CP]
+ [BXT_DPCM_AUDIO_REF_CP] =
{
.name = "Bxt Audio Reference cap",
.stream_name = "Refcap",
@@ -300,7 +300,7 @@ static struct snd_soc_dai_link broxton_dais[] = {
.nonatomic = 1,
.dynamic = 1,
},
- [BXT_DPCM_AUDIO_HDMI1_PB]
+ [BXT_DPCM_AUDIO_HDMI1_PB] =
{
.name = "Bxt HDMI Port1",
.stream_name = "Hdmi1",
@@ -313,7 +313,7 @@ static struct snd_soc_dai_link broxton_dais[] = {
.nonatomic = 1,
.dynamic = 1,
},
- [BXT_DPCM_AUDIO_HDMI2_PB]
+ [BXT_DPCM_AUDIO_HDMI2_PB] =
{
.name = "Bxt HDMI Port2",
.stream_name = "Hdmi2",
@@ -326,7 +326,7 @@ static struct snd_soc_dai_link broxton_dais[] = {
.nonatomic = 1,
.dynamic = 1,
},
- [BXT_DPCM_AUDIO_HDMI3_PB]
+ [BXT_DPCM_AUDIO_HDMI3_PB] =
{
.name = "Bxt HDMI Port3",
.stream_name = "Hdmi3",
diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
index 253d7bfbf..d610bdca1 100644
--- a/sound/soc/intel/boards/bxt_rt298.c
+++ b/sound/soc/intel/boards/bxt_rt298.c
@@ -271,7 +271,7 @@ static const struct snd_soc_ops broxton_rt286_fe_ops = {
/* broxton digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link broxton_rt298_dais[] = {
/* Front End DAI links */
- [BXT_DPCM_AUDIO_PB]
+ [BXT_DPCM_AUDIO_PB] =
{
.name = "Bxt Audio Port",
.stream_name = "Audio",
@@ -286,7 +286,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = {
.dpcm_playback = 1,
.ops = &broxton_rt286_fe_ops,
},
- [BXT_DPCM_AUDIO_CP]
+ [BXT_DPCM_AUDIO_CP] =
{
.name = "Bxt Audio Capture Port",
.stream_name = "Audio Record",
@@ -300,7 +300,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = {
.dpcm_capture = 1,
.ops = &broxton_rt286_fe_ops,
},
- [BXT_DPCM_AUDIO_REF_CP]
+ [BXT_DPCM_AUDIO_REF_CP] =
{
.name = "Bxt Audio Reference cap",
.stream_name = "refcap",
@@ -313,7 +313,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = {
.nonatomic = 1,
.dynamic = 1,
},
- [BXT_DPCM_AUDIO_DMIC_CP]
+ [BXT_DPCM_AUDIO_DMIC_CP] =
{
.name = "Bxt Audio DMIC cap",
.stream_name = "dmiccap",
@@ -327,7 +327,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = {
.dynamic = 1,
.ops = &broxton_dmic_ops,
},
- [BXT_DPCM_AUDIO_HDMI1_PB]
+ [BXT_DPCM_AUDIO_HDMI1_PB] =
{
.name = "Bxt HDMI Port1",
.stream_name = "Hdmi1",
@@ -340,7 +340,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = {
.nonatomic = 1,
.dynamic = 1,
},
- [BXT_DPCM_AUDIO_HDMI2_PB]
+ [BXT_DPCM_AUDIO_HDMI2_PB] =
{
.name = "Bxt HDMI Port2",
.stream_name = "Hdmi2",
@@ -353,7 +353,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = {
.nonatomic = 1,
.dynamic = 1,
},
- [BXT_DPCM_AUDIO_HDMI3_PB]
+ [BXT_DPCM_AUDIO_HDMI3_PB] =
{
.name = "Bxt HDMI Port3",
.stream_name = "Hdmi3",
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index d908ff8f9..801082fdc 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -823,6 +823,7 @@ static int dapm_create_or_share_kcontrol(struct snd_soc_dapm_widget *w,
case snd_soc_dapm_switch:
case snd_soc_dapm_mixer:
case snd_soc_dapm_pga:
+ case snd_soc_dapm_out_drv:
wname_in_long_name = true;
kcname_in_long_name = true;
break;
@@ -3049,6 +3050,9 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol,
}
mutex_unlock(&card->dapm_mutex);
+ if (ret)
+ return ret;
+
if (invert)
ucontrol->value.integer.value[0] = max - val;
else
@@ -3200,7 +3204,7 @@ int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
if (e->shift_l != e->shift_r) {
if (item[1] > e->items)
return -EINVAL;
- val |= snd_soc_enum_item_to_val(e, item[1]) << e->shift_l;
+ val |= snd_soc_enum_item_to_val(e, item[1]) << e->shift_r;
mask |= e->mask << e->shift_r;
}
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index ee7f15aa4..34069076b 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -1475,6 +1475,7 @@ widget:
if (widget == NULL) {
dev_err(tplg->dev, "ASoC: failed to create widget %s controls\n",
w->name);
+ ret = -ENOMEM;
goto hdr_err;
}
diff --git a/tools/perf/perf-sys.h b/tools/perf/perf-sys.h
index 7ed72a475..e4b717e9e 100644
--- a/tools/perf/perf-sys.h
+++ b/tools/perf/perf-sys.h
@@ -20,7 +20,6 @@
#endif
#ifdef __powerpc__
-#include "../../arch/powerpc/include/uapi/asm/unistd.h"
#define CPUINFO_PROC {"cpu"}
#endif
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 13d414384..7aee954b3 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -1091,7 +1091,6 @@ static int __hpp__slsmg_color_printf(struct perf_hpp *hpp, const char *fmt, ...)
ret = scnprintf(hpp->buf, hpp->size, fmt, len, percent);
ui_browser__printf(arg->b, "%s", hpp->buf);
- advance_hpp(hpp, ret);
return ret;
}
@@ -2046,6 +2045,7 @@ void hist_browser__init(struct hist_browser *browser,
struct hists *hists)
{
struct perf_hpp_fmt *fmt;
+ struct perf_hpp_list_node *node;
browser->hists = hists;
browser->b.refresh = hist_browser__refresh;
@@ -2058,6 +2058,11 @@ void hist_browser__init(struct hist_browser *browser,
perf_hpp__reset_width(fmt, hists);
++browser->b.columns;
}
+ /* hierarchy entries have their own hpp list */
+ list_for_each_entry(node, &hists->hpp_formats, list) {
+ perf_hpp_list__for_each_format(&node->hpp, fmt)
+ perf_hpp__reset_width(fmt, hists);
+ }
}
struct hist_browser *hist_browser__new(struct hists *hists)
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index f04a63112..d0cae7540 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -628,14 +628,6 @@ hists__fprintf_hierarchy_headers(struct hists *hists,
struct perf_hpp *hpp,
FILE *fp)
{
- struct perf_hpp_list_node *fmt_node;
- struct perf_hpp_fmt *fmt;
-
- list_for_each_entry(fmt_node, &hists->hpp_formats, list) {
- perf_hpp_list__for_each_format(&fmt_node->hpp, fmt)
- perf_hpp__reset_width(fmt, hists);
- }
-
return print_hierarchy_header(hists, hpp, symbol_conf.field_sep, fp);
}
@@ -714,6 +706,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
bool use_callchain)
{
struct perf_hpp_fmt *fmt;
+ struct perf_hpp_list_node *node;
struct rb_node *nd;
size_t ret = 0;
const char *sep = symbol_conf.field_sep;
@@ -726,6 +719,11 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
hists__for_each_format(hists, fmt)
perf_hpp__reset_width(fmt, hists);
+ /* hierarchy entries have their own hpp list */
+ list_for_each_entry(node, &hists->hpp_formats, list) {
+ perf_hpp_list__for_each_format(&node->hpp, fmt)
+ perf_hpp__reset_width(fmt, hists);
+ }
if (symbol_conf.col_width_list_str)
perf_hpp__set_user_width(symbol_conf.col_width_list_str);
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
index 4f979bb27..7123f4de3 100644
--- a/tools/perf/util/data-convert-bt.c
+++ b/tools/perf/util/data-convert-bt.c
@@ -437,7 +437,7 @@ add_bpf_output_values(struct bt_ctf_event_class *event_class,
int ret;
if (nr_elements * sizeof(u32) != raw_size)
- pr_warning("Incorrect raw_size (%u) in bpf output event, skip %lu bytes\n",
+ pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n",
raw_size, nr_elements * sizeof(u32) - raw_size);
len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 8ff6c6a61..c9c8dc330 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -89,6 +89,7 @@ struct intel_pt_decoder {
bool pge;
bool have_tma;
bool have_cyc;
+ bool fixup_last_mtc;
uint64_t pos;
uint64_t last_ip;
uint64_t ip;
@@ -584,10 +585,31 @@ struct intel_pt_calc_cyc_to_tsc_info {
uint64_t tsc_timestamp;
uint64_t timestamp;
bool have_tma;
+ bool fixup_last_mtc;
bool from_mtc;
double cbr_cyc_to_tsc;
};
+/*
+ * MTC provides an 8-bit slice of CTC but the TMA packet only provides the lower
+ * 16 bits of CTC. If mtc_shift > 8 then some of the MTC bits are not in the CTC
+ * provided by the TMA packet. Fix-up the last_mtc calculated from the TMA
+ * packet by copying the missing bits from the current MTC assuming the least
+ * difference between the two, and that the current MTC comes after last_mtc.
+ */
+static void intel_pt_fixup_last_mtc(uint32_t mtc, int mtc_shift,
+ uint32_t *last_mtc)
+{
+ uint32_t first_missing_bit = 1U << (16 - mtc_shift);
+ uint32_t mask = ~(first_missing_bit - 1);
+
+ *last_mtc |= mtc & mask;
+ if (*last_mtc >= mtc) {
+ *last_mtc -= first_missing_bit;
+ *last_mtc &= 0xff;
+ }
+}
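
A concrete walk-through, as a standalone program driving a copy of the function above with invented values (mtc_shift = 9, so the TMA packet supplies only the low 7 of the 8 MTC bits):

#include <stdio.h>
#include <stdint.h>

static void fixup(uint32_t mtc, int mtc_shift, uint32_t *last_mtc)
{
	uint32_t first_missing_bit = 1U << (16 - mtc_shift);
	uint32_t mask = ~(first_missing_bit - 1);

	*last_mtc |= mtc & mask;
	if (*last_mtc >= mtc) {
		*last_mtc -= first_missing_bit;
		*last_mtc &= 0xff;
	}
}

int main(void)
{
	uint32_t last_mtc = 0x10;	/* from TMA: bit 7 of the MTC unknown */

	fixup(0x85, 9, &last_mtc);
	printf("%#x\n", last_mtc);	/* 0x10: closest value preceding 0x85 */

	last_mtc = 0x10;
	fixup(0x05, 9, &last_mtc);
	printf("%#x\n", last_mtc);	/* 0x90: wrapped, still precedes 0x05 */
	return 0;
}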
+
static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
{
struct intel_pt_decoder *decoder = pkt_info->decoder;
@@ -617,6 +639,11 @@ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
return 0;
mtc = pkt_info->packet.payload;
+ if (decoder->mtc_shift > 8 && data->fixup_last_mtc) {
+ data->fixup_last_mtc = false;
+ intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift,
+ &data->last_mtc);
+ }
if (mtc > data->last_mtc)
mtc_delta = mtc - data->last_mtc;
else
@@ -685,6 +712,7 @@ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
data->ctc_delta = 0;
data->have_tma = true;
+ data->fixup_last_mtc = true;
return 0;
@@ -751,6 +779,7 @@ static void intel_pt_calc_cyc_to_tsc(struct intel_pt_decoder *decoder,
.tsc_timestamp = decoder->tsc_timestamp,
.timestamp = decoder->timestamp,
.have_tma = decoder->have_tma,
+ .fixup_last_mtc = decoder->fixup_last_mtc,
.from_mtc = from_mtc,
.cbr_cyc_to_tsc = 0,
};
@@ -1241,6 +1270,7 @@ static void intel_pt_calc_tma(struct intel_pt_decoder *decoder)
}
decoder->ctc_delta = 0;
decoder->have_tma = true;
+ decoder->fixup_last_mtc = true;
intel_pt_log("CTC timestamp " x64_fmt " last MTC %#x CTC rem %#x\n",
decoder->ctc_timestamp, decoder->last_mtc, ctc_rem);
}
@@ -1255,6 +1285,12 @@ static void intel_pt_calc_mtc_timestamp(struct intel_pt_decoder *decoder)
mtc = decoder->packet.payload;
+ if (decoder->mtc_shift > 8 && decoder->fixup_last_mtc) {
+ decoder->fixup_last_mtc = false;
+ intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift,
+ &decoder->last_mtc);
+ }
+
if (mtc > decoder->last_mtc)
mtc_delta = mtc - decoder->last_mtc;
else
@@ -1323,6 +1359,8 @@ static void intel_pt_calc_cyc_timestamp(struct intel_pt_decoder *decoder)
timestamp, decoder->timestamp);
else
decoder->timestamp = timestamp;
+
+ decoder->timestamp_insn_cnt = 0;
}
/* Walk PSB+ packets when already in sync. */
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 551ff6f64..b2878d2b2 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -241,7 +241,7 @@ static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
}
queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
-
+next:
buffer = auxtrace_buffer__next(queue, buffer);
if (!buffer) {
if (old_buffer)
@@ -264,9 +264,6 @@ static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
return -ENOMEM;
- if (old_buffer)
- auxtrace_buffer__drop_data(old_buffer);
-
if (buffer->use_data) {
b->len = buffer->use_size;
b->buf = buffer->use_data;
@@ -276,6 +273,16 @@ static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
}
b->ref_timestamp = buffer->reference;
+ /*
+ * If in snapshot mode and the buffer has no usable data, get next
+ * buffer and again check overlap against old_buffer.
+ */
+ if (ptq->pt->snapshot_mode && !b->len)
+ goto next;
+
+ if (old_buffer)
+ auxtrace_buffer__drop_data(old_buffer);
+
if (!old_buffer || ptq->pt->sampling_mode || (ptq->pt->snapshot_mode &&
!buffer->consecutive)) {
b->consecutive = false;
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index a811c13a7..f77b31675 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -1113,9 +1113,8 @@ new_symbol:
* For misannotated, zeroed, ASM function sizes.
*/
if (nr > 0) {
- if (!symbol_conf.allow_aliases)
- symbols__fixup_duplicate(&dso->symbols[map->type]);
symbols__fixup_end(&dso->symbols[map->type]);
+ symbols__fixup_duplicate(&dso->symbols[map->type]);
if (kmap) {
/*
* We need to fixup this here too because we create new
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 37e8d20ae..f29f336ed 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -152,6 +152,9 @@ void symbols__fixup_duplicate(struct rb_root *symbols)
struct rb_node *nd;
struct symbol *curr, *next;
+ if (symbol_conf.allow_aliases)
+ return;
+
nd = rb_first(symbols);
while (nd) {
@@ -1234,8 +1237,8 @@ int __dso__load_kallsyms(struct dso *dso, const char *filename,
if (kallsyms__delta(map, filename, &delta))
return -1;
- symbols__fixup_duplicate(&dso->symbols[map->type]);
symbols__fixup_end(&dso->symbols[map->type]);
+ symbols__fixup_duplicate(&dso->symbols[map->type]);
if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
diff --git a/tools/spi/spidev_test.c b/tools/spi/spidev_test.c
index 8a73d8185..f3825b676 100644
--- a/tools/spi/spidev_test.c
+++ b/tools/spi/spidev_test.c
@@ -284,7 +284,7 @@ static void parse_opts(int argc, char *argv[])
static void transfer_escaped_string(int fd, char *str)
{
- size_t size = strlen(str + 1);
+ size_t size = strlen(str);
uint8_t *tx;
uint8_t *rx;