-rw-r--r--Documentation/ABI/testing/debugfs-aufs50
-rw-r--r--Documentation/ABI/testing/sysfs-aufs31
-rw-r--r--Documentation/cgroup-v2.txt6
-rw-r--r--Documentation/filesystems/aufs/README391
-rw-r--r--Documentation/filesystems/aufs/design/01intro.txt157
-rw-r--r--Documentation/filesystems/aufs/design/02struct.txt245
-rw-r--r--Documentation/filesystems/aufs/design/03atomic_open.txt72
-rw-r--r--Documentation/filesystems/aufs/design/03lookup.txt100
-rw-r--r--Documentation/filesystems/aufs/design/04branch.txt61
-rw-r--r--Documentation/filesystems/aufs/design/05wbr_policy.txt51
-rw-r--r--Documentation/filesystems/aufs/design/06fhsm.txt105
-rw-r--r--Documentation/filesystems/aufs/design/06mmap.txt59
-rw-r--r--Documentation/filesystems/aufs/design/06xattr.txt81
-rw-r--r--Documentation/filesystems/aufs/design/07export.txt45
-rw-r--r--Documentation/filesystems/aufs/design/08shwh.txt39
-rw-r--r--Documentation/filesystems/aufs/design/10dynop.txt34
-rw-r--r--Documentation/scheduler/sched-BFS.txt357
-rw-r--r--Documentation/sysctl/kernel.txt26
-rw-r--r--MAINTAINERS109
-rw-r--r--Makefile2
-rw-r--r--arch/arc/boot/dts/axs10x_mb.dtsi8
-rw-r--r--arch/arc/include/asm/bitops.h15
-rw-r--r--arch/arc/include/asm/io.h18
-rw-r--r--arch/arm/boot/dts/at91-sama5d3_xplained.dts1
-rw-r--r--arch/arm/boot/dts/at91-sama5d4_xplained.dts1
-rw-r--r--arch/arm64/include/asm/pgtable.h3
-rw-r--r--arch/ia64/include/asm/io.h1
-rw-r--r--arch/powerpc/platforms/cell/spufs/sched.c5
-rw-r--r--arch/s390/include/asm/pci.h2
-rw-r--r--arch/s390/kernel/entry.S106
-rw-r--r--arch/s390/kernel/setup.c1
-rw-r--r--arch/s390/pci/pci.c5
-rw-r--r--arch/sh/mm/kmap.c2
-rw-r--r--arch/um/drivers/mconsole_kern.c2
-rw-r--r--arch/x86/Kconfig45
-rw-r--r--arch/x86/entry/common.c23
-rw-r--r--arch/x86/include/asm/apic.h2
-rw-r--r--arch/x86/include/asm/hw_irq.h1
-rw-r--r--arch/x86/include/asm/microcode.h26
-rw-r--r--arch/x86/include/asm/perf_event.h1
-rw-r--r--arch/x86/include/asm/xen/hypervisor.h2
-rw-r--r--arch/x86/kernel/apic/vector.c88
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c38
-rw-r--r--arch/x86/kernel/cpu/perf_event.c13
-rw-r--r--arch/x86/kernel/cpu/perf_event.h3
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c27
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c24
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c8
-rw-r--r--arch/x86/kernel/cpu/perf_event_knc.c4
-rw-r--r--arch/x86/kernel/ioport.c29
-rw-r--r--arch/x86/kernel/process_64.c12
-rw-r--r--arch/x86/kvm/i8254.c12
-rw-r--r--arch/x86/kvm/vmx.c16
-rw-r--r--arch/x86/kvm/x86.c1
-rw-r--r--arch/x86/mm/tlb.c12
-rw-r--r--arch/x86/pci/fixup.c7
-rw-r--r--arch/x86/xen/enlighten.c2
-rw-r--r--arch/xtensa/kernel/head.S2
-rw-r--r--arch/xtensa/mm/cache.c8
-rw-r--r--arch/xtensa/platforms/iss/console.c10
-rw-r--r--block/blk-core.c2
-rw-r--r--crypto/asymmetric_keys/x509_cert_parser.c8
-rw-r--r--crypto/keywrap.c4
-rw-r--r--drivers/acpi/resource.c14
-rw-r--r--drivers/acpi/sleep.c1
-rw-r--r--drivers/block/brd.c2
-rw-r--r--drivers/block/loop.c18
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c267
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h11
-rw-r--r--drivers/bluetooth/ath3k.c8
-rw-r--r--drivers/bluetooth/btusb.c4
-rw-r--r--drivers/char/tpm/tpm-chip.c14
-rw-r--r--drivers/char/tpm/tpm_crb.c4
-rw-r--r--drivers/char/tpm/tpm_eventlog.c14
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c12
-rw-r--r--drivers/clk/rockchip/clk-rk3188.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3368.c48
-rw-r--r--drivers/cpufreq/cpufreq.c7
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c6
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c5
-rw-r--r--drivers/cpufreq/intel_pstate.c9
-rw-r--r--drivers/crypto/atmel-aes.c4
-rw-r--r--drivers/crypto/atmel-sha.c4
-rw-r--r--drivers/crypto/atmel-tdes.c4
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-cmac.c36
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c40
-rw-r--r--drivers/crypto/ccp/ccp-crypto.h22
-rw-r--r--drivers/crypto/marvell/cesa.c2
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c4
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c4
-rw-r--r--drivers/edac/amd64_edac.c2
-rw-r--r--drivers/edac/sb_edac.c26
-rw-r--r--drivers/firmware/broadcom/bcm47xx_nvram.c5
-rw-r--r--drivers/gpio/gpio-pca953x.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c5
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c19
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c7
-rw-r--r--drivers/hid/hid-core.c8
-rw-r--r--drivers/hid/hid-multitouch.c5
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c16
-rw-r--r--drivers/idle/intel_idle.c108
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c122
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h2
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c59
-rw-r--r--drivers/input/misc/ati_remote2.c36
-rw-r--r--drivers/input/misc/ims-pcu.c4
-rw-r--r--drivers/input/misc/powermate.c3
-rw-r--r--drivers/input/mouse/synaptics.c5
-rw-r--r--drivers/md/bcache/super.c46
-rw-r--r--drivers/md/dm-cache-metadata.c98
-rw-r--r--drivers/md/dm-cache-metadata.h4
-rw-r--r--drivers/md/dm-cache-target.c12
-rw-r--r--drivers/md/dm-snap.c9
-rw-r--r--drivers/md/dm-table.c36
-rw-r--r--drivers/md/dm-thin-metadata.c5
-rw-r--r--drivers/md/dm.c15
-rw-r--r--drivers/md/multipath.c4
-rw-r--r--drivers/md/raid1.c7
-rw-r--r--drivers/md/raid10.c7
-rw-r--r--drivers/md/raid5.c51
-rw-r--r--drivers/md/raid5.h4
-rw-r--r--drivers/media/i2c/adv7511.c21
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c26
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c18
-rw-r--r--drivers/media/platform/coda/coda-bit.c2
-rw-r--r--drivers/media/usb/pwc/pwc-if.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c21
-rw-r--r--drivers/misc/mei/bus.c9
-rw-r--r--drivers/mmc/card/block.c24
-rw-r--r--drivers/mmc/host/atmel-mci.c2
-rw-r--r--drivers/mmc/host/mmc_spi.c6
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c6
-rw-r--r--drivers/mmc/host/sdhci-tegra.c14
-rw-r--r--drivers/mmc/host/sdhci.c142
-rw-r--r--drivers/mtd/onenand/onenand_base.c3
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c2
-rw-r--r--drivers/net/irda/irtty-sir.c10
-rw-r--r--drivers/net/rionet.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c38
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h20
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c1
-rw-r--r--drivers/nvdimm/bus.c8
-rw-r--r--drivers/nvdimm/pmem.c11
-rw-r--r--drivers/of/of_reserved_mem.c4
-rw-r--r--drivers/pci/probe.c14
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c2
-rw-r--r--drivers/platform/x86/ideapad-laptop.c14
-rw-r--r--drivers/scsi/NCR5380.c133
-rw-r--r--drivers/scsi/aacraid/aacraid.h2
-rw-r--r--drivers/scsi/aacraid/commsup.c37
-rw-r--r--drivers/scsi/aacraid/linit.c12
-rw-r--r--drivers/scsi/aacraid/src.c30
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c1
-rw-r--r--drivers/scsi/atari_NCR5380.c133
-rw-r--r--drivers/scsi/be2iscsi/be_main.c1
-rw-r--r--drivers/scsi/scsi_common.c12
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/scsi/sg.c3
-rw-r--r--drivers/scsi/storvsc_drv.c5
-rw-r--r--drivers/staging/android/ion/ion_test.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c12
-rw-r--r--drivers/staging/comedi/drivers/ni_tiocmd.c2
-rw-r--r--drivers/target/target_core_transport.c2
-rw-r--r--drivers/thermal/thermal_core.c13
-rw-r--r--drivers/tty/serial/8250/8250_port.c18
-rw-r--r--drivers/usb/class/cdc-acm.c3
-rw-r--r--drivers/usb/core/driver.c6
-rw-r--r--drivers/usb/core/hub.c16
-rw-r--r--drivers/usb/misc/iowarrior.c6
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/cypress_m8.c11
-rw-r--r--drivers/usb/serial/digi_acceleport.c19
-rw-r--r--drivers/usb/serial/ftdi_sio.c4
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h8
-rw-r--r--drivers/usb/serial/mct_u232.c9
-rw-r--r--drivers/usb/serial/option.c2
-rw-r--r--drivers/usb/storage/uas.c2
-rw-r--r--drivers/watchdog/rc32434_wdt.c2
-rw-r--r--fs/Kconfig1
-rw-r--r--fs/Makefile1
-rw-r--r--fs/aufs/Kconfig185
-rw-r--r--fs/aufs/Makefile36
-rw-r--r--fs/aufs/aufs.h46
-rw-r--r--fs/aufs/branch.c1394
-rw-r--r--fs/aufs/branch.h266
-rw-r--r--fs/aufs/cpup.c1366
-rw-r--r--fs/aufs/cpup.h81
-rw-r--r--fs/aufs/dbgaufs.c419
-rw-r--r--fs/aufs/dbgaufs.h35
-rw-r--r--fs/aufs/dcsub.c211
-rw-r--r--fs/aufs/dcsub.h123
-rw-r--r--fs/aufs/debug.c425
-rw-r--r--fs/aufs/debug.h212
-rw-r--r--fs/aufs/dentry.c1123
-rw-r--r--fs/aufs/dentry.h221
-rw-r--r--fs/aufs/dinfo.c537
-rw-r--r--fs/aufs/dir.c743
-rw-r--r--fs/aufs/dir.h118
-rw-r--r--fs/aufs/dynop.c356
-rw-r--r--fs/aufs/dynop.h61
-rw-r--r--fs/aufs/export.c817
-rw-r--r--fs/aufs/f_op.c757
-rw-r--r--fs/aufs/fhsm.c412
-rw-r--r--fs/aufs/file.c831
-rw-r--r--fs/aufs/file.h278
-rw-r--r--fs/aufs/finfo.c143
-rw-r--r--fs/aufs/fstype.h387
-rw-r--r--fs/aufs/hfsnotify.c275
-rw-r--r--fs/aufs/hfsplus.c43
-rw-r--r--fs/aufs/hnotify.c697
-rw-r--r--fs/aufs/i_op.c1406
-rw-r--r--fs/aufs/i_op_add.c919
-rw-r--r--fs/aufs/i_op_del.c497
-rw-r--r--fs/aufs/i_op_ren.c1002
-rw-r--r--fs/aufs/iinfo.c264
-rw-r--r--fs/aufs/inode.c514
-rw-r--r--fs/aufs/inode.h672
-rw-r--r--fs/aufs/ioctl.c206
-rw-r--r--fs/aufs/loop.c133
-rw-r--r--fs/aufs/loop.h39
-rw-r--r--fs/aufs/magic.mk30
-rw-r--r--fs/aufs/module.c207
-rw-r--r--fs/aufs/module.h92
-rw-r--r--fs/aufs/mvdown.c690
-rw-r--r--fs/aufs/opts.c1846
-rw-r--r--fs/aufs/opts.h198
-rw-r--r--fs/aufs/plink.c515
-rw-r--r--fs/aufs/poll.c39
-rw-r--r--fs/aufs/posix_acl.c85
-rw-r--r--fs/aufs/procfs.c156
-rw-r--r--fs/aufs/rdu.c376
-rw-r--r--fs/aufs/rwsem.h178
-rw-r--r--fs/aufs/sbinfo.c337
-rw-r--r--fs/aufs/spl.h98
-rw-r--r--fs/aufs/super.c1026
-rw-r--r--fs/aufs/super.h619
-rw-r--r--fs/aufs/sysaufs.c91
-rw-r--r--fs/aufs/sysaufs.h88
-rw-r--r--fs/aufs/sysfs.c340
-rw-r--r--fs/aufs/sysrq.c144
-rw-r--r--fs/aufs/vdir.c875
-rw-r--r--fs/aufs/vfsub.c871
-rw-r--r--fs/aufs/vfsub.h297
-rw-r--r--fs/aufs/wbr_policy.c752
-rw-r--r--fs/aufs/whout.c1047
-rw-r--r--fs/aufs/whout.h72
-rw-r--r--fs/aufs/wkq.c200
-rw-r--r--fs/aufs/wkq.h78
-rw-r--r--fs/aufs/xattr.c331
-rw-r--r--fs/aufs/xino.c1305
-rw-r--r--fs/coredump.c30
-rw-r--r--fs/dcache.c3
-rw-r--r--fs/exec.c1
-rw-r--r--fs/exfat/exfat_super.c23
-rw-r--r--fs/fcntl.c5
-rw-r--r--fs/fhandle.c2
-rw-r--r--fs/file_table.c4
-rw-r--r--fs/fs-writeback.c37
-rw-r--r--fs/fuse/cuse.c4
-rw-r--r--fs/fuse/file.c33
-rw-r--r--fs/fuse/fuse_i.h9
-rw-r--r--fs/jbd2/journal.c17
-rw-r--r--fs/namespace.c2
-rw-r--r--fs/nfsd/nfs4proc.c1
-rw-r--r--fs/nfsd/nfs4xdr.c13
-rw-r--r--fs/notify/group.c4
-rw-r--r--fs/notify/mark.c4
-rw-r--r--fs/ocfs2/cluster/heartbeat.c4
-rw-r--r--fs/ocfs2/dlm/dlmconvert.c24
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c1
-rw-r--r--fs/open.c8
-rw-r--r--fs/proc/base.c4
-rw-r--r--fs/proc/nommu.c5
-rw-r--r--fs/proc/task_mmu.c7
-rw-r--r--fs/proc/task_nommu.c5
-rw-r--r--fs/proc_namespace.c2
-rw-r--r--fs/quota/dquot.c3
-rw-r--r--fs/read_write.c24
-rw-r--r--fs/splice.c15
-rw-r--r--fs/xattr.c1
-rw-r--r--fs/xfs/xfs_attr_list.c19
-rw-r--r--include/asm-generic/bitops/lock.h14
-rw-r--r--include/linux/cgroup-defs.h3
-rw-r--r--include/linux/device-mapper.h2
-rw-r--r--include/linux/file.h1
-rw-r--r--include/linux/fs.h10
-rw-r--r--include/linux/init_task.h76
-rw-r--r--include/linux/ioprio.h2
-rw-r--r--include/linux/jiffies.h4
-rw-r--r--include/linux/kernel.h6
-rw-r--r--include/linux/mm.h22
-rw-r--r--include/linux/mm_types.h2
-rw-r--r--include/linux/pci.h1
-rw-r--r--include/linux/sched.h91
-rw-r--r--include/linux/sched/prio.h12
-rw-r--r--include/linux/splice.h6
-rw-r--r--include/linux/thermal.h2
-rw-r--r--include/linux/tty.h2
-rw-r--r--include/sound/hdaudio.h2
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/aufs_type.h406
-rw-r--r--include/uapi/linux/sched.h9
-rw-r--r--init/Kconfig41
-rw-r--r--init/main.c3
-rw-r--r--kernel/cgroup.c20
-rw-r--r--kernel/delayacct.c2
-rw-r--r--kernel/events/core.c7
-rw-r--r--kernel/exit.c2
-rw-r--r--kernel/fork.c2
-rw-r--r--kernel/power/hibernate.c1
-rw-r--r--kernel/sched/Makefile11
-rw-r--r--kernel/sched/bfs.c7609
-rw-r--r--kernel/sched/bfs_sched.h181
-rw-r--r--kernel/sched/core.c1
-rw-r--r--kernel/sched/idle.c4
-rw-r--r--kernel/sched/sched.h13
-rw-r--r--kernel/sched/stats.c4
-rw-r--r--kernel/sysctl.c41
-rw-r--r--kernel/sysctl_binary.c2
-rw-r--r--kernel/task_work.c1
-rw-r--r--kernel/time/Kconfig2
-rw-r--r--kernel/time/posix-cpu-timers.c10
-rw-r--r--kernel/trace/trace.c5
-rw-r--r--kernel/trace/trace_irqsoff.c8
-rw-r--r--kernel/trace/trace_printk.c3
-rw-r--r--kernel/trace/trace_selftest.c5
-rw-r--r--kernel/watchdog.c9
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--mm/Makefile2
-rw-r--r--mm/filemap.c2
-rw-r--r--mm/memcontrol.c44
-rw-r--r--mm/memory.c2
-rw-r--r--mm/mmap.c17
-rw-r--r--mm/nommu.c10
-rw-r--r--mm/page_alloc.c46
-rw-r--r--mm/prfile.c86
-rw-r--r--net/bluetooth/mgmt.c4
-rw-r--r--scripts/coccinelle/iterators/use_after_iter.cocci2
-rw-r--r--scripts/gdb/linux/modules.py5
-rw-r--r--scripts/gdb/linux/symbols.py2
-rw-r--r--scripts/kconfig/Makefile4
-rwxr-xr-xscripts/package/mkspec8
-rw-r--r--security/commoncap.c2
-rw-r--r--security/device_cgroup.c2
-rw-r--r--security/security.c10
-rw-r--r--sound/core/pcm_lib.c2
-rw-r--r--sound/hda/hdac_device.c16
-rw-r--r--sound/hda/hdac_regmap.c69
-rw-r--r--sound/pci/hda/patch_cirrus.c8
-rw-r--r--sound/pci/hda/patch_conexant.c7
-rw-r--r--sound/pci/hda/patch_hdmi.c43
-rw-r--r--sound/pci/hda/patch_realtek.c1
-rw-r--r--sound/pci/intel8x0.c1
-rw-r--r--sound/usb/clock.c2
-rw-r--r--sound/usb/endpoint.c3
-rw-r--r--sound/usb/mixer_quirks.c4
-rw-r--r--sound/usb/pcm.c2
-rw-r--r--sound/usb/quirks.c27
-rw-r--r--sound/usb/stream.c6
-rw-r--r--tools/hv/Makefile2
-rw-r--r--tools/perf/util/parse-events.c6
-rw-r--r--tools/perf/util/pmu.c15
-rw-r--r--tools/perf/util/setup.py4
-rw-r--r--virt/kvm/kvm_main.c21
374 files changed, 44306 insertions, 1327 deletions
diff --git a/Documentation/ABI/testing/debugfs-aufs b/Documentation/ABI/testing/debugfs-aufs
new file mode 100644
index 000000000..99642d105
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-aufs
@@ -0,0 +1,50 @@
+What: /debug/aufs/si_<id>/
+Date: March 2009
+Contact: J. R. Okajima <hooanon05g@gmail.com>
+Description:
+ Under /debug/aufs, a directory named si_<id> is created
+ per aufs mount, where <id> is a unique id generated
+ internally.
+
+What: /debug/aufs/si_<id>/plink
+Date: Apr 2013
+Contact: J. R. Okajima <hooanon05g@gmail.com>
+Description:
+	It has three lines and shows information about the
+	pseudo-links. The first line is a single number
+	representing the number of buckets. The second line is the
+	number of pseudo-links per bucket (separated by
+	blanks). The last line is a single number representing the
+	total number of pseudo-links.
+	When the aufs mount option 'noplink' is specified, it
+	will show "1\n0\n0\n".
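+	For example (the numbers below are arbitrary and for
+	illustration only), a mount with three buckets and two
+	pseudo-links might show:
+	  3
+	  1 0 1
+	  2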
+
+What: /debug/aufs/si_<id>/xib
+Date: March 2009
+Contact: J. R. Okajima <hooanon05g@gmail.com>
+Description:
+ It shows the consumed blocks by xib (External Inode Number
+ Bitmap), its block size and file size.
+ When the aufs mount option 'noxino' is specified, it
+ will be empty. About XINO files, see the aufs manual.
+
+What: /debug/aufs/si_<id>/xino0, xino1 ... xinoN
+Date: March 2009
+Contact: J. R. Okajima <hooanon05g@gmail.com>
+Description:
+ It shows the consumed blocks by xino (External Inode Number
+ Translation Table), its link count, block size and file
+ size.
+ When the aufs mount option 'noxino' is specified, it
+ will be empty. About XINO files, see the aufs manual.
+
+What: /debug/aufs/si_<id>/xigen
+Date: March 2009
+Contact: J. R. Okajima <hooanon05g@gmail.com>
+Description:
+ It shows the consumed blocks by xigen (External Inode
+ Generation Table), its block size and file size.
+ If CONFIG_AUFS_EXPORT is disabled, this entry will not
+ be created.
+ When the aufs mount option 'noxino' is specified, it
+ will be empty. About XINO files, see the aufs manual.
diff --git a/Documentation/ABI/testing/sysfs-aufs b/Documentation/ABI/testing/sysfs-aufs
new file mode 100644
index 000000000..82f951849
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-aufs
@@ -0,0 +1,31 @@
+What: /sys/fs/aufs/si_<id>/
+Date: March 2009
+Contact: J. R. Okajima <hooanon05g@gmail.com>
+Description:
+ Under /sys/fs/aufs, a directory named si_<id> is created
+ per aufs mount, where <id> is a unique id generated
+ internally.
+
+What: /sys/fs/aufs/si_<id>/br0, br1 ... brN
+Date: March 2009
+Contact: J. R. Okajima <hooanon05g@gmail.com>
+Description:
+	It shows the absolute path of a member directory (which
+ is called branch) in aufs, and its permission.
+
+What: /sys/fs/aufs/si_<id>/brid0, brid1 ... bridN
+Date: July 2013
+Contact: J. R. Okajima <hooanon05g@gmail.com>
+Description:
+ It shows the id of a member directory (which is called
+ branch) in aufs.
+
+What: /sys/fs/aufs/si_<id>/xi_path
+Date: March 2009
+Contact: J. R. Okajima <hooanon05g@gmail.com>
+Description:
+	It shows the absolute path of XINO (External Inode Number
+ Bitmap, Translation Table and Generation Table) file
+ even if it is the default path.
+ When the aufs mount option 'noxino' is specified, it
+ will be empty. About XINO files, see the aufs manual.
diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt
index ff49cf901..81eb37821 100644
--- a/Documentation/cgroup-v2.txt
+++ b/Documentation/cgroup-v2.txt
@@ -1368,6 +1368,12 @@ system than killing the group. Otherwise, memory.max is there to
limit this type of spillover and ultimately contain buggy or even
malicious applications.
+Setting the original memory.limit_in_bytes below the current usage was
+subject to a race condition, where concurrent charges could cause the
+limit setting to fail. memory.max on the other hand will first set the
+limit to prevent new charges, and then reclaim and OOM kill until the
+new limit is met - or the task writing to memory.max is killed.
+
The combined memory+swap accounting and limiting is replaced by real
control over swap space.
diff --git a/Documentation/filesystems/aufs/README b/Documentation/filesystems/aufs/README
new file mode 100644
index 000000000..ed1bafe08
--- /dev/null
+++ b/Documentation/filesystems/aufs/README
@@ -0,0 +1,391 @@
+
+Aufs4 -- advanced multi layered unification filesystem version 4.x
+http://aufs.sf.net
+Junjiro R. Okajima
+
+
+0. Introduction
+----------------------------------------
+In the early days, aufs was an entirely re-designed and re-implemented
+version of the Unionfs 1.x series. By adding many original ideas,
+approaches, improvements and implementations, it became totally different
+from Unionfs while keeping the basic features.
+More recently, the Unionfs 2.x series began taking some of the same
+approaches as aufs1.
+Unionfs is being developed by Professor Erez Zadok at Stony Brook
+University and his team.
+
+Aufs4 supports linux-4.0 and later; for the linux-3.x series try aufs3.
+If you need support for older kernel versions, try the aufs2-2.6.git or
+aufs2-standalone.git repositories, or aufs1 from CVS on SourceForge.
+
+Note: it became clear that "Aufs was rejected. Let's give it up."
+ According to Christoph Hellwig, linux rejects all union-type
+ filesystems but UnionMount.
+<http://marc.info/?l=linux-kernel&m=123938533724484&w=2>
+
+PS. Al Viro seems to have a plan to merge aufs as well as overlayfs and
+ UnionMount, and he pointed out an issue around a directory mutex
+ lock which aufs addressed. But it is still unclear whether aufs (or
+ any other union solution) will be merged.
+<http://marc.info/?l=linux-kernel&m=136312705029295&w=1>
+
+
+1. Features
+----------------------------------------
+- unite several directories into a single virtual filesystem. The member
+ directory is called a branch.
+- you can specify permission flags for each branch: 'readonly',
+ 'readwrite' and 'whiteout-able.'
+- with an upper writable branch, internal copy-up and whiteout, files/dirs
+ on a readonly branch become logically modifiable.
+- dynamic branch manipulation: add, delete.
+- etc...
+
+Also there are many enhancements in aufs, such as:
+- test only the highest one for the directory permission (dirperm1)
+- copyup on open (coo=)
+- 'move' policy for copy-up between two writable branches, after
+ checking free space.
+- xattr, acl
+- readdir(3) in userspace.
+- keep inode number by external inode number table
+- keep the timestamps of file/dir in internal copyup operation
+- seekable directory, supporting NFS readdir.
+- whiteout is hardlinked in order to reduce the consumption of inodes
+ on branch
+- do not copyup, nor create a whiteout when it is unnecessary
+- revert a single systemcall when an error occurs in aufs
+- remount interface instead of ioctl
+- maintain /etc/mtab by an external command, /sbin/mount.aufs.
+- loopback mounted filesystem as a branch
+- a kernel thread for removing a dir which has plenty of whiteouts
+- support copyup sparse file (a file which has a 'hole' in it)
+- default permission flags for branches
+- selectable permission flags for ro branch, whether whiteout can
+ exist or not
+- export via NFS.
+- support <sysfs>/fs/aufs and <debugfs>/aufs.
+- support multiple writable branches, some policies to select one
+ among multiple writable branches.
+- a new semantics for link(2) and rename(2) to support multiple
+ writable branches.
+- no glibc changes are required.
+- pseudo hardlink (hardlink over branches)
+- allow direct manual access to a file on a branch, i.e. bypassing aufs,
+ including NFS or remote filesystem branches.
+- userspace wrapper for pathconf(3)/fpathconf(3) with _PC_LINK_MAX.
+- and more...
+
+Currently these features are temporarily dropped from aufs4.
+See design/08plan.txt for details.
+- nested mount, i.e. aufs as readonly no-whiteout branch of another aufs
+ (robr)
+- statistics of aufs thread (/sys/fs/aufs/stat)
+
+Features, or just ideas, for the future (see also design/*.txt):
+- reorder the branch index without del/re-add.
+- permanent xino files for NFSD
+- an option for refreshing the opened files after add/del branches
+- light version, without branch manipulation. (unnecessary?)
+- copyup in userspace
+- inotify in userspace
+- readv/writev
+
+
+2. Download
+----------------------------------------
+There are three GIT trees for aufs4, aufs4-linux.git,
+aufs4-standalone.git, and aufs-util.git. Note that there is no "4" in
+"aufs-util.git."
+While aufs-util is always necessary, you need either aufs4-linux
+or aufs4-standalone.
+
+The aufs4-linux tree includes the whole linux mainline GIT tree,
+git://git.kernel.org/.../torvalds/linux.git.
+And you cannot select CONFIG_AUFS_FS=m for this version, i.e. you cannot
+build aufs4 as an external kernel module.
+Several extra patches are not included in this tree. Only
+aufs4-standalone tree contains them. They are described in the later
+section "Configuration and Compilation."
+
+On the other hand, the aufs4-standalone tree has only aufs source files
+and necessary patches, and you can select CONFIG_AUFS_FS=m.
+But you need to apply all aufs patches manually.
+
+You will find GIT branches whose names are in the form "aufs4.x", where
+"x" represents the linux kernel version, "linux-4.x". For instance,
+"aufs4.0" is for linux-4.0. For the latest "linux-4.x-rcN", use the
+"aufs4.x-rcN" branch.
+
+o aufs4-linux tree
+$ git clone --reference /your/linux/git/tree \
+ git://github.com/sfjro/aufs4-linux.git aufs4-linux.git
+- if you don't have linux GIT tree, then remove "--reference ..."
+$ cd aufs4-linux.git
+$ git checkout origin/aufs4.0
+
+Or you may want to git-pull aufs directly into your linux GIT tree and
+leave the patch-work to GIT.
+$ cd /your/linux/git/tree
+$ git remote add aufs4 git://github.com/sfjro/aufs4-linux.git
+$ git fetch aufs4
+$ git checkout -b my4.0 v4.0
+$ (add your local change...)
+$ git pull aufs4 aufs4.0
+- now you have v4.0 + your_changes + aufs4.0 in your my4.0 branch.
+- you may need to solve some conflicts between your_changes and
+ aufs4.0. In this case, git-rerere is recommended so that you can
+ solve similar conflicts automatically when you upgrade to 4.1 or
+ later in the future.
+
+o aufs4-standalone tree
+$ git clone git://github.com/sfjro/aufs4-standalone.git aufs4-standalone.git
+$ cd aufs4-standalone.git
+$ git checkout origin/aufs4.0
+
+o aufs-util tree
+$ git clone git://git.code.sf.net/p/aufs/aufs-util aufs-util.git
+- note that the public aufs-util.git is on SourceForge instead of
+ GitHUB.
+$ cd aufs-util.git
+$ git checkout origin/aufs4.0
+
+Note: The 4.x-rcN branch is to be used with `rc' kernel versions ONLY.
+The minor version number, the 'x' in '4.x', of aufs may not always
+follow the minor version number of the kernel, because changes in the
+kernel that cause the use of a new minor version number do not always
+require changes to aufs-util.
+
+Since aufs-util has its own minor version number, you may not be
+able to find a GIT branch in aufs-util for your kernel's
+exact minor version number.
+In this case, you should git-checkout the branch for the
+nearest lower number.
+
+For (an unreleased) example:
+If you are using "linux-4.10" and the "aufs4.10" branch
+does not exist in aufs-util repository, then "aufs4.9", "aufs4.8"
+or something numerically smaller is the branch for your kernel.
+
+Also you can view all branches by
+ $ git branch -a
+
+
+3. Configuration and Compilation
+----------------------------------------
+Make sure you have git-checkout'ed the correct branch.
+
+For aufs4-linux tree,
+- enable CONFIG_AUFS_FS.
+- set other aufs configurations if necessary.
+
+For aufs4-standalone tree,
+There are several ways to build.
+
+1.
+- apply ./aufs4-kbuild.patch to your kernel source files.
+- apply ./aufs4-base.patch too.
+- apply ./aufs4-mmap.patch too.
+- apply ./aufs4-standalone.patch too, if you have a plan to set
+  CONFIG_AUFS_FS=m. Otherwise you don't need ./aufs4-standalone.patch.
+- copy ./{Documentation,fs,include/uapi/linux/aufs_type.h} files to your
+ kernel source tree. Never copy $PWD/include/uapi/linux/Kbuild.
+- enable CONFIG_AUFS_FS, you can select either
+ =m or =y.
+- and build your kernel as usual.
+- install the built kernel.
+ Note: Since linux-3.9, every filesystem module requires an alias
+ "fs-<fsname>". You should make sure that "fs-aufs" is listed in your
+ modules.aliases file if you set CONFIG_AUFS_FS=m.
+- install the header files too by "make headers_install" to the
+  directory you specify. By default, it is $PWD/usr.
+ "make help" shows a brief note for headers_install.
+- and reboot your system.
+
+2.
+- module only (CONFIG_AUFS_FS=m).
+- apply ./aufs4-base.patch to your kernel source files.
+- apply ./aufs4-mmap.patch too.
+- apply ./aufs4-standalone.patch too.
+- build your kernel, don't forget "make headers_install", and reboot.
+- edit ./config.mk and set other aufs configurations if necessary.
+  Note: You should read $PWD/fs/aufs/Kconfig carefully; it describes
+  every aufs configuration.
+- build the module by simple "make".
+ Note: Since linux-3.9, every filesystem module requires an alias
+ "fs-<fsname>". You should make sure that "fs-aufs" is listed in your
+ modules.aliases file.
+- you can specify ${KDIR} make variable which points to your kernel
+ source tree.
+- install the files
+ + run "make install" to install the aufs module, or copy the built
+ $PWD/aufs.ko to /lib/modules/... and run depmod -a (or reboot simply).
+ + run "make install_headers" (instead of headers_install) to install
+ the modified aufs header file (you can specify DESTDIR which is
+ available in aufs standalone version's Makefile only), or copy
+ $PWD/usr/include/linux/aufs_type.h to /usr/include/linux or wherever
+ you like manually. By default, the target directory is $PWD/usr.
+- no need to apply aufs4-kbuild.patch, nor copying source files to your
+ kernel source tree.
+
+Note: The header file aufs_type.h is necessary to build aufs-util
+ as well as "make headers_install" in the kernel source tree.
+	headers_install tends to be forgotten, but it is essentially
+	necessary, not only for building aufs-util.
+	You may not run into problems without headers_install with some
+	older versions, though.
+
+And then,
+- read README in aufs-util, build and install it
+- note that your distribution may contain an obsoleted version of
+ aufs_type.h in /usr/include/linux or something. When you build aufs
+  utilities, make sure that your compiler refers to the correct aufs
+ file which is built by "make headers_install."
+- if you want to use readdir(3) in userspace or the pathconf(3) wrapper,
+  then run "make install_ulib" too. And refer to the aufs manual for
+  details.
+
+There are several other patches in aufs4-standalone.git. They are all
+optional. When you meet some problems, they will help you.
+- aufs4-loopback.patch
+ Supports a nested loopback mount in a branch-fs. This patch is
+ unnecessary until aufs produces a message like "you may want to try
+ another patch for loopback file".
+- vfs-ino.patch
+ Modifies a system global kernel internal function get_next_ino() in
+ order to stop assigning 0 for an inode-number. Not directly related to
+ aufs, but recommended generally.
+- tmpfs-idr.patch
+ Keeps the tmpfs inode number as the lowest value. Effective to reduce
+ the size of aufs XINO files for tmpfs branch. Also it prevents the
+ duplication of inode number, which is important for backup tools and
+ other utilities. When you find aufs XINO files for tmpfs branch
+ growing too much, try this patch.
+- lockdep-debug.patch
+ Because aufs is not only an ordinary filesystem (callee of VFS), but
+ also a caller of VFS functions for branch filesystems, subclassing of
+ the internal locks for LOCKDEP is necessary. LOCKDEP is a debugging
+ feature of linux kernel. If you enable CONFIG_LOCKDEP, then you will
+ need to apply this debug patch to expand several constant values.
+  If you don't know what LOCKDEP is, then you don't have to apply this patch.
+
+
+4. Usage
+----------------------------------------
+At first, make sure aufs-util is installed, and please read the aufs
+manual, aufs.5 in aufs-util.git tree.
+$ man -l aufs.5
+
+And then,
+$ mkdir /tmp/rw /tmp/aufs
+# mount -t aufs -o br=/tmp/rw:${HOME} none /tmp/aufs
+
+Here is another example. The result is equivalent.
+# mount -t aufs -o br=/tmp/rw=rw:${HOME}=ro none /tmp/aufs
+ Or
+# mount -t aufs -o br:/tmp/rw none /tmp/aufs
+# mount -o remount,append:${HOME} /tmp/aufs
+
+Then you can see the whole tree of your home dir through /tmp/aufs. If
+you modify a file under /tmp/aufs, the one in your home directory is
+not affected; instead a file with the same name will be newly created
+under /tmp/rw. And all of your modifications to a file will be applied
+to the one under /tmp/rw. This is called the file-based Copy on Write
+(COW) method.
+Aufs mount options are described in aufs.5.
+If you run chroot or something and make your aufs the root directory,
+then you need to customize the shutdown script. See the aufs manual for
+details.
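+
+For illustration only (the file name below is just an example), writing a
+new file through the union shows the copy-on-write behaviour:
+$ echo hello > /tmp/aufs/newfile
+$ ls /tmp/rw
+newfile
+- your home directory is left untouched; the new file lives on the
+  writable branch /tmp/rw only.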
+
+Additionally, there are some sample usages of aufs, such as a
+diskless system with network booting, and a LiveCD over NFS.
+See the sample dir in the CVS tree on SourceForge.
+
+
+5. Contact
+----------------------------------------
+When you have any problems or strange behaviour in aufs, please let me
+know with:
+- /proc/mounts (instead of the output of mount(8))
+- /sys/module/aufs/*
+- /sys/fs/aufs/* (if you have them)
+- /debug/aufs/* (if you have them)
+- linux kernel version
+  if your kernel is not plain, for example modified by a distributor,
+  the URL where I can download its source is necessary too.
+- aufs version which was printed at loading the module or booting the
+ system, instead of the date you downloaded.
+- configuration (define/undefine CONFIG_AUFS_xxx)
+- kernel configuration or /proc/config.gz (if you have it)
+- behaviour which you think is incorrect
+- the actual operation; a reproducible one is better
+- mailto: aufs-users at lists.sourceforge.net
+
+Usually, I don't watch the Public Areas (Bugs, Support Requests, Patches,
+and Feature Requests) on SourceForge. Please join and write to the
+aufs-users ML.
+
+
+6. Acknowledgements
+----------------------------------------
+Thanks to everyone who has tried and is using aufs, and to everyone who
+has reported a bug or given any feedback.
+
+Especially donators:
+Tomas Matejicek(slax.org) made a donation (much more than once).
+	Since Apr 2010, Tomas M (the author of Slax and Linux Live
+	scripts) has been making "doubling" donations.
+	Unfortunately I cannot list all of the donors, but I really
+	appreciate it.
+	It ended in Aug 2010, but the ordinary donation URL is still available.
+ <http://sourceforge.net/donate/index.php?group_id=167503>
+Dai Itasaka made a donation (2007/8).
+Chuck Smith made a donation (2008/4, 10 and 12).
+Henk Schoneveld made a donation (2008/9).
+Chih-Wei Huang, ASUS, CTC donated Eee PC 4G (2008/10).
+Francois Dupoux made a donation (2008/11).
+Bruno Cesar Ribas and Luis Carlos Erpen de Bona, C3SL serves public
+ aufs2 GIT tree (2009/2).
+William Grant made a donation (2009/3).
+Patrick Lane made a donation (2009/4).
+The Mail Archive (mail-archive.com) made donations (2009/5).
+Nippy Networks (Ed Wildgoose) made a donation (2009/7).
+New Dream Network, LLC (www.dreamhost.com) made a donation (2009/11).
+Pavel Pronskiy made a donation (2011/2).
+Iridium and Inmarsat satellite phone retailer (www.mailasail.com), Nippy
+ Networks (Ed Wildgoose) made a donation for hardware (2011/3).
+Max Lekomcev (DOM-TV project) made a donation (2011/7, 12, 2012/3, 6 and
+11).
+Sam Liddicott made a donation (2011/9).
+Era Scarecrow made a donation (2013/4).
+Bor Ratajc made a donation (2013/4).
+Alessandro Gorreta made a donation (2013/4).
+POIRETTE Marc made a donation (2013/4).
+Alessandro Gorreta made a donation (2013/4).
+lauri kasvandik made a donation (2013/5).
+"pemasu from Finland" made a donation (2013/7).
+The Parted Magic Project made a donation (2013/9 and 11).
+Pavel Barta made a donation (2013/10).
+Nikolay Pertsev made a donation (2014/5).
+James B made a donation (2014/7 and 2015/7).
+Stefano Di Biase made a donation (2014/8).
+Daniel Epellei made a donation (2015/1).
+OmegaPhil made a donation (2016/1).
+
+Thank you very much.
+Donations, including future ones, are always very important and
+helpful for me to keep on developing aufs.
+
+
+7.
+----------------------------------------
+If you are an experienced user, no explanation is needed. Aufs is
+just a linux filesystem.
+
+
+Enjoy!
+
+# Local variables: ;
+# mode: text;
+# End: ;
diff --git a/Documentation/filesystems/aufs/design/01intro.txt b/Documentation/filesystems/aufs/design/01intro.txt
new file mode 100644
index 000000000..5d0121439
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/01intro.txt
@@ -0,0 +1,157 @@
+
+# Copyright (C) 2005-2016 Junjiro R. Okajima
+
+Introduction
+----------------------------------------
+
+aufs [ei ju: ef es] | [a u f s]
+1. abbrev. for "advanced multi-layered unification filesystem".
+2. abbrev. for "another unionfs".
+3. abbrev. for "auf das" in German which means "on the" in English.
+ Ex. "Butter aufs Brot"(G) means "butter onto bread"(E).
+ But "Filesystem aufs Filesystem" is hard to understand.
+
+AUFS is a filesystem with the following features:
+- multi-layered stackable unification filesystem; the member directory
+ is called a branch.
+- branch permission and attribute, 'readonly', 'real-readonly',
+ 'readwrite', 'whiteout-able', 'link-able whiteout', etc. and their
+ combination.
+- internal "file copy-on-write".
+- logical deletion, whiteout.
+- dynamic branch manipulation, adding, deleting and changing permission.
+- allow bypassing aufs, user's direct branch access.
+- external inode number translation table and bitmap which maintains the
+ persistent aufs inode number.
+- seekable directory, including NFS readdir.
+- file mapping, mmap and sharing pages.
+- pseudo-link, hardlink over branches.
+- loopback mounted filesystem as a branch.
+- several policies to select one among multiple writable branches.
+- revert a single systemcall when an error occurs in aufs.
+- and more...
+
+
+Multi Layered Stackable Unification Filesystem
+----------------------------------------------------------------------
+Most people already know what it is.
+It is a filesystem which unifies several directories and provides a
+merged single directory. When users access a file, the access will be
+passed/re-directed/converted (sorry, I am not sure which English word is
+correct) to the real file on the member filesystem. The member
+filesystem is called 'lower filesystem' or 'branch' and has a mode
+'readonly' and 'readwrite.' And the deletion for a file on the lower
+readonly branch is handled by creating 'whiteout' on the upper writable
+branch.
+
+On LKML, there have been discussions about UnionMount (Jan Blunck,
+Bharata B Rao and Valerie Aurora) and Unionfs (Erez Zadok). They took
+different approaches to implement the merged-view.
+The former tries putting it into VFS, and the latter implements as a
+separate filesystem.
+(If I misunderstand about these implementations, please let me know and
+I shall correct it. Because it is a long time ago when I read their
+source files last time).
+
+UnionMount's approach may be able to stay small, but it may be hard to
+share branches between several UnionMount instances, since the whiteout
+in it is implemented in the inode on the branch filesystem and is always
+shared. According to Bharata's post, readdir does not seem to be
+finished yet.
+There are several missing features known in these implementations, such as:
+- for users, the inode number may change silently. eg. copy-up.
+- link(2) may break by copy-up.
+- read(2) may get an obsoleted filedata (fstat(2) too).
+- fcntl(F_SETLK) may be broken by copy-up.
+- unnecessary copy-up may happen, for example mmap(MAP_PRIVATE) after
+ open(O_RDWR).
+
+In linux-3.18, "overlay" filesystem (formerly known as "overlayfs") was
+merged into mainline. This is another implementation of UnionMount as a
+separate filesystem. All the limitations and known problems of
+UnionMount are equally inherited by the "overlay" filesystem.
+
+Unionfs has a longer history. When I started implementing a stackable
+filesystem (Aug 2005), it already existed. It has virtual super_block,
+inode, dentry and file objects, and they have an array pointing to the
+lower objects of the same kind. After contributing many patches for Unionfs, I
+re-started my project AUFS (Jun 2006).
+
+In AUFS, the structure of the filesystem resembles Unionfs, but I
+implemented my own ideas, approaches and enhancements, and it became a
+totally different one.
+
+Comparing DM snapshot and fs based implementation
+- the number of bytes to be copied between devices is much smaller.
+- the type of filesystem must be one and only.
+- the fs must be writable, no readonly fs, even for the lower original
+ device. so the compression fs will not be usable. but if we use
+ loopback mount, we may address this issue.
+ for instance,
+ mount /cdrom/squashfs.img /sq
+ losetup /sq/ext2.img
+ losetup /somewhere/cow
+ dmsetup "snapshot /dev/loop0 /dev/loop1 ..."
+- it will be difficult (or needs more operations) to extract the
+ difference between the original device and COW.
+- DM snapshot-merge may help a lot when users try merging. in the
+ fs-layer union, users will use rsync(1).
+
+You may want to read my old paper "Filesystems in LiveCD"
+(http://aufs.sourceforge.net/aufs2/report/sq/sq.pdf).
+
+
+Several characters/aspects/persona of aufs
+----------------------------------------------------------------------
+
+Aufs has several characters, aspects or persona.
+1. a filesystem, callee of VFS helper
+2. sub-VFS, caller of VFS helper for branches
+3. a virtual filesystem which maintains persistent inode number
+4. reader/writer of files on branches such like an application
+
+1. Callee of VFS Helper
+As an ordinary linux filesystem, aufs is a callee of VFS. For instance,
+unlink(2) from an application reaches sys_unlink() kernel function and
+then vfs_unlink() is called. vfs_unlink() is one of the VFS helpers and it
+calls the filesystem-specific unlink operation. Actually aufs implements the
+unlink operation but it behaves like a redirector.
+
+2. Caller of VFS Helper for Branches
+aufs_unlink() passes the unlink request to the branch filesystem as if
+it were called from VFS. So the called unlink operation of the branch
+filesystem acts as usual. As a caller of the VFS helpers, aufs has to
+handle every necessary pre/post operation for the branch filesystem
+(see the sketch after this list):
+- acquire the lock for the parent dir on a branch
+- lookup in a branch
+- revalidate dentry on a branch
+- mnt_want_write() for a branch
+- vfs_unlink() for a branch
+- mnt_drop_write() for a branch
+- release the lock on a branch
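+
+The following is a simplified and hypothetical C sketch of that pre/post
+sequence (the real aufs_unlink() differs in many details; error paths,
+revalidation and the parent-dir locking call are omitted or reduced):
+
+	/* h_parent, h_mnt and name refer to the branch; illustrative only */
+	struct dentry *h_dentry;
+	int err;
+
+	/* 1. acquire the lock for the parent dir on the branch */
+	h_dentry = lookup_one_len(name, h_parent, strlen(name));
+	err = PTR_ERR(h_dentry);
+	if (!IS_ERR(h_dentry)) {
+		err = mnt_want_write(h_mnt);
+		if (!err) {
+			err = vfs_unlink(d_inode(h_parent), h_dentry, NULL);
+			mnt_drop_write(h_mnt);
+		}
+		dput(h_dentry);
+	}
+	/* 2. release the lock on the branch */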
+
+3. Persistent Inode Number
+One of the most important issues for a filesystem is to maintain inode
+numbers. This is particularly important to support exporting a
+filesystem via NFS. Aufs is a virtual filesystem which doesn't have a
+backend block device of its own. But some storage is necessary to
+keep and maintain the inode numbers. It may be a large space and may not
+be suitable to keep in memory. Aufs rents some space from its first
+writable branch filesystem (by default) and creates file(s) on it. These
+files are created by aufs internally and removed soon after (currently),
+while kept open.
+Note: Because these files are removed, they are totally gone after
+ unmounting aufs. It means the inode numbers are not persistent
+ across unmount or reboot. I have a plan to make them really
+ persistent which will be important for aufs on NFS server.
+
+4. Read/Write Files Internally (copy-on-write)
+Because a branch can be readonly, when you write to a file on it, aufs
+will internally "copy-up" the file to the upper writable branch, and then
+write the originally requested data to the copied file. Generally the
+kernel doesn't open/read/write files actively. In aufs, even a single
+write may cause an internal "file copy". This behaviour is very similar
+to the cp(1) command.
+
+Some people may think it is better to pass such work to a user space
+helper, instead of doing it in kernel space. Actually I am still thinking
+about it. But currently I have implemented it in kernel space.
diff --git a/Documentation/filesystems/aufs/design/02struct.txt b/Documentation/filesystems/aufs/design/02struct.txt
new file mode 100644
index 000000000..783328a75
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/02struct.txt
@@ -0,0 +1,245 @@
+
+# Copyright (C) 2005-2016 Junjiro R. Okajima
+
+Basic Aufs Internal Structure
+
+Superblock/Inode/Dentry/File Objects
+----------------------------------------------------------------------
+Like an ordinary filesystem, aufs has its own
+superblock/inode/dentry/file objects. All these objects have a
+dynamically allocated array and store the same kind of pointers to the
+lower filesystem, i.e. the branch.
+For example, when you build a union with one readwrite branch and one
+readonly branch, mounted on /au, /rw and /ro respectively:
+- /au = /rw + /ro
+- /ro/fileA exists but /rw/fileA does not
+
+The aufs lookup operation finds /ro/fileA and gets a dentry for it. These
+pointers are stored in an aufs dentry. The array in the aufs dentry will be:
+- [0] = NULL (because /rw/fileA doesn't exist)
+- [1] = /ro/fileA
+
+This style of array is essentially the same for the aufs
+superblock/inode/dentry/file objects.
+
+Because aufs supports manipulating branches, i.e. adding/deleting/changing
+branches dynamically, these objects have their own generation. When
+branches are changed, the generation in the aufs superblock is
+incremented, and the generation in another object is compared with it when
+that object is accessed. When the generation in another object is
+obsoleted, aufs refreshes its internal array.
+
+
+Superblock
+----------------------------------------------------------------------
+Additionally the aufs superblock has some data for the policies to select
+one among multiple writable branches, for XIB files, pseudo-links and a
+kobject. See below for details.
+About the policies which support copying-down a directory, see
+wbr_policy.txt too.
+
+
+Branch and XINO(External Inode Number Translation Table)
+----------------------------------------------------------------------
+Every branch has its own xino (external inode number translation table)
+file. The xino file is created and unlinked by aufs internally. When two
+members of a union exist on the same filesystem, they share the single
+xino file.
+The structure of a xino file is simple: just a sequence of aufs inode
+numbers indexed by the lower inode number.
+In the above sample, assume the inode number of /ro/fileA is i111 and
+aufs assigns the inode number i999 for fileA. Then aufs writes 999 as
+4(8) bytes at 111 * 4(8) bytes offset in the xino file.
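+
+For illustration only (this is not the actual aufs source), the offset
+calculation implied by the example above can be sketched as:
+
+	/* lower (branch) inode number -> byte offset in the xino file */
+	loff_t xino_offset(unsigned long h_ino)
+	{
+		/* each slot holds one aufs inode number, 4 or 8 bytes */
+		return (loff_t)h_ino * sizeof(ino_t);
+	}
+	/* e.g. h_ino 111 -> offset 444 (or 888); aufs writes 999 there */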
+
+When the inode numbers are not contiguous, the xino file will be sparse,
+having holes in it, and doesn't consume as much disk space as it
+might appear to. If your branch filesystem consumes disk space for such
+holes, then you should specify the 'xino=' option when mounting aufs.
+
+Aufs has a mount option to free the disk blocks for such holes in XINO
+files on tmpfs or ramdisk. But it is not so effective actually. If you
+meet a problem of disk shortage due to XINO files, then you should try
+"tmpfs-ino.patch" (and "vfs-ino.patch" too) in aufs4-standalone.git.
+The patch localizes the assignment of inode numbers per tmpfs-mount and
+avoids the holes in XINO files.
+
+Also a writable branch has three kinds of "whiteout bases". All of these
+exist when the branch is joined to aufs, and their names are doubly
+whiteout-ed, so that users will never see their names in the aufs
+hierarchy.
+1. a regular file which will be hardlinked to all whiteouts.
+2. a directory to store a pseudo-link.
+3. a directory to store an "orphan"-ed file temporarily.
+
+1. Whiteout Base
+ When you remove a file on a readonly branch, aufs handles it as a
+ logical deletion and creates a whiteout on the upper writable branch
+ as a hardlink of this file in order not to consume inode on the
+ writable branch.
+2. Pseudo-link Dir
+ See below, Pseudo-link.
+3. Step-Parent Dir
+ When "fileC" exists on the lower readonly branch only and it is
+ opened and removed with its parent dir, and then user writes
+ something into it, then aufs copies-up fileC to this
+ directory. Because there is no other dir to store fileC. After
+ creating a file under this dir, the file is unlinked.
+
+Because aufs supports manipulating branches, i.e. adding/deleting/changing
+them dynamically, a branch has its own id. When the branch order changes,
+aufs finds the new index by searching the branch id.
+
+
+Pseudo-link
+----------------------------------------------------------------------
+Assume "fileA" exists on the lower readonly branch only and it is
+hardlinked to "fileB" on the branch. When you write something to fileA,
+aufs copies it up to the upper writable branch. Additionally aufs
+creates a hardlink under the Pseudo-link Directory of the writable
+branch. The inode of a pseudo-link is kept in aufs super_block as a
+simple list. If fileB is read after unlinking fileA, aufs returns the
+file data from the pseudo-link instead of the lower readonly
+branch. Because the pseudo-link is based upon the inode, keeping the
+inode number via xino (see above) is essentially necessary.
+
+All the hardlinks under the Pseudo-link Directory of the writable branch
+should be restored in a proper location later. Aufs provides a utility
+to do this. The userspace helper is executed when remounting and
+unmounting aufs by default.
+While this utility is running, it puts aufs into the pseudo-link
+maintenance mode. In this mode, only the process which began the
+maintenance mode (and its child processes) is allowed to operate in
+aufs. Some other processes which are not related to the pseudo-link will
+be allowed to run too, but the rest have to return an error or wait
+until the maintenance mode ends. If a process has already acquired an
+inode mutex (in VFS), it has to return an error.
+
+
+XIB(external inode number bitmap)
+----------------------------------------------------------------------
+In addition to the xino file per branch, aufs has an external inode number
+bitmap in a superblock object. It is also an internal file, like a
+xino file.
+It is a simple bitmap marking whether an aufs inode number is in use or
+not.
+To reduce file I/O, aufs prepares a single memory page to cache the xib.
+
+As well as XINO files, aufs has a feature to truncate/refresh XIB to
+reduce the number of consumed disk blocks for these files.
+
+
+Virtual or Vertical Dir, and Readdir in Userspace
+----------------------------------------------------------------------
+In order to support multiple layers (branches), the aufs readdir operation
+constructs a virtual dir block in memory. For readdir, aufs calls
+vfs_readdir() internally for each dir on the branches, merges their entries
+while eliminating the whiteout-ed ones, and sets the result on the file
+(dir) object. So the file object keeps its entry list until it is closed.
+The entry list will be updated when the file position is zero and the list
+becomes obsoleted. This decision is made automatically in aufs.
+
+The dynamically allocated memory block for the name of entries has a
+unit of 512 bytes (by default) and stores the names contiguously (no
+padding). Another block for each entry is handled by kmem_cache too.
+While building dir blocks, aufs creates a hash list and judges whether
+an entry is whiteout-ed by its upper branch or is already listed.
+The merged result is cached in the corresponding inode object and
+maintained by a customizable life-time option.
+
+Some people may say this can be a security hole or invite a DoS attack,
+since an opened and once readdir-ed dir (file object) holds its entry
+list and becomes a pressure on system memory. But I'd say it is similar
+to files under /proc or /sys. The virtual files in them also hold a
+memory page (generally) while they are opened. When an idea to reduce
+memory for them is introduced, it will be applied to aufs too.
+For those who really hate this situation, I've developed a readdir(3)
+library which does this merging in userspace. You just need to set the
+LD_PRELOAD environment variable, and aufs will not consume any memory in
+kernel space for readdir(3).
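+For example (the library path is only a placeholder; see aufs-util for
+the real name and install location):
+	$ LD_PRELOAD=/path/to/the-aufs-readdir-library.so ls /tmp/aufs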
+
+
+Workqueue
+----------------------------------------------------------------------
+Aufs sometimes requires privileged access to a branch, for instance
+in a copy-up/down operation. A user process may be going to make changes
+to a file which exists in the lower readonly branch only, while the mode
+of one of the ancestor directories does not allow writing by that user
+process. Here aufs copies-up the file along with its ancestors, and that
+may require privilege to set their owner/group/mode/etc.
+This is a typical case of the application character of aufs (see
+Introduction).
+
+Aufs uses a workqueue synchronously for this case. It creates its own
+workqueue. The workqueue is a kernel thread and has privilege. Aufs
+passes it the request to call mkdir or write (for example), and waits for
+its completion. This approach simply solves the problem of signal
+handlers.
+If aufs didn't adopt the workqueue and instead changed the privilege of
+the process, then the process might receive an unexpected SIGXFSZ or
+other signals.
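+
+A generic sketch of this synchronous-workqueue pattern (the names below,
+au_wkq_job, au_wkq_func and aufs_wq, are hypothetical and not the actual
+aufs identifiers):
+
+	struct au_wkq_job {
+		struct work_struct	work;
+		struct completion	done;
+		/* parameters and result of the requested operation */
+	};
+
+	static void au_wkq_func(struct work_struct *work)
+	{
+		struct au_wkq_job *job =
+			container_of(work, struct au_wkq_job, work);
+		/* run mkdir/write/etc. with the kernel thread's privilege */
+		complete(&job->done);
+	}
+
+	/* caller: queue the job and wait for its completion synchronously */
+	INIT_WORK(&job.work, au_wkq_func);
+	init_completion(&job.done);
+	queue_work(aufs_wq, &job.work);
+	wait_for_completion(&job.done);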
+
+Aufs also uses the system global workqueue (the "events" kernel thread)
+for asynchronous tasks, such as handling inotify/fsnotify, re-creating a
+whiteout base, etc. This is unrelated to privilege.
+Most aufs operations try to acquire a rw_semaphore for the aufs
+superblock at the beginning, and at the same time wait for the completion
+of all queued asynchronous tasks.
+
+
+Whiteout
+----------------------------------------------------------------------
+The whiteout in aufs is very similar to Unionfs's. That is represented
+by its filename. UnionMount takes an approach of a file mode, but I am
+afraid several utilities (find(1) or something) will have to support it.
+
+Basically the whiteout represents "logical deletion", which stops aufs
+from looking up further, but it also represents "the dir is opaque",
+which also stops further lookup.
+
+In aufs, rmdir(2) and rename(2) for a dir use whiteout alternatively.
+In order to make the several steps within a single systemcall
+revertible, aufs adopts an approach of renaming a directory to a
+temporary unique whiteouted name.
+For example, in rename(2) of a dir where the target dir already exists,
+aufs renames the target dir to a temporary unique whiteouted name before
+the actual rename on a branch, and then handles the other actions (making
+it opaque, updating the attributes, etc). If an error happens in these
+actions, aufs simply renames the whiteouted name back and returns an
+error. If all succeed, aufs registers a function with the system global
+workqueue to remove the whiteouted unique temporary name completely and
+asynchronously.
+
+
+Copy-up
+----------------------------------------------------------------------
+It is a well-known feature or concept.
+When a user modifies a file on a readonly branch, aufs operates "copy-up"
+internally and makes the change to the new file on the upper writable
+branch. When the triggering systemcall does not update the timestamps of
+the parent dir, aufs reverts them after the copy-up.
+
+
+Move-down (aufs3.9 and later)
+----------------------------------------------------------------------
+"Copy-up" is one of the essential feature in aufs. It copies a file from
+the lower readonly branch to the upper writable branch when a user
+changes something about the file.
+"Move-down" is an opposite action of copy-up. Basically this action is
+ran manually instead of automatically and internally.
+For desgin and implementation, aufs has to consider these issues.
+- whiteout for the file may exist on the lower branch.
+- ancestor directories may not exist on the lower branch.
+- diropq for the ancestor directories may exist on the upper branch.
+- free space on the lower branch will reduce.
+- another access to the file may happen during moving-down, including
+ UDBA (see "Revalidate Dentry and UDBA").
+- the file should not be hard-linked nor pseudo-linked. They should be
+  handled by the auplink utility later.
+
+Sometimes users want to move-down a file from the upper writable branch
+to the lower readonly or writable branch. For instance,
+- the free space of the upper writable branch is going to run out.
+- create a new intermediate branch between the upper and lower branch.
+- etc.
+
+For this purpose, use "aumvdown" command in aufs-util.git.
diff --git a/Documentation/filesystems/aufs/design/03atomic_open.txt b/Documentation/filesystems/aufs/design/03atomic_open.txt
new file mode 100644
index 000000000..741ad6d66
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/03atomic_open.txt
@@ -0,0 +1,72 @@
+
+# Copyright (C) 2015-2016 Junjiro R. Okajima
+
+Support for a branch who has its ->atomic_open()
+----------------------------------------------------------------------
+The filesystems which implement ->atomic_open() are not the majority. For
+example NFSv4 does, and aufs should call NFSv4's ->atomic_open,
+particularly for the open(O_CREAT|O_EXCL, 0400) case. Without
+->atomic_open(), NFSv4 returns an error for this open(2). I am not
+sure whether all filesystems which have ->atomic_open() behave like this,
+but NFSv4 surely returns the error.
+
+In order to support ->atomic_open() for aufs, there are a few
+approaches.
+
+A. Introduce aufs_atomic_open()
+ - calls one of VFS:do_last(), lookup_open() or atomic_open() for
+ branch fs.
+B. Introduce aufs_atomic_open() calling create, open and chmod. This is
+   the approach of an aufs user, Pip Cet.
+ - calls aufs_create(), VFS finish_open() and notify_change().
+ - pass fake-mode to finish_open(), and then correct the mode by
+ notify_change().
+C. Extend aufs_open() to call branch fs's ->atomic_open()
+ - no aufs_atomic_open().
+ - aufs_lookup() registers the TID to an aufs internal object.
+ - aufs_create() does nothing when the matching TID is registered, but
+ registers the mode.
+ - aufs_open() calls branch fs's ->atomic_open() when the matching
+ TID is registered.
+D. Extend aufs_open() to re-try branch fs's ->open() with superuser's
+ credential
+ - no aufs_atomic_open().
+ - aufs_create() registers the TID to an internal object. this info
+ represents "this process created this file just now."
+ - when aufs gets EACCES from branch fs's ->open(), then confirm the
+ registered TID and re-try open() with superuser's credential.
+
+Pros and cons for each approach.
+
+A.
+  - straightforward but highly depends upon VFS internals.
+  - the atomic behaviour is kept.
+ - some of parameters such as nameidata are hard to reproduce for
+ branch fs.
+ - large overhead.
+B.
+ - easy to implement.
+ - the atomic behavaiour is lost.
+C.
+ - the atomic behavaiour is kept.
+ - dirty and tricky.
+ - VFS checks whether the file is created correctly after calling
+ ->create(), which means this approach doesn't work.
+D.
+ - easy to implement.
+ - the atomic behavaiour is lost.
+ - to open a file with superuser's credential and give it to a user
+ process is a bad idea, since the file object keeps the credential
+ in it. It may affect LSM or something. This approach doesn't work
+ either.
+
+Approach A is ideal, but it is hard to implement. So here is a
+variation of A, which is to be implemented.
+
+A-1. Introduce aufs_atomic_open()
+ - calls the branch fs's ->atomic_open() if it exists, otherwise calls
+ vfs_create() and finish_open().
+ - the demerit is that several checks after the branch fs's
+ ->atomic_open() are lost. In the ordinary case, the checks are
+ done by VFS:do_last(), lookup_open() and atomic_open(). Some can
+ be implemented in aufs, but not all, I am afraid.
diff --git a/Documentation/filesystems/aufs/design/03lookup.txt b/Documentation/filesystems/aufs/design/03lookup.txt
new file mode 100644
index 000000000..5b6b000b5
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/03lookup.txt
@@ -0,0 +1,100 @@
+
+# Copyright (C) 2005-2016 Junjiro R. Okajima
+
+Lookup in a Branch
+----------------------------------------------------------------------
+Since aufs has a character of sub-VFS (see Introduction), it operates
+lookup for branches as VFS does. This may be heavy work, but almost all
+lookup operations in aufs are the simplest case, ie. looking up only an entry
+directly connected to its parent. Digging down the directory hierarchy
+is unnecessary. VFS has a function lookup_one_len() for that use, and
+aufs calls it.
+
+When a branch is a remote filesystem, aufs basically relies upon its
+->d_revalidate(); additionally, aufs forces the hardest revalidate tests
+on them.
+For d_revalidate, aufs implements three levels of revalidate tests. See
+"Revalidate Dentry and UDBA" below for details.
+
+
+Test Only the Highest One for the Directory Permission (dirperm1 option)
+----------------------------------------------------------------------
+Let's try a case study.
+- aufs has two branches, an upper readwrite one and a lower readonly one.
+ /au = /rw + /ro
+- "dirA" exists under /ro, but not under /rw, and its mode is 0700.
+- a user invoked "chmod a+rx /au/dirA".
+- the internal copy-up is activated and "/rw/dirA" is created with its
+ permission bits set to world readable.
+- then, does "/au/dirA" become world readable?
+
+In this case, /ro/dirA is still 0700 since it exists on the readonly
+branch, or it may even be a natively readonly filesystem. If aufs
+respects the lower branch, it should not respond to readdir requests
+from other users. But the user allowed it by chmod. Should aufs really
+reject showing the entries under /ro/dirA?
+
+To be honest, I don't have a good solution for this case. So aufs
+implements the 'dirperm1' and 'nodirperm1' mount options, and leaves
+the choice to users.
+When dirperm1 is specified, aufs checks only the topmost entry for the
+directory permission, and shows the entries. Otherwise, as usual, it
+checks every dir existing on all branches and rejects the request.
+
+As a side effect, the dirperm1 option improves the performance of aufs
+because the number of permission checks is reduced when there are many
+branches.
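+
+For example, the option is specified at mount time like this (a sketch;
+the branch paths and mount point are hypothetical):
+# mount -t aufs -o br:/rw=rw:/ro=ro,dirperm1 aufs /au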
+
+
+Revalidate Dentry and UDBA (User's Direct Branch Access)
+----------------------------------------------------------------------
+Generally VFS helpers re-validate a dentry as a part of lookup.
+0. digging down the directory hierarchy.
+1. lock the parent dir by its i_mutex.
+2. lookup the final (child) entry.
+3. revalidate it.
+4. call the actual operation (create, unlink, etc.)
+5. unlock the parent dir
+
+If the filesystem implements its ->d_revalidate() (step 3), then it is
+called. Actually aufs implements it and checks whether the dentry on a
+branch is still valid.
+But it is not enough, because aufs has to release the lock for the
+parent dir on a branch at the end of ->lookup() (step 2) and
+->d_revalidate() (step 3) while the i_mutex of the aufs dir is still
+held by VFS.
+If the file on a branch is changed directly, eg. bypassing aufs, after
+aufs has released the lock, then the subsequent operation may produce
+an unpleasant result.
+
+This situation is a result of the VFS architecture, where ->lookup()
+and ->d_revalidate() are separated. But I never say it is wrong. It is
+a good design from VFS's point of view. It is just not suitable for
+the sub-VFS character of aufs.
+
+Aufs supports such cases by three levels of revalidation, selectable
+by the user (see the mount example after this list).
+1. Simple Revalidate
+ In addition to the native flow in VFS, confirm the child-parent
+ relationship on the branch just after locking the parent dir on the
+ branch in the "actual operation" (step 4). When this validation
+ fails, aufs returns EBUSY. ->d_revalidate() (step 3) in aufs still
+ checks the validity of the dentry on branches.
+2. Monitor Changes Internally by Inotify/Fsnotify
+ In addition to the above, in the "actual operation" (step 4) aufs
+ looks up the dentry on the branch again, and returns EBUSY if it
+ finds a different dentry.
+ Additionally, aufs sets an inotify/fsnotify watch for every dir on
+ the branches while it is cached. When an event is notified, aufs
+ registers a function to the kernel 'events' thread by
+ schedule_work(). The function sets a special status in the cached
+ aufs dentry and inode private data. If they are not cached, then
+ aufs has nothing to do. When the same file is accessed through aufs
+ (steps 0-3) later, aufs will detect the status and refresh all
+ necessary data.
+ In this mode, aufs has to ignore the events which are fired by aufs
+ itself.
+3. No Extra Validation
+ This is the simplest mode; it doesn't add any additional revalidation
+ test and skips the revalidation in step 4. It is useful and improves
+ aufs performance when the system surely hides the aufs branches from
+ users, by over-mounting something (or another method).
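+
+These levels are selected with the udba= mount option (a sketch; the
+branch paths are hypothetical, and the option values follow the aufs
+manual, so double-check them there):
+# mount -t aufs -o br:/rw=rw:/ro=ro,udba=reval aufs /au
+# mount -o remount,udba=notify /au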
diff --git a/Documentation/filesystems/aufs/design/04branch.txt b/Documentation/filesystems/aufs/design/04branch.txt
new file mode 100644
index 000000000..e68f4d3df
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/04branch.txt
@@ -0,0 +1,61 @@
+
+# Copyright (C) 2005-2016 Junjiro R. Okajima
+
+Branch Manipulation
+
+Since aufs supports dynamic branch manipulation, ie. adding/removing a
+branch and changing its permission/attributes, there is a lot of work to do.
+
+
+Add a Branch
+----------------------------------------------------------------------
+o Confirm the dir being added exists outside of aufs, including the
+ case of a loopback mount, and check its various attributes.
+o Initialize the xino file and whiteout bases if necessary.
+ See struct.txt.
+
+o Check the owner/group/mode of the directory
+ When the owner/group/mode of the directory being added differs from
+ that of the existing branches, aufs issues a warning because it may
+ pose a security risk.
+ For example, when an upper writable branch has a world-writable empty
+ top directory, a malicious user can create any file on the writable
+ branch directly, just as a copy-up would, and modify it manually. If
+ something like /etc/{passwd,shadow} exists on the lower readonly
+ branch but not on the upper writable branch, and the writable branch
+ is world-writable, then a malicious user may create /etc/passwd on
+ the writable branch directly and the infected file will be valid in
+ aufs.
+ I am afraid it can be a security issue, but aufs can do nothing except
+ produce a warning.
+
+
+Delete a Branch
+----------------------------------------------------------------------
+o Confirm the branch being deleted is not busy
+ Generally, one merit of adopting the "remount" interface to manipulate
+ branches is that it discards caches (see the sketch at the end of
+ this section). When deleting a branch, aufs checks the still cached
+ (and connected) dentries and inodes. If there are any, then they are
+ all in use. An inode without its corresponding dentry can be alive
+ alone (for example, in the inotify/fsnotify case).
+
+ For the cached ones, aufs checks whether an entry with the same name
+ exists on other branches.
+ If the cached one is a directory, then because aufs provides a merged
+ view to users, as long as one dir is left on any branch aufs can show
+ the dir to users. In this case, the branch can be removed from aufs.
+ Otherwise aufs rejects deleting the branch.
+
+ If any file on the branch being deleted is opened by aufs, then aufs
+ rejects the deletion.
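+
+ For illustration, adding and deleting a branch are typically done
+ through remount (a sketch; the paths are hypothetical, and the
+ option names follow the aufs manual, so double-check them there):
+ # mount -o remount,append:/new/ro/branch=ro /au
+ # mount -o remount,del:/old/branch /au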
+
+
+Modify the Permission of a Branch
+----------------------------------------------------------------------
+o Re-initialize or remove the xino file and whiteout bases if necessary.
+ See struct.txt.
+
+o rw --> ro: Confirm the branch being modified is not busy
+ Aufs rejects the request if any of these conditions are true.
+ - a file on the branch is mmap-ed.
+ - a regular file on the branch is opened for write and there is no
+ entry with the same name on an upper branch.
diff --git a/Documentation/filesystems/aufs/design/05wbr_policy.txt b/Documentation/filesystems/aufs/design/05wbr_policy.txt
new file mode 100644
index 000000000..1726d5d06
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/05wbr_policy.txt
@@ -0,0 +1,51 @@
+
+# Copyright (C) 2005-2016 Junjiro R. Okajima
+
+Policies to Select One among Multiple Writable Branches
+----------------------------------------------------------------------
+When the number of writable branches is more than one, aufs has to
+decide the target branch for file creation or copy-up. By default, the
+highest writable branch which has the parent (or ancestor) dir of the
+target file is chosen (the top-down-parent policy).
+By users' request, aufs implements some other policies to select the
+writable branch: for file creation, round-robin, most-free-space and
+other policies; for copy-up, top-down-parent, bottom-up-parent,
+bottom-up and others.
+
+As expected, the round-robin policy selects the branches in a circular
+manner. When you have two writable branches and create 10 new files, 5
+files will be created on each branch. The mkdir(2) system call is an
+exception: when you create 10 new directories, all will be created on
+the same branch.
+The most-free-space policy selects the one which has the most free
+space among the writable branches. The amount of free space is checked
+by aufs internally, and users can specify the check interval.
+
+The policies for copy-up are simpler:
+top-down-parent is equivalent to the policy of the same name for
+creation,
+bottom-up-parent selects the writable branch which has the parent dir
+and is the nearest upper one from the copyup-source, and
+bottom-up selects the nearest upper writable branch from the
+copyup-source, regardless of the existence of the parent dir.
+
+There are some rules and exceptions when applying these policies.
+- If there is a readonly branch above the policy-selected branch and
+ the parent dir is marked as opaque (a variation of whiteout), or the
+ target (created) file is whiteout-ed on that upper readonly branch,
+ then the result of the policy is ignored and the target file will be
+ created on the nearest writable branch above the readonly branch.
+- If there is a writable branch above the policy-selected branch and
+ the parent dir is marked as opaque or the target file is whiteout-ed
+ on that branch, then the result of the policy is ignored and the
+ target file will be created on the highest one among the upper
+ writable branches which have the diropq or whiteout. In the whiteout
+ case, aufs removes it as usual.
+- The link(2) and rename(2) system calls are exceptions in every
+ policy. They try to select the branch where the source exists, if
+ possible, since copying up a large file can take a long time. If
+ that is not possible, ie. the branch where the source exists is
+ readonly, then they follow the copy-up policy.
+- There is an exception for rename(2) when the target exists.
+ If the rename target exists, aufs compares the indices of the
+ branches where the source and the target exist and selects the
+ higher one. If the selected branch is readonly, then aufs follows
+ the copy-up policy.
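+
+For example, the creation and copy-up policies are selected at mount
+time (a sketch; the branch paths are hypothetical, and the option names
+follow the aufs manual, so double-check them there):
+# mount -t aufs -o br:/rw0=rw:/rw1=rw:/ro=ro,create=mfs,cpup=bup aufs /au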
diff --git a/Documentation/filesystems/aufs/design/06fhsm.txt b/Documentation/filesystems/aufs/design/06fhsm.txt
new file mode 100644
index 000000000..84b46dc5b
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/06fhsm.txt
@@ -0,0 +1,105 @@
+
+# Copyright (C) 2011-2016 Junjiro R. Okajima
+
+File-based Hierarchical Storage Management (FHSM)
+----------------------------------------------------------------------
+Hierarchical Storage Management (or HSM) is a well-known feature in the
+storage world. Aufs provides this feature as file-based, with multiple
+writable branches, based upon the principle of "the colder, the lower".
+Here the word "colder" means less used files, and "lower" means the
+position in the vertical order of the stacked branches.
+These multiple writable branches are prioritized, ie. the topmost one
+should be the fastest drive and is used heavily.
+
+o Characters in aufs FHSM story
+- aufs itself and a new branch attribute.
+- a new ioctl interface to move-down and to establish a connection with
+ the daemon ("move-down" is the converse of "copy-up").
+- userspace tool and daemon.
+
+The userspace daemon establishes a connection with aufs and waits for
+the notification. The notified information is very similar to struct
+statfs containing the number of consumed blocks and inodes.
+When the consumed blocks/inodes of a branch exceed the user-specified
+upper watermark, the daemon activates its move-down process until the
+consumed blocks/inodes reach the user-specified lower watermark.
+
+The actual move-down is done by aufs based upon the request from
+user-space since we need to maintain the inode number and the internal
+pointer arrays in aufs.
+
+Currently aufs FHSM handles regular files only. Additionally they
+must not be hard-linked nor pseudo-linked.
+
+
+o Cooperation between aufs and the user-space daemon
+ While the userspace daemon keeps the connection established, aufs
+ sends a small notification to it whenever aufs writes something into
+ a writable branch. But this may be costly since aufs issues statfs(2)
+ internally. So the user can specify a new option to cache the
+ info. Actually the notification is controlled by these factors.
+ + the specified cache time.
+ + whether it is classified as "force" by aufs internally.
+ Until the specified time expires, aufs doesn't send the info
+ except in the forced cases. When aufs decides to force, the info is
+ always notified to userspace.
+ For example, the number of free inodes is generally large enough and
+ a shortage of them happens rarely. So aufs doesn't force the
+ notification when creating a new file, directory and so on. This is
+ the typical case in which aufs doesn't force.
+ When aufs writes the actual file data and the file consumes any new
+ blocks, aufs forces the notification.
+
+
+o Interfaces in aufs
+- New branch attribute.
+ + fhsm
+ Specifies that the branch is managed by the FHSM feature; in other
+ words, it is a participant in FHSM (see the mount example after
+ this list).
+ When nofhsm is set on the branch, it will not be the source/target
+ branch of the move-down operation. This attribute is set
+ independently of the coo and moo attributes, and if you want full
+ FHSM, you should specify them as well.
+- New mount option.
+ + fhsm_sec
+ Specifies a time in seconds to suppress the notification of many
+ less important pieces of info.
+- New ioctl.
+ + AUFS_CTL_FHSM_FD
+ creates a new file descriptor from which userspace can read the
+ notification (a subset of struct statfs) from aufs.
+- Module parameter 'brs'
+ It has to be set to 1. Otherwise the new mount option 'fhsm' cannot
+ be set.
+- mount helpers /sbin/mount.aufs and /sbin/umount.aufs
+ When there are two or more branches with the fhsm attribute,
+ /sbin/mount.aufs invokes the user-space daemon and /sbin/umount.aufs
+ terminates it. As a result of remounting and branch manipulation, the
+ number of branches with the fhsm attribute can become one. In this
+ case, /sbin/mount.aufs will terminate the user-space daemon.
+
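+A mount sketch with two FHSM-managed writable branches (the paths are
+hypothetical, and the branch-attribute and option syntax follows the
+aufs manual, so double-check it there):
+# mount -t aufs -o br:/ssd=rw+fhsm:/hdd=rw+fhsm,fhsm_sec=30 aufs /au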
+
+Finally, the operation is done in kernel-space in these steps.
+- make sure that,
+ + no one else is using the file.
+ + the file is not hard-linked.
+ + the file is not pseudo-linked.
+ + the file is a regular file.
+ + the parent dir is not opaqued.
+- find the target writable branch.
+- make sure the file is not whiteout-ed by a branch upper than the
+ target.
+- make the parent dir on the target branch.
+- mutex lock the inode on the branch.
+- unlink the whiteout on the target branch (if exists).
+- lookup and create the whiteout-ed temporary name on the target branch.
+- copy the file as the whiteout-ed temporary name on the target branch.
+- rename the whiteout-ed temporary name to the original name.
+- unlink the file on the source branch.
+- maintain the internal pointer array and the external inode number
+ table (XINO).
+- maintain the timestamps and other attributes of the parent dir and the
+ file.
+
+And of course, in every step, an error may happen. So the operation
+should restore the original file state after an error happens.
diff --git a/Documentation/filesystems/aufs/design/06mmap.txt b/Documentation/filesystems/aufs/design/06mmap.txt
new file mode 100644
index 000000000..991c0b1fa
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/06mmap.txt
@@ -0,0 +1,59 @@
+
+# Copyright (C) 2005-2016 Junjiro R. Okajima
+
+mmap(2) -- File Memory Mapping
+----------------------------------------------------------------------
+In aufs, the file-mapped pages are handled by a branch fs directly,
+with no interaction with aufs. It means aufs_mmap() calls the branch
+fs's ->mmap().
+This approach is simple and good, but there is one problem.
+Under /proc, several entries show the mmapped files by their path (with
+device and inode number), and the printed path will be the path on the
+branch fs instead of the virtual aufs one.
+This is not a problem in most cases, but some utilities such as lsof(1)
+(and their users) may expect the path on aufs.
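+
+For example, the path reported for a mapped file can be checked like
+this (a sketch; <pid> and the pattern are placeholders):
+# grep /au/ /proc/<pid>/maps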
+
+To address this issue, aufs adds a new member called vm_prfile in struct
+vm_area_struct (and struct vm_region). The original vm_file points to
+the file on the branch fs in order to handle everything correctly as
+usual. The new vm_prfile points to a virtual file in aufs, and the
+show-functions in procfs refer to vm_prfile if it is set.
+Also we need to maintain several other places which touch vm_file,
+such as
+- fork()/clone() copies the vma and the reference count of vm_file is
+ incremented.
+- merging vmas maintains the ref count too.
+
+This is not a good approach. It just fakes the printed path. But it
+leaves all behaviour around f_mapping unchanged. This is surely an
+advantage.
+Actually aufs previously adopted another, more complicated approach
+which calls generic_file_mmap() and handles struct
+vm_operations_struct. With that approach, aufs met a hard problem and
+I could not solve it without switching approaches.
+
+There may be yet another approach, which is
+- bind-mount the branch-root onto the aufs-root internally
+- grab the new vfsmount (ie. struct mount)
+- lazy-umount the branch-root internally
+- in open(2) the aufs-file, open the branch-file with the hidden
+ vfsmount (instead of the original branch's vfsmount)
+- ideally this "bind-mount and lazy-umount" should be done atomically,
+ but it may be possible from userspace by the mount helper.
+
+By adding the internal hidden vfsmount and using it when opening a
+file, the file path under /proc would be printed correctly. This
+approach looks smarter, but is not possible, I am afraid.
+- the aufs-root may be bind-mounted later. When that happens, another
+ hidden vfsmount will be required.
+- it is hard to get the chance to bind-mount and lazy-umount
+ + in kernel-space, an FS can get the vfsmount in open(2) via
+ file->f_path, and aufs can know its vfsmount. But several locks are
+ already acquired, and if aufs tries to bind-mount and lazy-umount
+ here, then it may cause a deadlock.
+ + in user-space, bind-mount doesn't invoke the mount helper.
+- since /proc shows dev and ino, aufs has to give the vma this info. It
+ means a new member vm_prinode would be necessary. This is essentially
+ equivalent to vm_prfile described above.
+
+I have to give up this "looks-smarter" approach.
diff --git a/Documentation/filesystems/aufs/design/06xattr.txt b/Documentation/filesystems/aufs/design/06xattr.txt
new file mode 100644
index 000000000..7bfa94f7b
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/06xattr.txt
@@ -0,0 +1,81 @@
+
+# Copyright (C) 2014-2016 Junjiro R. Okajima
+
+Listing XATTR/EA and getting the value
+----------------------------------------------------------------------
+For the standard inode attributes (owner, group, timestamps, etc.),
+aufs shows the values from the topmost existing file. This behaviour
+is good for the non-dir entries since the behaviour exactly matches
+the shown information. But for the directories, aufs considers all the
+entries with the same name on the lower branches. Which means, if one
+of the lower entries rejects a readdir call, then aufs returns an
+error even if the topmost entry allows it. This behaviour is necessary
+to respect the branch fs's security, but can confuse users since the
+user-visible standard attributes don't match the behaviour.
+To address this issue, aufs has a mount option called dirperm1 which
+checks the permission for the topmost entry only, and ignores the
+lower entries' permissions.
+
+A similar issue can happen around XATTR.
+The getxattr(2) and listxattr(2) families behave as if the dirperm1
+option were always set. Otherwise these very unpleasant situations
+would happen.
+- listxattr(2) may return duplicated entries.
+- users may never be able to remove or reset the XATTR.
+
+
+XATTR/EA support in the internal (copy,move)-(up,down)
+----------------------------------------------------------------------
+Generally the extended attributes of an inode are categorized as
+follows.
+- "security" for LSM and capability.
+- "system" for POSIX ACL; the 'acl' mount option is generally required
+ for the branch fs.
+- "trusted" for userspace; CAP_SYS_ADMIN is required.
+- "user" for userspace; the 'user_xattr' mount option is generally
+ required for the branch fs.
+
+Moreover there are some other categories. Aufs handles these rather
+unpopular categories as the ordinary ones, ie. there is no special
+condition nor exception.
+
+In copy-up, the support for XATTR on the dst branch may differ from
+that on the src branch. In this case, the copy-up operation will get an
+error and the original user operation which triggered the copy-up will
+fail. It can even happen that all copy-ups will fail.
+When both the src and dst branches support XATTR and an error occurs
+during copying XATTR, then the copy-up should obviously fail. That is a
+good reason and aufs should return an error to userspace. But when only
+the src branch supports that XATTR, aufs should not return an error.
+For example, the src branch supports ACL but the dst branch doesn't,
+because the dst branch may natively not support it, or may temporarily
+not support it due to the "noacl" mount option. Of course, the dst
+branch fs may NOT return an error even if the XATTR is not supported.
+It is totally up to the branch fs.
+
+Anyway when the aufs internal copy-up gets an error from the dst branch
+fs, then aufs tries removing the just-copied entry and returns the
+error to userspace. In the worst case, all copy-ups will fail.
+
+For the copy-up operation, there are two basic approaches.
+- copy the specified XATTR only (by category above), and return the
+ error unconditionally if it happens.
+- copy all XATTR, and ignore the error on the specified category only.
+
+In order to support XATTR and to implement the correct behaviour, aufs
+chooses the latter approach and introduces some new branch attributes,
+"icexsec", "icexsys", "icextr", "icexusr", and "icexoth".
+They correspond to the XATTR namespaces (see above). Additionally, for
+convenience, "icex" is also provided, which means all "icex*" attributes
+are set (here the word "icex" stands for "ignore copy-error on XATTR").
+
+The meaning of these attributes is to ignore the error from setting
+XATTR on that branch.
+Note that aufs tries copying all XATTR unconditionally, and ignores the
+error from the dst branch according to the specified attributes.
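+
+For example, the attributes are specified per branch at mount time (a
+sketch; the branch paths are hypothetical, and the attribute syntax
+follows the aufs manual, so double-check it there):
+# mount -t aufs -o br:/rw=rw+icex:/ro=ro aufs /au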
+
+Some XATTR may have a default value. The default value may come from
+the parent dir or the environment. If the default value is set at
+file creation time, it will be overwritten by copy-up.
+Some contradiction may happen, I am afraid.
+Do we need another attribute to stop copying XATTR? I am unsure. For
+now, aufs implements the branch attributes to ignore the error.
diff --git a/Documentation/filesystems/aufs/design/07export.txt b/Documentation/filesystems/aufs/design/07export.txt
new file mode 100644
index 000000000..c23930b49
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/07export.txt
@@ -0,0 +1,45 @@
+
+# Copyright (C) 2005-2016 Junjiro R. Okajima
+
+Export Aufs via NFS
+----------------------------------------------------------------------
+Here is an approach.
+- like xino/xib, add a new file 'xigen' which stores aufs inode
+ generation.
+- iget_locked(): initialize aufs inode generation for a new inode, and
+ store it in xigen file.
+- destroy_inode(): increment the aufs inode generation and store it in
+ the xigen file. It is necessary even if the inode is not unlinked,
+ because any data of the inode may be changed by UDBA.
+- encode_fh(): for a root dir, simply return FILEID_ROOT. Otherwise
+ build the file handle from
+ + branch id (4 bytes)
+ + superblock generation (4 bytes)
+ + inode number (4 or 8 bytes)
+ + parent dir inode number (4 or 8 bytes)
+ + inode generation (4 bytes)
+ + return value of exportfs_encode_fh() for the parent on a branch (4
+ bytes)
+ + file handle for a branch (by exportfs_encode_fh())
+- fh_to_dentry():
+ + find the index of a branch from its id in the handle, and check
+ that it still exists in aufs.
+ + 1st level: get the inode number from the handle and search it in
+ the cache.
+ + 2nd level: if not found in the cache, get the parent inode number
+ from the handle and search it in the cache. Then open the found
+ parent dir, find the matching inode number by vfs_readdir() and get
+ its name, and call lookup_one_len() for the target dentry.
+ + 3rd level: if the parent dir is not cached, call
+ exportfs_decode_fh() for a branch and get the parent on the branch,
+ build a pathname of it, convert it to a pathname in aufs, and call
+ path_lookup(). Now aufs gets a parent dir dentry, and then handles
+ it as in the 2nd level.
+ + to open the dir, aufs needs a struct vfsmount. aufs keeps a
+ vfsmount for every branch, but not for itself. To get this,
+ (currently) aufs searches the current->nsproxy->mnt_ns list. It may
+ not be a good idea, but I didn't find another approach.
+ + test the generation of the obtained inode.
+- every inode operation: they may get EBUSY due to UDBA. In this case,
+ convert it into ESTALE for NFSD.
+- readdir(): call lockdep_on/off() because filldir in NFSD calls
+ lookup_one_len(), vfs_getattr(), encode_fh() and others.
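+
+For illustration, exporting an aufs mount then looks like any other NFS
+export (a sketch; the mount point, client name and fsid value are
+hypothetical, and whether fsid= is needed depends on the setup):
+# cat /etc/exports
+/au client.example.com(rw,fsid=100,no_subtree_check)
+# exportfs -ra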
diff --git a/Documentation/filesystems/aufs/design/08shwh.txt b/Documentation/filesystems/aufs/design/08shwh.txt
new file mode 100644
index 000000000..ad58ebe15
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/08shwh.txt
@@ -0,0 +1,39 @@
+
+# Copyright (C) 2005-2016 Junjiro R. Okajima
+
+Show Whiteout Mode (shwh)
+----------------------------------------------------------------------
+Generally aufs hides the names of whiteouts. But in some cases, showing
+them is very useful for users. For instance, when creating a new middle
+layer (branch) by merging existing layers.
+
+(borrowing aufs1 HOW-TO from a user, Michael Towers)
+When you have three branches,
+- Bottom: 'system', squashfs (underlying base system), read-only
+- Middle: 'mods', squashfs, read-only
+- Top: 'overlay', ram (tmpfs), read-write
+
+The top layer is loaded at boot time and saved at shutdown, to preserve
+the changes made to the system during the session.
+When larger changes have been made, or smaller changes have accumulated,
+the size of the saved top layer data grows. At this point, it would be
+nice to be able to merge the two overlay branches ('mods' and 'overlay')
+and rewrite the 'mods' squashfs, clearing the top layer and thus
+restoring save and load speed.
+
+This merging is simplified by the use of another aufs mount, of just the
+two overlay branches using the 'shwh' option.
+# mount -t aufs -o ro,shwh,br:/livesys/overlay=ro+wh:/livesys/mods=rr+wh \
+ aufs /livesys/merge_union
+
+A merged view of these two branches is then available at
+/livesys/merge_union, and the new feature is that the whiteouts are
+visible!
+Note that in 'shwh' mode the aufs mount must be 'ro', which will disable
+writing to all branches. Also the default mode for all branches is 'ro'.
+It is now possible to save the combined contents of the two overlay
+branches to a new squashfs, e.g.:
+# mksquashfs /livesys/merge_union /path/to/newmods.squash
+
+This new squashfs archive can be stored on the boot device and the
+initramfs will use it to replace the old one at the next boot.
diff --git a/Documentation/filesystems/aufs/design/10dynop.txt b/Documentation/filesystems/aufs/design/10dynop.txt
new file mode 100644
index 000000000..49afc5899
--- /dev/null
+++ b/Documentation/filesystems/aufs/design/10dynop.txt
@@ -0,0 +1,34 @@
+
+# Copyright (C) 2010-2016 Junjiro R. Okajima
+
+Dynamically customizable FS operations
+----------------------------------------------------------------------
+Generally FS operations (struct inode_operations, struct
+address_space_operations, struct file_operations, etc.) are defined as
+"static const", but it never means that FS have only one set of
+operation. Some FS have multiple sets of them. For instance, ext2 has
+three sets, one for XIP, for NOBH, and for normal.
+Since aufs overrides and redirects these operations, sometimes aufs has
+to change its behaviour according to the branch FS type. More importantly
+VFS acts differently if a function (member in the struct) is set or
+not. It means aufs should have several sets of operations and select one
+among them according to the branch FS definition.
+
+In order to solve this problem and not to affect the behaviour of VFS,
+aufs defines these operations dynamically. For instance, aufs defines a
+dummy direct_IO function for struct address_space_operations, but it may
+not actually be set in the address_space_operations. When the branch FS
+doesn't have it, aufs doesn't set it in its address_space_operations,
+while the function definition itself is still alive. So the behaviour
+itself will not change, and an error will be returned when direct_IO is
+not set.
+
+The lifetime of these dynamically generated operation objects is
+maintained by the aufs branch object. When the branch is removed from aufs,
+the reference counter of the object is decremented. When it reaches
+zero, the dynamically generated operation object will be freed.
+
+This approach is designed to support AIO (io_submit), Direct I/O and
+XIP (DAX) mainly.
+Currently this approach is applied to address_space_operations for
+regular files only.
diff --git a/Documentation/scheduler/sched-BFS.txt b/Documentation/scheduler/sched-BFS.txt
new file mode 100644
index 000000000..77a1e37b7
--- /dev/null
+++ b/Documentation/scheduler/sched-BFS.txt
@@ -0,0 +1,357 @@
+BFS - The Brain Fuck Scheduler by Con Kolivas.
+
+Goals.
+
+The goal of the Brain Fuck Scheduler, referred to as BFS from here on, is to
+completely do away with the complex designs of the past for the cpu process
+scheduler and instead implement one that is very simple in basic design.
+The main focus of BFS is to achieve excellent desktop interactivity and
+responsiveness without heuristics and tuning knobs that are difficult to
+understand, impossible to model and predict the effect of, and when tuned to
+one workload cause massive detriment to another.
+
+
+Design summary.
+
+BFS is best described as a single runqueue, O(n) lookup, earliest effective
+virtual deadline first design, loosely based on EEVDF (earliest eligible virtual
+deadline first) and my previous Staircase Deadline scheduler. Each component
+shall be described in order to understand the significance of, and reasoning for
+it. The codebase when the first stable version was released was approximately
+9000 lines less code than the existing mainline linux kernel scheduler (in
+2.6.31). This does not even take into account the removal of documentation and
+the cgroups code that is not used.
+
+Design reasoning.
+
+The single runqueue refers to the queued but not running processes for the
+entire system, regardless of the number of CPUs. The reason for going back to
+a single runqueue design is that once multiple runqueues are introduced,
+per-CPU or otherwise, there will be complex interactions as each runqueue will
+be responsible for the scheduling latency and fairness of the tasks only on its
+own runqueue, and to achieve fairness and low latency across multiple CPUs, any
+advantage in throughput of having CPU local tasks causes other disadvantages.
+This is due to requiring a very complex balancing system to at best achieve some
+semblance of fairness across CPUs and can only maintain relatively low latency
+for tasks bound to the same CPUs, not across them. To increase said fairness
+and latency across CPUs, the advantage of local runqueue locking, which makes
+for better scalability, is lost due to having to grab multiple locks.
+
+A significant feature of BFS is that all accounting is done purely based on CPU
+used and nowhere is sleep time used in any way to determine entitlement or
+interactivity. Interactivity "estimators" that use some kind of sleep/run
+algorithm are doomed to fail to detect all interactive tasks, and to falsely tag
+tasks that aren't interactive as being so. The reason for this is that it is
+close to impossible to determine, when a task is sleeping, whether it is
+doing so voluntarily, as in a userspace application waiting for input in the
+form of a mouse click or otherwise, or involuntarily, because it is waiting for
+another thread, process, I/O, kernel activity or whatever. Thus, such an
+estimator will introduce corner cases, and more heuristics will be required to
+cope with those corner cases, introducing more corner cases and failed
+interactivity detection and so on. Interactivity in BFS is built into the design
+by virtue of the fact that tasks that are waking up have not used up their quota
+of CPU time, and have earlier effective deadlines, thereby making it very likely
+they will preempt any CPU bound task of equivalent nice level. See below for
+more information on the virtual deadline mechanism. Even if they do not preempt
+a running task, because the rr interval is guaranteed to have a bounded upper
+limit on how long a task will wait, it will be scheduled within a timeframe
+that will not cause visible interface jitter.
+
+
+Design details.
+
+Task insertion.
+
+BFS inserts tasks into each relevant queue as an O(1) insertion into a double
+linked list. On insertion, *every* running queue is checked to see if the newly
+queued task can run on any idle queue, or preempt the lowest running task on the
+system. This is how the cross-CPU scheduling of BFS achieves significantly lower
+latency per extra CPU the system has. In this case the lookup is, in the worst
+case scenario, O(n) where n is the number of CPUs on the system.
+
+Data protection.
+
+BFS has one single lock protecting the process local data of every task in the
+global queue. Thus every insertion, removal and modification of task data in the
+global runqueue needs to grab the global lock. However, once a task is taken by
+a CPU, the CPU has its own local data copy of the running process' accounting
+information which only that CPU accesses and modifies (such as during a
+timer tick) thus allowing the accounting data to be updated lockless. Once a
+CPU has taken a task to run, it removes it from the global queue. Thus the
+global queue only ever has, at most,
+
+ (number of tasks requesting cpu time) - (number of logical CPUs) + 1
+
+tasks in the global queue. This value is relevant for the time taken to look up
+tasks during scheduling. This value will increase if many tasks have CPU
+affinity set in their policy to limit which CPUs they're allowed to run on, and
+those tasks outnumber the number of CPUs. The +1 is because when rescheduling a
+task, the CPU's
+currently running task is put back on the queue. Lookup will be described after
+the virtual deadline mechanism is explained.
+
+Virtual deadline.
+
+The key to achieving low latency, scheduling fairness, and "nice level"
+distribution in BFS is entirely in the virtual deadline mechanism. The related
+tunable in BFS is the rr_interval, or "round robin interval". This is the
+maximum time two SCHED_OTHER (or SCHED_NORMAL, the common scheduling policy)
+tasks of the same nice level will be running for, or looking at it the other
+way around, the longest duration two tasks of the same nice level will be
+delayed for. When a task requests cpu time, it is given a quota (time_slice)
+equal to the rr_interval and a virtual deadline. The virtual deadline is
+offset from the current time in jiffies by this equation:
+
+ jiffies + (prio_ratio * rr_interval)
+
+The prio_ratio is determined as a ratio compared to the baseline of nice -20
+and increases by 10% per nice level. The deadline is a virtual one only in that
+no guarantee is placed that a task will actually be scheduled by this time, but
+it is used to compare which task should go next. There are three components to
+how a task is next chosen. First is time_slice expiration. If a task runs out
+of its time_slice, it is descheduled, the time_slice is refilled, and the
+deadline reset to that formula above. Second is sleep, where a task no longer
+is requesting CPU for whatever reason. The time_slice and deadline are _not_
+adjusted in this case and are just carried over for when the task is next
+scheduled. Third is preemption, and that is when a newly waking task is deemed
+higher priority than a currently running task on any cpu by virtue of the fact
+that it has an earlier virtual deadline than the currently running task. The
+earlier deadline is the key to which task is next chosen for the first and
+second cases. Once a task is descheduled, it is put back on the queue, and an
+O(n) lookup of all queued-but-not-running tasks is done to determine which has
+the earliest deadline and that task is chosen to receive CPU next.
+
+The CPU proportion of different nice tasks works out to be approximately
+
+ (prio_ratio difference)^2
+
+The reason it is squared is that a task's deadline does not change while it is
+running unless it runs out of time_slice. Thus, even if the time actually
+passes the deadline of another task that is queued, it will not get CPU time
+unless the current running task deschedules, and the time "base" (jiffies) is
+constantly moving.
+
+Task lookup.
+
+BFS has 103 priority queues. 100 of these are dedicated to the static priority
+of realtime tasks, and the remaining 3 are, in order of best to worst priority,
+SCHED_ISO (isochronous), SCHED_NORMAL, and SCHED_IDLEPRIO (idle priority
+scheduling). When a task of these priorities is queued, a bitmap of running
+priorities is set showing which of these priorities has tasks waiting for CPU
+time. When a CPU is made to reschedule, the lookup for the next task to get
+CPU time is performed in the following way:
+
+First the bitmap is checked to see what static priority tasks are queued. If
+any realtime priorities are found, the corresponding queue is checked and the
+first task listed there is taken (provided CPU affinity is suitable) and lookup
+is complete. If the priority corresponds to a SCHED_ISO task, they are also
+taken in FIFO order (as they behave like SCHED_RR). If the priority corresponds
+to either SCHED_NORMAL or SCHED_IDLEPRIO, then the lookup becomes O(n). At this
+stage, every task in the runlist that corresponds to that priority is checked
+to see which has the earliest set deadline, and (provided it has suitable CPU
+affinity) it is taken off the runqueue and given the CPU. If a task has an
+expired deadline, it is taken and the rest of the lookup aborted (as they are
+chosen in FIFO order).
+
+Thus, the lookup is O(n) in the worst case only, where n is as described
+earlier, as tasks may be chosen before the whole task list is looked over.
+
+
+Scalability.
+
+The major limitations of BFS will be that of scalability, as the separate
+runqueue designs will have less lock contention as the number of CPUs rises.
+However they do not scale linearly even with separate runqueues as multiple
+runqueues will need to be locked concurrently on such designs to be able to
+achieve fair CPU balancing, to try and achieve some sort of nice-level fairness
+across CPUs, and to achieve low enough latency for tasks on a busy CPU when
+other CPUs would be more suited. BFS has the advantage that it requires no
+balancing algorithm whatsoever, as balancing occurs by proxy simply because
+all CPUs draw off the global runqueue, in priority and deadline order. Despite
+the fact that scalability is _not_ the prime concern of BFS, it both shows very
+good scalability to smaller numbers of CPUs and is likely a more scalable design
+at these numbers of CPUs.
+
+It also has some very low overhead scalability features built into the design
+when it has been deemed their overhead is so marginal that they're worth adding.
+The first is the local copy of the running process' data to the CPU it's running
+on to allow that data to be updated lockless where possible. Then there is
+deference paid to the last CPU a task was running on, by trying that CPU first
+when looking for an idle CPU to use the next time it's scheduled. Finally there
+is the notion of "sticky" tasks that are flagged when they are involuntarily
+descheduled, meaning they still want further CPU time. This sticky flag is
+used to bias heavily against those tasks being scheduled on a different CPU
+unless that CPU would be otherwise idle. When a cpu frequency governor is used
+that scales with CPU load, such as ondemand, sticky tasks are not scheduled
+on a different CPU at all, preferring instead to go idle. This means the CPU
+they were bound to is more likely to increase its speed while the other CPU
+will go idle, thus speeding up total task execution time and likely decreasing
+power usage. This is the only scenario where BFS will allow a CPU to go idle
+in preference to scheduling a task on the earliest available spare CPU.
+
+The real cost of migrating a task from one CPU to another is entirely dependent
+on the cache footprint of the task, how cache intensive the task is, how long
+it's been running on that CPU to take up the bulk of its cache, how big the CPU
+cache is, how fast and how layered the CPU cache is, how fast a context switch
+is... and so on. In other words, it's close to random in the real world where we
+do more than just one sole workload. The only thing we can be sure of is that
+it's not free. So BFS uses the principle that an idle CPU is a wasted CPU and
+utilising idle CPUs is more important than cache locality, and cache locality
+only plays a part after that.
+
+When choosing an idle CPU for a waking task, the cache locality is determined
+according to where the task last ran and then idle CPUs are ranked from best
+to worst to choose the most suitable idle CPU based on cache locality, NUMA
+node locality and hyperthread sibling busyness. They are chosen in the
+following order of preference (if idle):
+
+* Same core, idle or busy cache, idle threads
+* Other core, same cache, idle or busy cache, idle threads.
+* Same node, other CPU, idle cache, idle threads.
+* Same node, other CPU, busy cache, idle threads.
+* Same core, busy threads.
+* Other core, same cache, busy threads.
+* Same node, other CPU, busy threads.
+* Other node, other CPU, idle cache, idle threads.
+* Other node, other CPU, busy cache, idle threads.
+* Other node, other CPU, busy threads.
+
+This shows the SMT or "hyperthread" awareness in the design as well which will
+choose a real idle core first before a logical SMT sibling which already has
+tasks on the physical CPU.
+
+Early benchmarking of BFS suggested scalability dropped off at the 16 CPU mark.
+However this benchmarking was performed on an earlier design that was far less
+scalable than the current one so it's hard to know how scalable it is in terms
+of both CPUs (due to the global runqueue) and heavily loaded machines (due to
+O(n) lookup) at this stage. Note that in terms of scalability, the number of
+_logical_ CPUs matters, not the number of _physical_ CPUs. Thus, a dual (2x)
+quad core (4X) hyperthreaded (2X) machine is effectively a 16X. Newer benchmark
+results are very promising indeed. Benchmark contributions are most welcome.
+
+
+Features
+
+As the initial prime target audience for BFS was the average desktop user, it
+was designed to not need tweaking, tuning or have features set to obtain benefit
+from it. Thus the number of knobs and features has been kept to an absolute
+minimum and should not require extra user input for the vast majority of cases.
+There are precisely 2 tunables, and 2 extra scheduling policies. The rr_interval
+and iso_cpu tunables, and the SCHED_ISO and SCHED_IDLEPRIO policies. In addition
+to this, BFS also uses sub-tick accounting. What BFS does _not_ now feature is
+support for CGROUPS. The average user should neither need to know what these
+are, nor should they need to be using them to have good desktop behaviour.
+
+There are two "scheduler" tunables, the round robin interval and the
+interactive flag. These can be accessed in
+
+ /proc/sys/kernel/rr_interval
+ /proc/sys/kernel/interactive
+
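+For example, they can be read and changed like this (run as root; the
+values shown are only illustrative):
+
+ # cat /proc/sys/kernel/rr_interval
+ 6
+ # echo 8 > /proc/sys/kernel/rr_interval
+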
+rr_interval value
+
+The value is in milliseconds, and the default value is set to 6ms. Valid values
+are from 1 to 1000. Decreasing the value will decrease latencies at the cost of
+decreasing throughput, while increasing it will improve throughput, but at the
+cost of worsening latencies. The accuracy of the rr interval is limited by HZ
+resolution of the kernel configuration. Thus, the worst case latencies are
+usually slightly higher than this actual value. BFS uses "dithering" to try and
+minimise the effect the Hz limitation has. The default value of 6 is not an
+arbitrary one. It is based on the fact that humans can detect jitter at
+approximately 7ms, so aiming for much lower latencies is pointless under most
+circumstances. It is worth noting this fact when comparing the latency
+performance of BFS to other schedulers. Worst case latencies being higher than
+7ms are far worse than average latencies not being in the microsecond range.
+Experimentation has shown that rr intervals being increased up to 300 can
+improve throughput but beyond that, scheduling noise from elsewhere prevents
+further demonstrable throughput gains.
+
+interactive flag
+
+This is a simple boolean that can be set to 1 or 0, set to 1 by default. This
+sacrifices some of the interactive performance by giving tasks a degree of
+soft affinity for logical CPUs when it will lead to improved throughput, but
+enabling it also sacrifices the completely deterministic nature with respect
+to latency that BFS otherwise normally provides, and subsequently leads to
+slightly higher latencies and a noticeably less interactive system.
+
+
+Isochronous scheduling.
+
+Isochronous scheduling is a unique scheduling policy designed to provide
+near-real-time performance to unprivileged (ie non-root) users without the
+ability to starve the machine indefinitely. Isochronous tasks (which means
+"same time") are set using, for example, the schedtool application like so:
+
+ schedtool -I -e amarok
+
+This will start the audio application "amarok" as SCHED_ISO. How SCHED_ISO works
+is that it has a priority level between true realtime tasks and SCHED_NORMAL
+which would allow them to preempt all normal tasks, in a SCHED_RR fashion (ie,
+if multiple SCHED_ISO tasks are running, they purely round robin at rr_interval
+rate). However if ISO tasks run for more than a tunable finite amount of time,
+they are then demoted back to SCHED_NORMAL scheduling. This finite amount of
+time is the percentage of _total CPU_ available across the machine, configurable
+as a percentage in the following "resource handling" tunable (as opposed to a
+scheduler tunable):
+
+ /proc/sys/kernel/iso_cpu
+
+and is set to 70% by default. It is calculated over a rolling 5 second average.
+Because it is the total CPU available, it means that on a multi CPU machine, it
+is possible to have an ISO task running as realtime scheduling indefinitely on
+just one CPU, as the other CPUs will be available. Setting this to 100 is the
+equivalent of giving all users SCHED_RR access and setting it to 0 removes the
+ability to run any pseudo-realtime tasks.
+
+A feature of BFS is that it detects when an application tries to obtain a
+realtime policy (SCHED_RR or SCHED_FIFO) and the caller does not have the
+appropriate privileges to use those policies. When it detects this, it will
+give the task SCHED_ISO policy instead. Thus it is transparent to the user.
+Because some applications constantly set their policy as well as their nice
+level, there is potential for them to undo the override specified by the user
+on the command line of setting the policy to SCHED_ISO. To counter this, once
+a task has been set to SCHED_ISO policy, it needs superuser privileges to set
+it back to SCHED_NORMAL. This will ensure the task remains ISO and all child
+processes and threads will also inherit the ISO policy.
+
+Idleprio scheduling.
+
+Idleprio scheduling is a scheduling policy designed to give out CPU to a task
+_only_ when the CPU would be otherwise idle. The idea behind this is to allow
+ultra low priority tasks to be run in the background that have virtually no
+effect on the foreground tasks. This is ideally suited to distributed computing
+clients (like setiathome, folding, mprime etc) but can also be used to start
+a video encode or so on without any slowdown of other tasks. To prevent this
+policy from grabbing shared resources and holding them indefinitely, if it
+detects a state where the task is waiting on I/O, the machine is about to
+suspend to ram and so on, it will transiently schedule them as SCHED_NORMAL. As
+per the Isochronous task management, once a task has been scheduled as IDLEPRIO,
+it cannot be put back to SCHED_NORMAL without superuser privileges. Tasks can
+be set to start as SCHED_IDLEPRIO with the schedtool command like so:
+
+ schedtool -D -e ./mprime
+
+Subtick accounting.
+
+It is surprisingly difficult to get accurate CPU accounting, and in many cases,
+the accounting is done by simply determining what is happening at the precise
+moment a timer tick fires off. This becomes increasingly inaccurate as the
+timer tick frequency (HZ) is lowered. It is possible to create an application
+which uses almost 100% CPU, yet by being descheduled at the right time, records
+zero CPU usage. While the main problem with this is that there are possible
+security implications, it is also difficult to determine how much CPU a task
+really does use. BFS tries to use the sub-tick accounting from the TSC clock,
+where possible, to determine real CPU usage. This is not entirely reliable, but
+is far more likely to produce accurate CPU usage data than the existing designs
+and will not show tasks as consuming no CPU usage when they actually are. Thus,
+the amount of CPU reported as being used by BFS will more accurately represent
+how much CPU the task itself is using (as is shown for example by the 'time'
+application), so the reported values may be quite different to other schedulers.
+Values reported as the 'load' are more prone to problems with this design, but
+per process values are closer to real usage. When comparing throughput of BFS
+to other designs, it is important to compare the actual completed work in terms
+of total wall clock time taken and total work done, rather than the reported
+"cpu usage".
+
+
+Con Kolivas <kernel@kolivas.org> Tue, 5 Apr 2011
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 87119dc9b..f77a2514e 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -39,6 +39,7 @@ show up in /proc/sys/kernel:
- hung_task_timeout_secs
- hung_task_warnings
- kexec_load_disabled
+- iso_cpu
- kptr_restrict
- kstack_depth_to_print [ X86 only ]
- l2cr [ PPC only ]
@@ -67,6 +68,7 @@ show up in /proc/sys/kernel:
- randomize_va_space
- real-root-dev ==> Documentation/initrd.txt
- reboot-cmd [ SPARC only ]
+- rr_interval
- rtsig-max
- rtsig-nr
- sem
@@ -396,6 +398,16 @@ kernel stack.
==============================================================
+iso_cpu: (BFS CPU scheduler only).
+
+This sets the percentage of CPU for which unprivileged SCHED_ISO tasks
+can run effectively at realtime priority, averaged over a rolling five
+seconds across the -whole- system, meaning all cpus.
+
+Set to 70 (percent) by default.
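+
+For example (run as root; the value is only illustrative):
+
+ sysctl -w kernel.iso_cpu=90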
+
+==============================================================
+
l2cr: (PPC only)
This flag controls the L2 cache of G3 processor boards. If
@@ -750,6 +762,20 @@ rebooting. ???
==============================================================
+rr_interval: (BFS CPU scheduler only)
+
+This is the smallest duration that any cpu process scheduling unit
+will run for. Increasing this value can increase throughput of cpu
+bound tasks substantially but at the expense of increased latencies
+overall. Conversely decreasing it will decrease average and maximum
+latencies but at the expense of throughput. This value is in
+milliseconds and the default value chosen depends on the number of
+cpus available at scheduler initialisation with a minimum of 6.
+
+Valid values are from 1-1000.
+
+==============================================================
+
rtsig-max & rtsig-nr:
The file rtsig-max can be used to tune the maximum number
diff --git a/MAINTAINERS b/MAINTAINERS
index ef7947adf..cd862e1df 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -228,13 +228,13 @@ F: kernel/sys_ni.c
ABIT UGURU 1,2 HARDWARE MONITOR DRIVER
M: Hans de Goede <hdegoede@redhat.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: drivers/hwmon/abituguru.c
ABIT UGURU 3 HARDWARE MONITOR DRIVER
M: Alistair John Strachan <alistair@devzero.co.uk>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: drivers/hwmon/abituguru3.c
@@ -386,14 +386,14 @@ F: Documentation/devicetree/bindings/net/ieee802154/adf7242.txt
ADM1025 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/adm1025
F: drivers/hwmon/adm1025.c
ADM1029 HARDWARE MONITOR DRIVER
M: Corentin Labbe <clabbe.montjoie@gmail.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: drivers/hwmon/adm1029.c
@@ -438,7 +438,7 @@ F: drivers/video/backlight/adp8860_bl.c
ADS1015 HARDWARE MONITOR DRIVER
M: Dirk Eibach <eibach@gdsys.de>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/ads1015
F: drivers/hwmon/ads1015.c
@@ -451,7 +451,7 @@ F: drivers/macintosh/therm_adt746x.c
ADT7475 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/adt7475
F: drivers/hwmon/adt7475.c
@@ -628,7 +628,7 @@ F: include/linux/ccp.h
AMD FAM15H PROCESSOR POWER MONITORING DRIVER
M: Huang Rui <ray.huang@amd.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Supported
F: Documentation/hwmon/fam15h_power
F: drivers/hwmon/fam15h_power.c
@@ -786,7 +786,7 @@ F: drivers/input/mouse/bcm5974.c
APPLE SMC DRIVER
M: Henrik Rydberg <rydberg@bitmath.org>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Odd fixes
F: drivers/hwmon/applesmc.c
@@ -1825,7 +1825,7 @@ F: include/media/i2c/as3645a.h
ASC7621 HARDWARE MONITOR DRIVER
M: George Joseph <george.joseph@fairview5.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/asc7621
F: drivers/hwmon/asc7621.c
@@ -1918,7 +1918,7 @@ F: drivers/net/wireless/ath/carl9170/
ATK0110 HWMON DRIVER
M: Luca Tettamanti <kronos.it@gmail.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: drivers/hwmon/asus_atk0110.c
@@ -2082,6 +2082,19 @@ F: include/linux/audit.h
F: include/uapi/linux/audit.h
F: kernel/audit*
+AUFS (advanced multi layered unification filesystem) FILESYSTEM
+M: "J. R. Okajima" <hooanon05g@gmail.com>
+L: linux-unionfs@vger.kernel.org
+L: aufs-users@lists.sourceforge.net (members only)
+W: http://aufs.sourceforge.net
+T: git://github.com/sfjro/aufs4-linux.git
+S: Supported
+F: Documentation/filesystems/aufs/
+F: Documentation/ABI/testing/debugfs-aufs
+F: Documentation/ABI/testing/sysfs-aufs
+F: fs/aufs/
+F: include/uapi/linux/aufs_type.h
+
AUXILIARY DISPLAY DRIVERS
M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
W: http://miguelojeda.es/auxdisplay.htm
@@ -3037,7 +3050,7 @@ F: mm/swap_cgroup.c
CORETEMP HARDWARE MONITORING DRIVER
M: Fenghua Yu <fenghua.yu@intel.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/coretemp
F: drivers/hwmon/coretemp.c
@@ -3625,7 +3638,7 @@ T: git git://git.infradead.org/users/vkoul/slave-dma.git
DME1737 HARDWARE MONITOR DRIVER
M: Juerg Haefliger <juergh@gmail.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/dme1737
F: drivers/hwmon/dme1737.c
@@ -4322,7 +4335,7 @@ F: include/video/exynos_mipi*
F71805F HARDWARE MONITORING DRIVER
M: Jean Delvare <jdelvare@suse.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/f71805f
F: drivers/hwmon/f71805f.c
@@ -4401,7 +4414,7 @@ F: fs/*
FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
M: Riku Voipio <riku.voipio@iki.fi>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: drivers/hwmon/f75375s.c
F: include/linux/f75375s.h
@@ -4958,8 +4971,8 @@ F: drivers/media/usb/hackrf/
HARDWARE MONITORING
M: Jean Delvare <jdelvare@suse.com>
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
-W: http://www.lm-sensors.org/
+L: linux-hwmon@vger.kernel.org
+W: http://hwmon.wiki.kernel.org/
T: quilt http://jdelvare.nerim.net/devel/linux/jdelvare-hwmon/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
S: Maintained
@@ -5484,7 +5497,7 @@ F: drivers/usb/atm/ueagle-atm.c
INA209 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/ina209
F: Documentation/devicetree/bindings/i2c/ina209.txt
@@ -5492,7 +5505,7 @@ F: drivers/hwmon/ina209.c
INA2XX HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/ina2xx
F: drivers/hwmon/ina2xx.c
@@ -5985,7 +5998,7 @@ F: drivers/isdn/hardware/eicon/
IT87 HARDWARE MONITORING DRIVER
M: Jean Delvare <jdelvare@suse.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/it87
F: drivers/hwmon/it87.c
@@ -6021,7 +6034,7 @@ F: drivers/media/dvb-frontends/ix2505v*
JC42.4 TEMPERATURE SENSOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: drivers/hwmon/jc42.c
F: Documentation/hwmon/jc42
@@ -6071,14 +6084,14 @@ F: drivers/tty/serial/jsm/
K10TEMP HARDWARE MONITORING DRIVER
M: Clemens Ladisch <clemens@ladisch.de>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/k10temp
F: drivers/hwmon/k10temp.c
K8TEMP HARDWARE MONITORING DRIVER
M: Rudolf Marek <r.marek@assembler.cz>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/k8temp
F: drivers/hwmon/k8temp.c
@@ -6605,27 +6618,27 @@ F: net/llc/
LM73 HARDWARE MONITOR DRIVER
M: Guillaume Ligneul <guillaume.ligneul@gmail.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: drivers/hwmon/lm73.c
LM78 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/lm78
F: drivers/hwmon/lm78.c
LM83 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/lm83
F: drivers/hwmon/lm83.c
LM90 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/lm90
F: Documentation/devicetree/bindings/hwmon/lm90.txt
@@ -6633,7 +6646,7 @@ F: drivers/hwmon/lm90.c
LM95234 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/lm95234
F: drivers/hwmon/lm95234.c
@@ -6700,7 +6713,7 @@ F: drivers/scsi/sym53c8xx_2/
LTC4261 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/ltc4261
F: drivers/hwmon/ltc4261.c
@@ -6870,28 +6883,28 @@ F: include/uapi/linux/matroxfb.h
MAX16065 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/max16065
F: drivers/hwmon/max16065.c
MAX20751 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/max20751
F: drivers/hwmon/max20751.c
MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
M: "Hans J. Koch" <hjk@hansjkoch.de>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/max6650
F: drivers/hwmon/max6650.c
MAX6697 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/max6697
F: Documentation/devicetree/bindings/i2c/max6697.txt
@@ -7455,7 +7468,7 @@ F: drivers/scsi/NCR_D700.*
NCT6775 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/nct6775
F: drivers/hwmon/nct6775.c
@@ -8235,7 +8248,7 @@ F: drivers/video/logo/logo_parisc*
PC87360 HARDWARE MONITORING DRIVER
M: Jim Cromie <jim.cromie@gmail.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/pc87360
F: drivers/hwmon/pc87360.c
@@ -8247,7 +8260,7 @@ F: drivers/char/pc8736x_gpio.c
PC87427 HARDWARE MONITORING DRIVER
M: Jean Delvare <jdelvare@suse.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/pc87427
F: drivers/hwmon/pc87427.c
@@ -8601,8 +8614,8 @@ F: drivers/rtc/rtc-puv3.c
PMBUS HARDWARE MONITORING DRIVERS
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
-W: http://www.lm-sensors.org/
+L: linux-hwmon@vger.kernel.org
+W: http://hwmon.wiki.kernel.org/
W: http://www.roeck-us.net/linux/drivers/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
S: Maintained
@@ -8807,7 +8820,7 @@ F: drivers/media/usb/pwc/*
PWM FAN DRIVER
M: Kamil Debski <k.debski@samsung.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/hwmon/pwm-fan.txt
F: Documentation/hwmon/pwm-fan
@@ -10113,28 +10126,28 @@ F: Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
SMM665 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/smm665
F: drivers/hwmon/smm665.c
SMSC EMC2103 HARDWARE MONITOR DRIVER
M: Steve Glendinning <steve.glendinning@shawell.net>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/emc2103
F: drivers/hwmon/emc2103.c
SMSC SCH5627 HARDWARE MONITOR DRIVER
M: Hans de Goede <hdegoede@redhat.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Supported
F: Documentation/hwmon/sch5627
F: drivers/hwmon/sch5627.c
SMSC47B397 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/smsc47b397
F: drivers/hwmon/smsc47b397.c
@@ -11067,7 +11080,7 @@ F: include/linux/mmc/sh_mobile_sdhi.h
TMP401 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/tmp401
F: drivers/hwmon/tmp401.c
@@ -11819,14 +11832,14 @@ F: Documentation/networking/vrf.txt
VT1211 HARDWARE MONITOR DRIVER
M: Juerg Haefliger <juergh@gmail.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/vt1211
F: drivers/hwmon/vt1211.c
VT8231 HARDWARE MONITOR DRIVER
M: Roger Lucas <vt8231@hiddenengine.co.uk>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: drivers/hwmon/vt8231.c
@@ -11845,21 +11858,21 @@ F: drivers/w1/
W83791D HARDWARE MONITORING DRIVER
M: Marc Hulsman <m.hulsman@tudelft.nl>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/w83791d
F: drivers/hwmon/w83791d.c
W83793 HARDWARE MONITORING DRIVER
M: Rudolf Marek <r.marek@assembler.cz>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/hwmon/w83793
F: drivers/hwmon/w83793.c
W83795 HARDWARE MONITORING DRIVER
M: Jean Delvare <jdelvare@suse.com>
-L: lm-sensors@lm-sensors.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: drivers/hwmon/w83795.c
diff --git a/Makefile b/Makefile
index 06fe3c37e..554c873b1 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 5
-SUBLEVEL = 0
+SUBLEVEL = 1
EXTRAVERSION = -gnu
NAME = Blurry Fish Butt
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index 44a578c10..ab5d5701e 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -47,6 +47,14 @@
clocks = <&apbclk>;
clock-names = "stmmaceth";
max-speed = <100>;
+ mdio0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+ };
};
ehci@0x40000 {
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 57c1f3384..0352fb8d2 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -35,21 +35,6 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
\
m += nr >> 5; \
\
- /* \
- * ARC ISA micro-optimization: \
- * \
- * Instructions dealing with bitpos only consider lower 5 bits \
- * e.g (x << 33) is handled like (x << 1) by ASL instruction \
- * (mem pointer still needs adjustment to point to next word) \
- * \
- * Hence the masking to clamp @nr arg can be elided in general. \
- * \
- * However if @nr is a constant (above assumed in a register), \
- * and greater than 31, gcc can optimize away (x << 33) to 0, \
- * as overflow, given the 32-bit ISA. Thus masking needs to be \
- * done for const @nr, but no code is generated due to gcc \
- * const prop. \
- */ \
nr &= 0x1f; \
\
__asm__ __volatile__( \
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index 694ece8a0..27b17adea 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -129,15 +129,23 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
/*
- * Relaxed API for drivers which can handle any ordering themselves
+ * Relaxed API for drivers which can handle barrier ordering themselves
+ *
+ * Also, these are defined to perform little endian accesses.
+ * To provide the typical device register semantics of fixed endianness,
+ * the byte order is swapped for big endian builds.
+ *
+ * http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de
*/
#define readb_relaxed(c) __raw_readb(c)
-#define readw_relaxed(c) __raw_readw(c)
-#define readl_relaxed(c) __raw_readl(c)
+#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
+ __raw_readw(c)); __r; })
+#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
+ __raw_readl(c)); __r; })
#define writeb_relaxed(v,c) __raw_writeb(v,c)
-#define writew_relaxed(v,c) __raw_writew(v,c)
-#define writel_relaxed(v,c) __raw_writel(v,c)
+#define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c)
+#define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c)
#include <asm-generic/io.h>
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
index ff888d21c..f3e2b96c0 100644
--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -303,6 +303,7 @@
regulator-name = "mmc0-card-supply";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ regulator-always-on;
};
gpio_keys {
diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
index 569026e8f..da84e65b5 100644
--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
@@ -268,5 +268,6 @@
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
vin-supply = <&vcc_3v3_reg>;
+ regulator-always-on;
};
};
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 819aff5d5..727321078 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -279,7 +279,7 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
- if (pte_valid(pte)) {
+ if (pte_present(pte)) {
if (pte_sw_dirty(pte) && pte_write(pte))
pte_val(pte) &= ~PTE_RDONLY;
else
@@ -649,6 +649,7 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
* bits 0-1: present (must be zero)
* bits 2-7: swap type
* bits 8-57: swap offset
+ * bit 58: PTE_PROT_NONE (must be zero)
*/
#define __SWP_TYPE_SHIFT 2
#define __SWP_TYPE_BITS 6
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index a865d2a04..5de673ac9 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -433,6 +433,7 @@ static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned lo
return ioremap(phys_addr, size);
}
#define ioremap_cache ioremap_cache
+#define ioremap_uc ioremap_nocache
/*
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 998f632e7..5c80a0f9d 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -64,11 +64,6 @@ static struct timer_list spusched_timer;
static struct timer_list spuloadavg_timer;
/*
- * Priority of a normal, non-rt, non-niced'd process (aka nice level 0).
- */
-#define NORMAL_PRIO 120
-
-/*
* Frequency of the spu scheduler tick. By default we do one SPU scheduler
* tick for every 10 CPU scheduler ticks.
*/
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index c873e682b..2b2ced9dc 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -45,7 +45,7 @@ struct zpci_fmb {
u64 rpcit_ops;
u64 dma_rbytes;
u64 dma_wbytes;
-} __packed __aligned(16);
+} __packed __aligned(64);
enum zpci_state {
ZPCI_FN_STATE_RESERVED,
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index cd5a19138..c920b81be 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -1199,114 +1199,12 @@ cleanup_critical:
.quad .Lpsw_idle_lpsw
.Lcleanup_save_fpu_regs:
- TSTMSK __LC_CPU_FLAGS,_CIF_FPU
- bor %r14
- clg %r9,BASED(.Lcleanup_save_fpu_regs_done)
- jhe 5f
- clg %r9,BASED(.Lcleanup_save_fpu_regs_fp)
- jhe 4f
- clg %r9,BASED(.Lcleanup_save_fpu_regs_vx_high)
- jhe 3f
- clg %r9,BASED(.Lcleanup_save_fpu_regs_vx_low)
- jhe 2f
- clg %r9,BASED(.Lcleanup_save_fpu_fpc_end)
- jhe 1f
- lg %r2,__LC_CURRENT
- aghi %r2,__TASK_thread
-0: # Store floating-point controls
- stfpc __THREAD_FPU_fpc(%r2)
-1: # Load register save area and check if VX is active
- lg %r3,__THREAD_FPU_regs(%r2)
- TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
- jz 4f # no VX -> store FP regs
-2: # Store vector registers (V0-V15)
- VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3)
-3: # Store vector registers (V16-V31)
- VSTM %v16,%v31,256,%r3 # vstm 16,31,256(3)
- j 5f # -> done, set CIF_FPU flag
-4: # Store floating-point registers
- std 0,0(%r3)
- std 1,8(%r3)
- std 2,16(%r3)
- std 3,24(%r3)
- std 4,32(%r3)
- std 5,40(%r3)
- std 6,48(%r3)
- std 7,56(%r3)
- std 8,64(%r3)
- std 9,72(%r3)
- std 10,80(%r3)
- std 11,88(%r3)
- std 12,96(%r3)
- std 13,104(%r3)
- std 14,112(%r3)
- std 15,120(%r3)
-5: # Set CIF_FPU flag
- oi __LC_CPU_FLAGS+7,_CIF_FPU
- lg %r9,48(%r11) # return from save_fpu_regs
+ larl %r9,save_fpu_regs
br %r14
-.Lcleanup_save_fpu_fpc_end:
- .quad .Lsave_fpu_regs_fpc_end
-.Lcleanup_save_fpu_regs_vx_low:
- .quad .Lsave_fpu_regs_vx_low
-.Lcleanup_save_fpu_regs_vx_high:
- .quad .Lsave_fpu_regs_vx_high
-.Lcleanup_save_fpu_regs_fp:
- .quad .Lsave_fpu_regs_fp
-.Lcleanup_save_fpu_regs_done:
- .quad .Lsave_fpu_regs_done
.Lcleanup_load_fpu_regs:
- TSTMSK __LC_CPU_FLAGS,_CIF_FPU
- bnor %r14
- clg %r9,BASED(.Lcleanup_load_fpu_regs_done)
- jhe 1f
- clg %r9,BASED(.Lcleanup_load_fpu_regs_fp)
- jhe 2f
- clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_high)
- jhe 3f
- clg %r9,BASED(.Lcleanup_load_fpu_regs_vx)
- jhe 4f
- lg %r4,__LC_CURRENT
- aghi %r4,__TASK_thread
- lfpc __THREAD_FPU_fpc(%r4)
- TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
- lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
- jz 2f # -> no VX, load FP regs
-4: # Load V0 ..V15 registers
- VLM %v0,%v15,0,%r4
-3: # Load V16..V31 registers
- VLM %v16,%v31,256,%r4
- j 1f
-2: # Load floating-point registers
- ld 0,0(%r4)
- ld 1,8(%r4)
- ld 2,16(%r4)
- ld 3,24(%r4)
- ld 4,32(%r4)
- ld 5,40(%r4)
- ld 6,48(%r4)
- ld 7,56(%r4)
- ld 8,64(%r4)
- ld 9,72(%r4)
- ld 10,80(%r4)
- ld 11,88(%r4)
- ld 12,96(%r4)
- ld 13,104(%r4)
- ld 14,112(%r4)
- ld 15,120(%r4)
-1: # Clear CIF_FPU bit
- ni __LC_CPU_FLAGS+7,255-_CIF_FPU
- lg %r9,48(%r11) # return from load_fpu_regs
+ larl %r9,load_fpu_regs
br %r14
-.Lcleanup_load_fpu_regs_vx:
- .quad .Lload_fpu_regs_vx
-.Lcleanup_load_fpu_regs_vx_high:
- .quad .Lload_fpu_regs_vx_high
-.Lcleanup_load_fpu_regs_fp:
- .quad .Lload_fpu_regs_fp
-.Lcleanup_load_fpu_regs_done:
- .quad .Lload_fpu_regs_done
/*
* Integer constants
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 9220db5c9..93fc63ef6 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -327,6 +327,7 @@ static void __init setup_lowcore(void)
+ PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->current_task = (unsigned long) init_thread_union.thread_info.task;
lc->thread_info = (unsigned long) &init_thread_union;
+ lc->lpp = LPP_MAGIC;
lc->machine_flags = S390_lowcore.machine_flags;
lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 8f19c8f9d..8f75edc99 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -864,8 +864,11 @@ static inline int barsize(u8 size)
static int zpci_mem_init(void)
{
+ BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
+ __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));
+
zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
- 16, 0, NULL);
+ __alignof__(struct zpci_fmb), 0, NULL);
if (!zdev_fmb_cache)
goto error_fmb;
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
index ec29e14ec..bf25d7c79 100644
--- a/arch/sh/mm/kmap.c
+++ b/arch/sh/mm/kmap.c
@@ -36,6 +36,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
BUG_ON(!test_bit(PG_dcache_clean, &page->flags));
+ preempt_disable();
pagefault_disable();
idx = FIX_CMAP_END -
@@ -64,4 +65,5 @@ void kunmap_coherent(void *kvaddr)
}
pagefault_enable();
+ preempt_enable();
}
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index b821b13d3..8a6b57108 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -133,7 +133,7 @@ void mconsole_proc(struct mc_request *req)
ptr += strlen("proc");
ptr = skip_spaces(ptr);
- file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY);
+ file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY, 0);
if (IS_ERR(file)) {
mconsole_reply(req, "Failed to open file", 1, 0);
printk(KERN_ERR "open /proc/%s: %ld\n", ptr, PTR_ERR(file));
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c46662f64..b971ea4be 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -911,10 +911,26 @@ config SCHED_SMT
depends on SMP
---help---
SMT scheduler support improves the CPU scheduler's decision making
- when dealing with Intel Pentium 4 chips with HyperThreading at a
+ when dealing with Intel P4/Core 2 chips with HyperThreading at a
cost of slightly increased overhead in some places. If unsure say
N here.
+config SMT_NICE
+ bool "SMT (Hyperthreading) aware nice priority and policy support"
+ depends on SCHED_BFS && SCHED_SMT
+ default y
+ ---help---
+	  Enabling Hyperthreading on Intel CPUs decreases the effectiveness
+	  of 'nice' levels and of different scheduling policies
+	  (e.g. realtime) because CPU power is shared between hyperthreads.
+	  SMT nice support makes each logical CPU aware of what is running on
+	  its hyperthread siblings, maintaining an appropriate distribution of
+	  CPU time according to nice levels and scheduling policies at the
+	  expense of slightly increased overhead.
+
+ If unsure say Y here.
+
+
config SCHED_MC
def_bool y
prompt "Multi-core scheduler support"
@@ -1160,22 +1176,23 @@ config MICROCODE
bool "CPU microcode loading support"
default y
depends on CPU_SUP_AMD || CPU_SUP_INTEL
- depends on BLK_DEV_INITRD
select FW_LOADER
---help---
-
If you say Y here, you will be able to update the microcode on
- certain Intel and AMD processors. The Intel support is for the
- IA32 family, e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4,
- Xeon etc. The AMD support is for families 0x10 and later. You will
- obviously need the actual microcode binary data itself which is not
- shipped with the Linux kernel.
+ Intel and AMD processors. The Intel support is for the IA32 family,
+ e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4, Xeon etc. The
+ AMD support is for families 0x10 and later. You will obviously need
+	  the actual microcode binary data itself, which is not shipped with
+ the Linux kernel.
- This option selects the general module only, you need to select
- at least one vendor specific module as well.
+ The preferred method to load microcode from a detached initrd is described
+ in Documentation/x86/early-microcode.txt. For that you need to enable
+ CONFIG_BLK_DEV_INITRD in order for the loader to be able to scan the
+ initrd for microcode blobs.
- To compile this driver as a module, choose M here: the module
- will be called microcode.
+	  In addition, you can build the microcode into the kernel. For that you
+ need to enable FIRMWARE_IN_KERNEL and add the vendor-supplied microcode
+ to the CONFIG_EXTRA_FIRMWARE config option.
config MICROCODE_INTEL
bool "Intel microcode loading support"
@@ -1995,7 +2012,7 @@ config HOTPLUG_CPU
config BOOTPARAM_HOTPLUG_CPU0
bool "Set default setting of cpu0_hotpluggable"
default n
- depends on HOTPLUG_CPU
+ depends on HOTPLUG_CPU && !SCHED_BFS
---help---
Set whether default state of cpu0_hotpluggable is on or off.
@@ -2024,7 +2041,7 @@ config BOOTPARAM_HOTPLUG_CPU0
config DEBUG_HOTPLUG_CPU0
def_bool n
prompt "Debug CPU0 hotplug"
- depends on HOTPLUG_CPU
+ depends on HOTPLUG_CPU && !SCHED_BFS
---help---
Enabling this option offlines CPU0 (if CPU0 can be offlined) as
soon as possible and boots up userspace with CPU0 offlined. User
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 03663740c..1a4477ced 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -268,6 +268,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
+ struct thread_info *ti = pt_regs_to_thread_info(regs);
u32 cached_flags;
if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
@@ -275,12 +276,22 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
lockdep_sys_exit();
- cached_flags =
- READ_ONCE(pt_regs_to_thread_info(regs)->flags);
+ cached_flags = READ_ONCE(ti->flags);
if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
exit_to_usermode_loop(regs, cached_flags);
+#ifdef CONFIG_COMPAT
+ /*
+ * Compat syscalls set TS_COMPAT. Make sure we clear it before
+ * returning to user mode. We need to clear it *after* signal
+ * handling, because syscall restart has a fixup for compat
+ * syscalls. The fixup is exercised by the ptrace_syscall_32
+ * selftest.
+ */
+ ti->status &= ~TS_COMPAT;
+#endif
+
user_enter();
}
@@ -332,14 +343,6 @@ __visible inline void syscall_return_slowpath(struct pt_regs *regs)
if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
syscall_slow_exit_work(regs, cached_flags);
-#ifdef CONFIG_COMPAT
- /*
- * Compat syscalls set TS_COMPAT. Make sure we clear it before
- * returning to user mode.
- */
- ti->status &= ~TS_COMPAT;
-#endif
-
local_irq_disable();
prepare_exit_to_usermode(regs);
}
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index c80f6b6f3..e8c4fba52 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -644,8 +644,8 @@ static inline void entering_irq(void)
static inline void entering_ack_irq(void)
{
- ack_APIC_irq();
entering_irq();
+ ack_APIC_irq();
}
static inline void ipi_entering_ack_irq(void)
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 1815b7362..84b3d194a 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -141,6 +141,7 @@ struct irq_alloc_info {
struct irq_cfg {
unsigned int dest_apicid;
u8 vector;
+ u8 old_vector;
};
extern struct irq_cfg *irq_cfg(unsigned int irq);
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 1e1b07a5a..9d3a96c4d 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -3,6 +3,7 @@
#include <asm/cpu.h>
#include <linux/earlycpio.h>
+#include <linux/initrd.h>
#define native_rdmsr(msr, val1, val2) \
do { \
@@ -143,4 +144,29 @@ static inline void reload_early_microcode(void) { }
static inline bool
get_builtin_firmware(struct cpio_data *cd, const char *name) { return false; }
#endif
+
+static inline unsigned long get_initrd_start(void)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+ return initrd_start;
+#else
+ return 0;
+#endif
+}
+
+static inline unsigned long get_initrd_start_addr(void)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+#ifdef CONFIG_X86_32
+ unsigned long *initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
+
+ return (unsigned long)__pa_nodebug(*initrd_start_p);
+#else
+ return get_initrd_start();
+#endif
+#else /* CONFIG_BLK_DEV_INITRD */
+ return 0;
+#endif
+}
+
#endif /* _ASM_X86_MICROCODE_H */
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 7bcb861a0..5a2ed3ed2 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -165,6 +165,7 @@ struct x86_pmu_capability {
#define GLOBAL_STATUS_ASIF BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN BIT_ULL(58)
+#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(55)
/*
* IBS cpuid feature detection
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index 8b2d4bea9..39171b364 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -62,4 +62,6 @@ void xen_arch_register_cpu(int num);
void xen_arch_unregister_cpu(int num);
#endif
+extern void xen_set_iopl_mask(unsigned mask);
+
#endif /* _ASM_X86_XEN_HYPERVISOR_H */
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 3b670df4b..ad59d70bc 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -213,6 +213,7 @@ update:
*/
cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
d->move_in_progress = !cpumask_empty(d->old_domain);
+ d->cfg.old_vector = d->move_in_progress ? d->cfg.vector : 0;
d->cfg.vector = vector;
cpumask_copy(d->domain, vector_cpumask);
success:
@@ -655,46 +656,97 @@ void irq_complete_move(struct irq_cfg *cfg)
}
/*
- * Called with @desc->lock held and interrupts disabled.
+ * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
*/
void irq_force_complete_move(struct irq_desc *desc)
{
struct irq_data *irqdata = irq_desc_get_irq_data(desc);
struct apic_chip_data *data = apic_chip_data(irqdata);
struct irq_cfg *cfg = data ? &data->cfg : NULL;
+ unsigned int cpu;
if (!cfg)
return;
- __irq_complete_move(cfg, cfg->vector);
-
/*
* This is tricky. If the cleanup of @data->old_domain has not been
* done yet, then the following setaffinity call will fail with
* -EBUSY. This can leave the interrupt in a stale state.
*
- * The cleanup cannot make progress because we hold @desc->lock. So in
- * case @data->old_domain is not yet cleaned up, we need to drop the
- * lock and acquire it again. @desc cannot go away, because the
- * hotplug code holds the sparse irq lock.
+ * All CPUs are stuck in stop machine with interrupts disabled so
+ * calling __irq_complete_move() would be completely pointless.
*/
raw_spin_lock(&vector_lock);
- /* Clean out all offline cpus (including ourself) first. */
+ /*
+ * Clean out all offline cpus (including the outgoing one) from the
+ * old_domain mask.
+ */
cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
- while (!cpumask_empty(data->old_domain)) {
+
+ /*
+ * If move_in_progress is cleared and the old_domain mask is empty,
+	 * then there is nothing to clean up. fixup_irqs() will take care of
+ * the stale vectors on the outgoing cpu.
+ */
+ if (!data->move_in_progress && cpumask_empty(data->old_domain)) {
raw_spin_unlock(&vector_lock);
- raw_spin_unlock(&desc->lock);
- cpu_relax();
- raw_spin_lock(&desc->lock);
+ return;
+ }
+
+ /*
+ * 1) The interrupt is in move_in_progress state. That means that we
+ * have not seen an interrupt since the io_apic was reprogrammed to
+ * the new vector.
+ *
+ * 2) The interrupt has fired on the new vector, but the cleanup IPIs
+ * have not been processed yet.
+ */
+ if (data->move_in_progress) {
/*
- * Reevaluate apic_chip_data. It might have been cleared after
- * we dropped @desc->lock.
+ * In theory there is a race:
+ *
+ * set_ioapic(new_vector) <-- Interrupt is raised before update
+ * is effective, i.e. it's raised on
+ * the old vector.
+ *
+ * So if the target cpu cannot handle that interrupt before
+ * the old vector is cleaned up, we get a spurious interrupt
+ * and in the worst case the ioapic irq line becomes stale.
+ *
+		 * But in case of cpu hotplug this should be a non-issue
+		 * because if the affinity update happens right before all
+		 * cpus rendezvous in stop machine, there is no way that the
+		 * interrupt can be blocked on the target cpu because all cpus
+		 * loop first with interrupts enabled in stop machine, so the
+ * old vector is not yet cleaned up when the interrupt fires.
+ *
+ * So the only way to run into this issue is if the delivery
+ * of the interrupt on the apic/system bus would be delayed
+ * beyond the point where the target cpu disables interrupts
+ * in stop machine. I doubt that it can happen, but at least
+		 * there is a theoretical chance. Virtualization might be
+ * able to expose this, but AFAICT the IOAPIC emulation is not
+ * as stupid as the real hardware.
+ *
+ * Anyway, there is nothing we can do about that at this point
+ * w/o refactoring the whole fixup_irq() business completely.
+ * We print at least the irq number and the old vector number,
+ * so we have the necessary information when a problem in that
+ * area arises.
*/
- data = apic_chip_data(irqdata);
- if (!data)
- return;
- raw_spin_lock(&vector_lock);
+ pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
+ irqdata->irq, cfg->old_vector);
}
+ /*
+ * If old_domain is not empty, then other cpus still have the irq
+ * descriptor set in their vector array. Clean it up.
+ */
+ for_each_cpu(cpu, data->old_domain)
+ per_cpu(vector_irq, cpu)[cfg->old_vector] = VECTOR_UNUSED;
+
+	/* Clean up the leftovers of the (half-finished) move */
+ cpumask_clear(data->old_domain);
+ data->move_in_progress = 0;
raw_spin_unlock(&vector_lock);
}
#endif
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index de8a164a6..f9c7f3302 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -551,10 +551,14 @@ scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
cd.data = NULL;
cd.size = 0;
- cd = find_cpio_data(p, (void *)start, size, &offset);
- if (!cd.data) {
+ /* try built-in microcode if no initrd */
+ if (!size) {
if (!load_builtin_intel_microcode(&cd))
return UCODE_ERROR;
+ } else {
+ cd = find_cpio_data(p, (void *)start, size, &offset);
+ if (!cd.data)
+ return UCODE_ERROR;
}
return get_matching_model_microcode(0, start, cd.data, cd.size,
@@ -690,7 +694,7 @@ int __init save_microcode_in_initrd_intel(void)
if (count == 0)
return ret;
- copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, initrd_start, count);
+ copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, get_initrd_start(), count);
ret = save_microcode(&mc_saved_data, mc_saved, count);
if (ret)
pr_err("Cannot save microcode patches from initrd.\n");
@@ -728,16 +732,20 @@ void __init load_ucode_intel_bsp(void)
struct boot_params *p;
p = (struct boot_params *)__pa_nodebug(&boot_params);
- start = p->hdr.ramdisk_image;
size = p->hdr.ramdisk_size;
- _load_ucode_intel_bsp(
- (struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
- (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
- start, size);
+ /*
+ * Set start only if we have an initrd image. We cannot use initrd_start
+ * because it is not set that early yet.
+ */
+ start = (size ? p->hdr.ramdisk_image : 0);
+
+ _load_ucode_intel_bsp((struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
+ (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
+ start, size);
#else
- start = boot_params.hdr.ramdisk_image + PAGE_OFFSET;
size = boot_params.hdr.ramdisk_size;
+ start = (size ? boot_params.hdr.ramdisk_image + PAGE_OFFSET : 0);
_load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, start, size);
#endif
@@ -748,20 +756,14 @@ void load_ucode_intel_ap(void)
struct mc_saved_data *mc_saved_data_p;
struct ucode_cpu_info uci;
unsigned long *mc_saved_in_initrd_p;
- unsigned long initrd_start_addr;
enum ucode_state ret;
#ifdef CONFIG_X86_32
- unsigned long *initrd_start_p;
- mc_saved_in_initrd_p =
- (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
+ mc_saved_in_initrd_p = (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
- initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
- initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
#else
- mc_saved_data_p = &mc_saved_data;
mc_saved_in_initrd_p = mc_saved_in_initrd;
- initrd_start_addr = initrd_start;
+ mc_saved_data_p = &mc_saved_data;
#endif
/*
@@ -773,7 +775,7 @@ void load_ucode_intel_ap(void)
collect_cpu_info_early(&uci);
ret = load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
- initrd_start_addr, &uci);
+ get_initrd_start_addr(), &uci);
if (ret != UCODE_OK)
return;
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 1b443db2d..6532f5b40 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -596,6 +596,19 @@ void x86_pmu_disable_all(void)
}
}
+/*
+ * A PMI may land after enabled=0. The PMI can hit either before or
+ * after disable_all.
+ *
+ * If PMI hits before disable_all, the PMU will be disabled in the NMI handler.
+ * It will not be re-enabled in the NMI handler again, because enabled=0. After
+ * handling the NMI, disable_all will be called, which will not change the
+ * state either. If PMI hits after disable_all, the PMU is already disabled
+ * before entering NMI handler. The NMI handler will not change the state
+ * either.
+ *
+ * So either situation is harmless.
+ */
static void x86_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 7bb61e32f..98be6d6d3 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -586,6 +586,7 @@ struct x86_pmu {
pebs_broken :1,
pebs_prec_dist :1;
int pebs_record_size;
+ int pebs_buffer_size;
void (*drain_pebs)(struct pt_regs *regs);
struct event_constraint *pebs_constraints;
void (*pebs_aliases)(struct perf_event *event);
@@ -904,6 +905,8 @@ void intel_pmu_lbr_init_skl(void);
void intel_pmu_lbr_init_knl(void);
+void intel_pmu_pebs_data_source_nhm(void);
+
int intel_pmu_setup_lbr_filter(struct perf_event *event);
void intel_pt_interrupt(void);
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index fed2ab1f1..760aec1e8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1502,7 +1502,15 @@ static __initconst const u64 knl_hw_cache_extra_regs
};
/*
- * Use from PMIs where the LBRs are already disabled.
+ * Used from PMIs where the LBRs are already disabled.
+ *
+ * This function may be called consecutively. It is required to remain in the
+ * disabled state across consecutive calls.
+ *
+ * During consecutive calls, the same disable value will be written to related
+ * registers, so the PMU state remains unchanged. hw.state in
+ * intel_bts_disable_local will remain PERF_HES_STOPPED too in consecutive
+ * calls.
*/
static void __intel_pmu_disable_all(void)
{
@@ -1884,6 +1892,16 @@ again:
if (__test_and_clear_bit(62, (unsigned long *)&status)) {
handled++;
x86_pmu.drain_pebs(regs);
+ /*
+		 * There are cases where, even though the PEBS ovfl bit is set
+ * in GLOBAL_OVF_STATUS, the PEBS events may also have their
+ * overflow bits set for their counters. We must clear them
+ * here because they have been processed as exact samples in
+ * the drain_pebs() routine. They must not be processed again
+ * in the for_each_bit_set() loop for regular samples below.
+ */
+ status &= ~cpuc->pebs_enabled;
+ status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
}
/*
@@ -1929,7 +1947,10 @@ again:
goto again;
done:
- __intel_pmu_enable_all(0, true);
+ /* Only restore PMU state when it's active. See x86_pmu_disable(). */
+ if (cpuc->enabled)
+ __intel_pmu_enable_all(0, true);
+
/*
* Only unmask the NMI after the overflow counters
* have been reset. This avoids spurious NMIs on
@@ -3396,6 +3417,7 @@ __init int intel_pmu_init(void)
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
+ intel_pmu_pebs_data_source_nhm();
x86_add_quirk(intel_nehalem_quirk);
pr_cont("Nehalem events, ");
@@ -3459,6 +3481,7 @@ __init int intel_pmu_init(void)
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
+ intel_pmu_pebs_data_source_nhm();
pr_cont("Westmere events, ");
break;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 10602f0a4..955140140 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -51,7 +51,8 @@ union intel_x86_pebs_dse {
#define OP_LH (P(OP, LOAD) | P(LVL, HIT))
#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
-static const u64 pebs_data_source[] = {
+/* Version for Sandy Bridge and later */
+static u64 pebs_data_source[] = {
P(OP, LOAD) | P(LVL, MISS) | P(LVL, L3) | P(SNOOP, NA),/* 0x00:ukn L3 */
OP_LH | P(LVL, L1) | P(SNOOP, NONE), /* 0x01: L1 local */
OP_LH | P(LVL, LFB) | P(SNOOP, NONE), /* 0x02: LFB hit */
@@ -70,6 +71,14 @@ static const u64 pebs_data_source[] = {
OP_LH | P(LVL, UNC) | P(SNOOP, NONE), /* 0x0f: uncached */
};
+/* Patch up minor differences in the bits */
+void __init intel_pmu_pebs_data_source_nhm(void)
+{
+ pebs_data_source[0x05] = OP_LH | P(LVL, L3) | P(SNOOP, HIT);
+ pebs_data_source[0x06] = OP_LH | P(LVL, L3) | P(SNOOP, HITM);
+ pebs_data_source[0x07] = OP_LH | P(LVL, L3) | P(SNOOP, HITM);
+}
+
static u64 precise_store_data(u64 status)
{
union intel_x86_pebs_dse dse;
@@ -269,7 +278,7 @@ static int alloc_pebs_buffer(int cpu)
if (!x86_pmu.pebs)
return 0;
- buffer = kzalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL, node);
+ buffer = kzalloc_node(x86_pmu.pebs_buffer_size, GFP_KERNEL, node);
if (unlikely(!buffer))
return -ENOMEM;
@@ -286,7 +295,7 @@ static int alloc_pebs_buffer(int cpu)
per_cpu(insn_buffer, cpu) = ibuffer;
}
- max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;
+ max = x86_pmu.pebs_buffer_size / x86_pmu.pebs_record_size;
ds->pebs_buffer_base = (u64)(unsigned long)buffer;
ds->pebs_index = ds->pebs_buffer_base;
@@ -1319,6 +1328,7 @@ void __init intel_ds_init(void)
x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
+ x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
if (x86_pmu.pebs) {
char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
int format = x86_pmu.intel_cap.pebs_format;
@@ -1327,6 +1337,14 @@ void __init intel_ds_init(void)
case 0:
printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
+ /*
+ * Using >PAGE_SIZE buffers makes the WRMSR to
+ * PERF_GLOBAL_CTRL in intel_pmu_enable_all()
+ * mysteriously hang on Core2.
+ *
+ * As a workaround, we don't do this.
+ */
+ x86_pmu.pebs_buffer_size = PAGE_SIZE;
x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
break;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
index 33acb884c..4547b2cca 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
@@ -2875,11 +2875,13 @@ static struct intel_uncore_type bdx_uncore_sbox = {
.format_group = &hswep_uncore_sbox_format_group,
};
+#define BDX_MSR_UNCORE_SBOX 3
+
static struct intel_uncore_type *bdx_msr_uncores[] = {
&bdx_uncore_ubox,
&bdx_uncore_cbox,
- &bdx_uncore_sbox,
&hswep_uncore_pcu,
+ &bdx_uncore_sbox,
NULL,
};
@@ -2888,6 +2890,10 @@ void bdx_uncore_cpu_init(void)
if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
uncore_msr_uncores = bdx_msr_uncores;
+
+ /* BDX-DE doesn't have SBOX */
+ if (boot_cpu_data.x86_model == 86)
+ uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
}
static struct intel_uncore_type bdx_uncore_ha = {
diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/kernel/cpu/perf_event_knc.c
index 5b0c232d1..b931095e8 100644
--- a/arch/x86/kernel/cpu/perf_event_knc.c
+++ b/arch/x86/kernel/cpu/perf_event_knc.c
@@ -263,7 +263,9 @@ again:
goto again;
done:
- knc_pmu_enable_all(0);
+ /* Only restore PMU state when it's active. See x86_pmu_disable(). */
+ if (cpuc->enabled)
+ knc_pmu_enable_all(0);
return handled;
}
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index 37dae792d..435466fbd 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -28,8 +28,18 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
return -EINVAL;
+#ifdef CONFIG_SCHED_BFS_AUTOISO
+ if (turn_on) {
+ struct sched_param param = { .sched_priority = 0 };
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ /* Start X as SCHED_ISO */
+ sched_setscheduler_nocheck(current, SCHED_ISO, &param);
+ }
+#else
if (turn_on && !capable(CAP_SYS_RAWIO))
return -EPERM;
+#endif
/*
* If it's the first ioperm() call in this thread's lifetime, set the
@@ -96,18 +106,31 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
SYSCALL_DEFINE1(iopl, unsigned int, level)
{
struct pt_regs *regs = current_pt_regs();
- unsigned int old = (regs->flags >> 12) & 3;
struct thread_struct *t = &current->thread;
+ /*
+ * Careful: the IOPL bits in regs->flags are undefined under Xen PV
+ * and changing them has no effect.
+ */
+ unsigned int old = t->iopl >> X86_EFLAGS_IOPL_BIT;
+
if (level > 3)
return -EINVAL;
/* Trying to gain more privileges? */
if (level > old) {
+#ifdef CONFIG_SCHED_BFS_AUTOISO
+ struct sched_param param = { .sched_priority = 0 };
+#endif
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
+#ifdef CONFIG_SCHED_BFS_AUTOISO
+ /* Start X as SCHED_ISO */
+ sched_setscheduler_nocheck(current, SCHED_ISO, &param);
+#endif
}
- regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
- t->iopl = level << 12;
+ regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) |
+ (level << X86_EFLAGS_IOPL_BIT);
+ t->iopl = level << X86_EFLAGS_IOPL_BIT;
set_iopl_mask(t->iopl);
return 0;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index b9d99e0f8..9f7518760 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -48,6 +48,7 @@
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
+#include <asm/xen/hypervisor.h>
asmlinkage extern void ret_from_fork(void);
@@ -411,6 +412,17 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
__switch_to_xtra(prev_p, next_p, tss);
+#ifdef CONFIG_XEN
+ /*
+ * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
+ * current_pt_regs()->flags may not match the current task's
+ * intended IOPL. We need to switch it manually.
+ */
+ if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
+ prev->iopl != next->iopl))
+ xen_set_iopl_mask(next->iopl);
+#endif
+
if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
/*
* AMD CPUs have a misfeature: SYSRET sets the SS selector but
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index b0ea42b78..ab5318727 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -245,7 +245,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
* PIC is being reset. Handle it gracefully here
*/
atomic_inc(&ps->pending);
- else if (value > 0)
+ else if (value > 0 && ps->reinject)
/* in this case, we had multiple outstanding pit interrupts
* that we needed to inject. Reinject
*/
@@ -288,7 +288,9 @@ static void pit_do_work(struct kthread_work *work)
* last one has been acked.
*/
spin_lock(&ps->inject_lock);
- if (ps->irq_ack) {
+ if (!ps->reinject)
+ inject = 1;
+ else if (ps->irq_ack) {
ps->irq_ack = 0;
inject = 1;
}
@@ -317,10 +319,10 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer);
struct kvm_pit *pt = ps->kvm->arch.vpit;
- if (ps->reinject || !atomic_read(&ps->pending)) {
+ if (ps->reinject)
atomic_inc(&ps->pending);
- queue_kthread_work(&pt->worker, &pt->expired);
- }
+
+ queue_kthread_work(&pt->worker, &pt->expired);
if (ps->is_periodic) {
hrtimer_add_expires_ns(&ps->timer, ps->period);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9bd8f44ba..539062e24 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2702,8 +2702,15 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
} else
vmx->nested.nested_vmx_ept_caps = 0;
+ /*
+ * Old versions of KVM use the single-context version without
+ * checking for support, so declare that it is supported even
+ * though it is treated as global context. The alternative is
+ * not failing the single-context invvpid, and it is worse.
+ */
if (enable_vpid)
vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
+ VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |
VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
else
vmx->nested.nested_vmx_vpid_caps = 0;
@@ -7398,6 +7405,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
if (!(types & (1UL << type))) {
nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+ skip_emulated_instruction(vcpu);
return 1;
}
@@ -7456,6 +7464,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
if (!(types & (1UL << type))) {
nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+ skip_emulated_instruction(vcpu);
return 1;
}
@@ -7472,12 +7481,17 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
}
switch (type) {
+ case VMX_VPID_EXTENT_SINGLE_CONTEXT:
+ /*
+ * Old versions of KVM use the single-context version so we
+ * have to support it; just treat it the same as all-context.
+ */
case VMX_VPID_EXTENT_ALL_CONTEXT:
__vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
nested_vmx_succeed(vcpu);
break;
default:
- /* Trap single context invalidation invvpid calls */
+ /* Trap individual address invalidation invvpid calls */
BUG_ON(1);
break;
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index eaf6ee8c2..d47d231e0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2752,6 +2752,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+ vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 8f4cc3dfa..5fb6adaaa 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -106,8 +106,6 @@ static void flush_tlb_func(void *info)
if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
return;
- if (!f->flush_end)
- f->flush_end = f->flush_start + PAGE_SIZE;
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
@@ -135,12 +133,20 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
unsigned long end)
{
struct flush_tlb_info info;
+
+ if (end == 0)
+ end = start + PAGE_SIZE;
info.flush_mm = mm;
info.flush_start = start;
info.flush_end = end;
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
- trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start);
+ if (end == TLB_FLUSH_ALL)
+ trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
+ else
+ trace_tlb_flush(TLB_REMOTE_SEND_IPI,
+ (end - start) >> PAGE_SHIFT);
+
if (is_uv_system()) {
unsigned int cpu;
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index e58565556..0ae7e9fa3 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -540,3 +540,10 @@ static void twinhead_reserve_killing_zone(struct pci_dev *dev)
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
+
+static void pci_bdwep_bar(struct pci_dev *dev)
+{
+ dev->non_compliant_bars = 1;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_bdwep_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_bdwep_bar);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index d09e4c9d7..e3679db17 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -961,7 +961,7 @@ static void xen_load_sp0(struct tss_struct *tss,
tss->x86_tss.sp0 = thread->sp0;
}
-static void xen_set_iopl_mask(unsigned mask)
+void xen_set_iopl_mask(unsigned mask)
{
struct physdev_set_iopl set_iopl;
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index 9ed55649a..05e1df943 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -128,7 +128,7 @@ ENTRY(_startup)
wsr a0, icountlevel
.set _index, 0
- .rept XCHAL_NUM_DBREAK - 1
+ .rept XCHAL_NUM_DBREAK
wsr a0, SREG_DBREAKC + _index
.set _index, _index + 1
.endr
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index d75aa1476..1a804a2f9 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -97,11 +97,11 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
unsigned long paddr;
void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);
- pagefault_disable();
+ preempt_disable();
kmap_invalidate_coherent(page, vaddr);
set_bit(PG_arch_1, &page->flags);
clear_page_alias(kvaddr, paddr);
- pagefault_enable();
+ preempt_enable();
}
void copy_user_highpage(struct page *dst, struct page *src,
@@ -113,11 +113,11 @@ void copy_user_highpage(struct page *dst, struct page *src,
void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
&src_paddr);
- pagefault_disable();
+ preempt_disable();
kmap_invalidate_coherent(dst, vaddr);
set_bit(PG_arch_1, &dst->flags);
copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
- pagefault_enable();
+ preempt_enable();
}
#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
index 70cb408bc..92d785fef 100644
--- a/arch/xtensa/platforms/iss/console.c
+++ b/arch/xtensa/platforms/iss/console.c
@@ -100,21 +100,23 @@ static void rs_poll(unsigned long priv)
{
struct tty_port *port = (struct tty_port *)priv;
int i = 0;
+ int rd = 1;
unsigned char c;
spin_lock(&timer_lock);
while (simc_poll(0)) {
- simc_read(0, &c, 1);
+ rd = simc_read(0, &c, 1);
+ if (rd <= 0)
+ break;
tty_insert_flip_char(port, c, TTY_NORMAL);
i++;
}
if (i)
tty_flip_buffer_push(port);
-
-
- mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
+ if (rd)
+ mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
spin_unlock(&timer_lock);
}
diff --git a/block/blk-core.c b/block/blk-core.c
index 64d137b04..f98581f9e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2203,7 +2203,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
if (q->mq_ops) {
if (blk_queue_io_stat(q))
blk_account_io_start(rq, true);
- blk_mq_insert_request(rq, false, true, true);
+ blk_mq_insert_request(rq, false, true, false);
return 0;
}
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 021d39c0b..13c4e5a5f 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -494,7 +494,7 @@ int x509_decode_time(time64_t *_t, size_t hdrlen,
unsigned char tag,
const unsigned char *value, size_t vlen)
{
- static const unsigned char month_lengths[] = { 31, 29, 31, 30, 31, 30,
+ static const unsigned char month_lengths[] = { 31, 28, 31, 30, 31, 30,
31, 31, 30, 31, 30, 31 };
const unsigned char *p = value;
unsigned year, mon, day, hour, min, sec, mon_len;
@@ -540,9 +540,9 @@ int x509_decode_time(time64_t *_t, size_t hdrlen,
if (year % 4 == 0) {
mon_len = 29;
if (year % 100 == 0) {
- year /= 100;
- if (year % 4 != 0)
- mon_len = 28;
+ mon_len = 28;
+ if (year % 400 == 0)
+ mon_len = 29;
}
}
}
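
For reference, the corrected hunk above now follows the full Gregorian rule: a
year divisible by 4 is a leap year unless it is also divisible by 100, except
when it is divisible by 400. An equivalent standalone predicate, shown purely
for illustration and not part of the patch:

  /* Illustrative only: the same Gregorian rule the corrected hunk encodes. */
  int is_leap_year(unsigned int year)
  {
  	return (year % 4 == 0 && year % 100 != 0) || (year % 400 == 0);
  }
  /* e.g. is_leap_year(2016) == 1, is_leap_year(1900) == 0, is_leap_year(2000) == 1 */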
diff --git a/crypto/keywrap.c b/crypto/keywrap.c
index b1d106ce5..72014f963 100644
--- a/crypto/keywrap.c
+++ b/crypto/keywrap.c
@@ -212,7 +212,7 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
SEMIBSIZE))
ret = -EBADMSG;
- memzero_explicit(&block, sizeof(struct crypto_kw_block));
+ memzero_explicit(block, sizeof(struct crypto_kw_block));
return ret;
}
@@ -297,7 +297,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
/* establish the IV for the caller to pick up */
memcpy(desc->info, block->A, SEMIBSIZE);
- memzero_explicit(&block, sizeof(struct crypto_kw_block));
+ memzero_explicit(block, sizeof(struct crypto_kw_block));
return 0;
}
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index d02fd5304..56241eb34 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -27,8 +27,20 @@
#ifdef CONFIG_X86
#define valid_IRQ(i) (((i) != 0) && ((i) != 2))
+static inline bool acpi_iospace_resource_valid(struct resource *res)
+{
+ /* On X86 IO space is limited to the [0 - 64K] IO port range */
+ return res->end < 0x10003;
+}
#else
#define valid_IRQ(i) (true)
+/*
+ * ACPI IO descriptors on arches other than X86 contain MMIO CPU physical
+ * addresses mapping IO space into the CPU physical address space, so IO space
+ * resources can be placed anywhere in the 64-bit physical address space.
+ */
+static inline bool
+acpi_iospace_resource_valid(struct resource *res) { return true; }
#endif
static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
@@ -127,7 +139,7 @@ static void acpi_dev_ioresource_flags(struct resource *res, u64 len,
if (!acpi_dev_resource_len_valid(res->start, res->end, len, true))
res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
- if (res->end >= 0x10003)
+ if (!acpi_iospace_resource_valid(res))
res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
if (io_decode == ACPI_DECODE_16)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 9cb975200..f054cadf3 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -714,6 +714,7 @@ static int acpi_hibernation_enter(void)
static void acpi_hibernation_leave(void)
{
+ pm_set_resume_via_firmware();
/*
* If ACPI is not enabled by the BIOS and the boot kernel, we need to
* enable it here.
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index cb27190e9..f7ecc287d 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -341,7 +341,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
if (unlikely(bio->bi_rw & REQ_DISCARD)) {
if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
- bio->bi_iter.bi_size & PAGE_MASK)
+ bio->bi_iter.bi_size & ~PAGE_MASK)
goto io_error;
discard_from_brd(brd, sector, bio->bi_iter.bi_size);
goto out;
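The brd hunk above rejects discards whose size has bits set under ~PAGE_MASK, i.e. sizes that are not page aligned; masking with PAGE_MASK instead flags any request of a page or more and lets sub-page, unaligned sizes through. A quick userspace sketch assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long sizes[] = { 4096, 8192, 4608 };

        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                unsigned long sz = sizes[i];

                /* sz & ~PAGE_MASK keeps the bits below the page size, so a
                 * non-zero result means the length is not page aligned.
                 * sz & PAGE_MASK keeps the high bits and is non-zero for
                 * any length of one page or more - the wrong test here. */
                printf("%lu aligned: %s\n", sz,
                       (sz & ~PAGE_MASK) ? "no" : "yes");
        }
        return 0;
}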
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 423f4ca7d..0b816b24b 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -706,6 +706,24 @@ static inline int is_loop_device(struct file *file)
return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
}
+/*
+ * for AUFS
+ * no get/put for file.
+ */
+struct file *loop_backing_file(struct super_block *sb)
+{
+ struct file *ret;
+ struct loop_device *l;
+
+ ret = NULL;
+ if (MAJOR(sb->s_dev) == LOOP_MAJOR) {
+ l = sb->s_bdev->bd_disk->private_data;
+ ret = l->lo_backing_file;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(loop_backing_file);
+
/* loop sysfs attributes */
static ssize_t loop_attr_show(struct device *dev, char *page,
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 9b180dbbd..1c330b61f 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -173,7 +173,13 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
{
struct request *rq;
+ if (mtip_check_surprise_removal(dd->pdev))
+ return NULL;
+
rq = blk_mq_alloc_request(dd->queue, 0, BLK_MQ_REQ_RESERVED);
+ if (IS_ERR(rq))
+ return NULL;
+
return blk_mq_rq_to_pdu(rq);
}
@@ -233,15 +239,9 @@ static void mtip_async_complete(struct mtip_port *port,
"Command tag %d failed due to TFE\n", tag);
}
- /* Unmap the DMA scatter list entries */
- dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents, cmd->direction);
-
rq = mtip_rq_from_tag(dd, tag);
- if (unlikely(cmd->unaligned))
- up(&port->cmd_slot_unal);
-
- blk_mq_end_request(rq, status ? -EIO : 0);
+ blk_mq_complete_request(rq, status);
}
/*
@@ -581,6 +581,8 @@ static void mtip_completion(struct mtip_port *port,
dev_warn(&port->dd->pdev->dev,
"Internal command %d completed with TFE\n", tag);
+ command->comp_func = NULL;
+ command->comp_data = NULL;
complete(waiting);
}
@@ -618,8 +620,6 @@ static void mtip_handle_tfe(struct driver_data *dd)
port = dd->port;
- set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
-
if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
@@ -628,7 +628,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
cmd->comp_func(port, MTIP_TAG_INTERNAL,
cmd, PORT_IRQ_TF_ERR);
}
- goto handle_tfe_exit;
+ return;
}
/* clear the tag accumulator */
@@ -701,7 +701,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
fail_reason = "thermal shutdown";
}
if (buf[288] == 0xBF) {
- set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
+ set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
dev_info(&dd->pdev->dev,
"Drive indicates rebuild has failed. Secure erase required.\n");
fail_all_ncq_cmds = 1;
@@ -771,11 +771,6 @@ static void mtip_handle_tfe(struct driver_data *dd)
}
}
print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
-
-handle_tfe_exit:
- /* clear eh_active */
- clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
- wake_up_interruptible(&port->svc_wait);
}
/*
@@ -1007,6 +1002,7 @@ static bool mtip_pause_ncq(struct mtip_port *port,
(fis->features == 0x27 || fis->features == 0x72 ||
fis->features == 0x62 || fis->features == 0x26))) {
clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
+ clear_bit(MTIP_DDF_REBUILD_FAILED_BIT, &port->dd->dd_flag);
/* Com reset after secure erase or lowlevel format */
mtip_restart_port(port);
clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
@@ -1021,12 +1017,14 @@ static bool mtip_pause_ncq(struct mtip_port *port,
*
* @port Pointer to port data structure
* @timeout Max duration to wait (ms)
+ * @atomic gfp_t flag to indicate blockable context or not
*
* return value
* 0 Success
* -EBUSY Commands still active
*/
-static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
+static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout,
+ gfp_t atomic)
{
unsigned long to;
unsigned int n;
@@ -1037,16 +1035,21 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
to = jiffies + msecs_to_jiffies(timeout);
do {
if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
- test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
+ test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags) &&
+ atomic == GFP_KERNEL) {
msleep(20);
continue; /* svc thd is actively issuing commands */
}
- msleep(100);
+ if (atomic == GFP_KERNEL)
+ msleep(100);
+ else {
+ cpu_relax();
+ udelay(100);
+ }
+
if (mtip_check_surprise_removal(port->dd->pdev))
goto err_fault;
- if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
- goto err_fault;
/*
* Ignore s_active bit 0 of array element 0.
@@ -1099,6 +1102,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
struct mtip_cmd *int_cmd;
struct driver_data *dd = port->dd;
int rv = 0;
+ unsigned long start;
/* Make sure the buffer is 8 byte aligned. This is asic specific. */
if (buffer & 0x00000007) {
@@ -1107,6 +1111,10 @@ static int mtip_exec_internal_command(struct mtip_port *port,
}
int_cmd = mtip_get_int_command(dd);
+ if (!int_cmd) {
+ dbg_printk(MTIP_DRV_NAME "Unable to allocate tag for PIO cmd\n");
+ return -EFAULT;
+ }
set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
@@ -1119,7 +1127,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
if (fis->command != ATA_CMD_STANDBYNOW1) {
/* wait for io to complete if non atomic */
if (mtip_quiesce_io(port,
- MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) {
+ MTIP_QUIESCE_IO_TIMEOUT_MS, atomic) < 0) {
dev_warn(&dd->pdev->dev,
"Failed to quiesce IO\n");
mtip_put_int_command(dd, int_cmd);
@@ -1162,6 +1170,8 @@ static int mtip_exec_internal_command(struct mtip_port *port,
/* Populate the command header */
int_cmd->command_header->byte_count = 0;
+ start = jiffies;
+
/* Issue the command to the hardware */
mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
@@ -1170,10 +1180,12 @@ static int mtip_exec_internal_command(struct mtip_port *port,
if ((rv = wait_for_completion_interruptible_timeout(
&wait,
msecs_to_jiffies(timeout))) <= 0) {
+
if (rv == -ERESTARTSYS) { /* interrupted */
dev_err(&dd->pdev->dev,
- "Internal command [%02X] was interrupted after %lu ms\n",
- fis->command, timeout);
+ "Internal command [%02X] was interrupted after %u ms\n",
+ fis->command,
+ jiffies_to_msecs(jiffies - start));
rv = -EINTR;
goto exec_ic_exit;
} else if (rv == 0) /* timeout */
@@ -2890,6 +2902,42 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
return -EFAULT;
}
+static void mtip_softirq_done_fn(struct request *rq)
+{
+ struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct driver_data *dd = rq->q->queuedata;
+
+ /* Unmap the DMA scatter list entries */
+ dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents,
+ cmd->direction);
+
+ if (unlikely(cmd->unaligned))
+ up(&dd->port->cmd_slot_unal);
+
+ blk_mq_end_request(rq, rq->errors);
+}
+
+static void mtip_abort_cmd(struct request *req, void *data,
+ bool reserved)
+{
+ struct driver_data *dd = data;
+
+ dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);
+
+ clear_bit(req->tag, dd->port->cmds_to_issue);
+ req->errors = -EIO;
+ mtip_softirq_done_fn(req);
+}
+
+static void mtip_queue_cmd(struct request *req, void *data,
+ bool reserved)
+{
+ struct driver_data *dd = data;
+
+ set_bit(req->tag, dd->port->cmds_to_issue);
+ blk_abort_request(req);
+}
+
/*
* service thread to issue queued commands
*
@@ -2902,7 +2950,7 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
static int mtip_service_thread(void *data)
{
struct driver_data *dd = (struct driver_data *)data;
- unsigned long slot, slot_start, slot_wrap;
+ unsigned long slot, slot_start, slot_wrap, to;
unsigned int num_cmd_slots = dd->slot_groups * 32;
struct mtip_port *port = dd->port;
@@ -2917,9 +2965,7 @@ static int mtip_service_thread(void *data)
* is in progress nor error handling is active
*/
wait_event_interruptible(port->svc_wait, (port->flags) &&
- !(port->flags & MTIP_PF_PAUSE_IO));
-
- set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
+ (port->flags & MTIP_PF_SVC_THD_WORK));
if (kthread_should_stop() ||
test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
@@ -2929,6 +2975,8 @@ static int mtip_service_thread(void *data)
&dd->dd_flag)))
goto st_out;
+ set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
+
restart_eh:
/* Demux bits: start with error handling */
if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) {
@@ -2939,6 +2987,32 @@ restart_eh:
if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags))
goto restart_eh;
+ if (test_bit(MTIP_PF_TO_ACTIVE_BIT, &port->flags)) {
+ to = jiffies + msecs_to_jiffies(5000);
+
+ do {
+ mdelay(100);
+ } while (atomic_read(&dd->irq_workers_active) != 0 &&
+ time_before(jiffies, to));
+
+ if (atomic_read(&dd->irq_workers_active) != 0)
+ dev_warn(&dd->pdev->dev,
+ "Completion workers still active!");
+
+ spin_lock(dd->queue->queue_lock);
+ blk_mq_all_tag_busy_iter(*dd->tags.tags,
+ mtip_queue_cmd, dd);
+ spin_unlock(dd->queue->queue_lock);
+
+ set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
+
+ if (mtip_device_reset(dd))
+ blk_mq_all_tag_busy_iter(*dd->tags.tags,
+ mtip_abort_cmd, dd);
+
+ clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags);
+ }
+
if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
slot = 1;
/* used to restrict the loop to one iteration */
@@ -2971,10 +3045,8 @@ restart_eh:
}
if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
- if (mtip_ftl_rebuild_poll(dd) < 0)
- set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
- &dd->dd_flag);
- clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
+ if (mtip_ftl_rebuild_poll(dd) == 0)
+ clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
}
}
@@ -3089,7 +3161,7 @@ static int mtip_hw_get_identify(struct driver_data *dd)
if (buf[288] == 0xBF) {
dev_info(&dd->pdev->dev,
"Drive indicates rebuild has failed.\n");
- /* TODO */
+ set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
}
}
@@ -3263,20 +3335,25 @@ out1:
return rv;
}
-static void mtip_standby_drive(struct driver_data *dd)
+static int mtip_standby_drive(struct driver_data *dd)
{
- if (dd->sr)
- return;
+ int rv = 0;
+ if (dd->sr || !dd->port)
+ return -ENODEV;
/*
* Send standby immediate (E0h) to the drive so that it
* saves its state.
*/
if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
- !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
- if (mtip_standby_immediate(dd->port))
+ !test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag) &&
+ !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) {
+ rv = mtip_standby_immediate(dd->port);
+ if (rv)
dev_warn(&dd->pdev->dev,
"STANDBY IMMEDIATE failed\n");
+ }
+ return rv;
}
/*
@@ -3289,10 +3366,6 @@ static void mtip_standby_drive(struct driver_data *dd)
*/
static int mtip_hw_exit(struct driver_data *dd)
{
- /*
- * Send standby immediate (E0h) to the drive so that it
- * saves its state.
- */
if (!dd->sr) {
/* de-initialize the port. */
mtip_deinit_port(dd->port);
@@ -3334,8 +3407,7 @@ static int mtip_hw_shutdown(struct driver_data *dd)
* Send standby immediate (E0h) to the drive so that it
* saves its state.
*/
- if (!dd->sr && dd->port)
- mtip_standby_immediate(dd->port);
+ mtip_standby_drive(dd);
return 0;
}
@@ -3358,7 +3430,7 @@ static int mtip_hw_suspend(struct driver_data *dd)
* Send standby immediate (E0h) to the drive
* so that it saves its state.
*/
- if (mtip_standby_immediate(dd->port) != 0) {
+ if (mtip_standby_drive(dd) != 0) {
dev_err(&dd->pdev->dev,
"Failed standby-immediate command\n");
return -EFAULT;
@@ -3596,6 +3668,28 @@ static int mtip_block_getgeo(struct block_device *dev,
return 0;
}
+static int mtip_block_open(struct block_device *dev, fmode_t mode)
+{
+ struct driver_data *dd;
+
+ if (dev && dev->bd_disk) {
+ dd = (struct driver_data *) dev->bd_disk->private_data;
+
+ if (dd) {
+ if (test_bit(MTIP_DDF_REMOVAL_BIT,
+ &dd->dd_flag)) {
+ return -ENODEV;
+ }
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+
+void mtip_block_release(struct gendisk *disk, fmode_t mode)
+{
+}
+
/*
* Block device operation function.
*
@@ -3603,6 +3697,8 @@ static int mtip_block_getgeo(struct block_device *dev,
* layer.
*/
static const struct block_device_operations mtip_block_ops = {
+ .open = mtip_block_open,
+ .release = mtip_block_release,
.ioctl = mtip_block_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mtip_block_compat_ioctl,
@@ -3664,10 +3760,9 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
rq_data_dir(rq))) {
return -ENODATA;
}
- if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)))
+ if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag) ||
+ test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)))
return -ENODATA;
- if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
- return -ENXIO;
}
if (rq->cmd_flags & REQ_DISCARD) {
@@ -3779,11 +3874,33 @@ static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
return 0;
}
+static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
+ bool reserved)
+{
+ struct driver_data *dd = req->q->queuedata;
+ int ret = BLK_EH_RESET_TIMER;
+
+ if (reserved)
+ goto exit_handler;
+
+ if (test_bit(req->tag, dd->port->cmds_to_issue))
+ goto exit_handler;
+
+ if (test_and_set_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags))
+ goto exit_handler;
+
+ wake_up_interruptible(&dd->port->svc_wait);
+exit_handler:
+ return ret;
+}
+
static struct blk_mq_ops mtip_mq_ops = {
.queue_rq = mtip_queue_rq,
.map_queue = blk_mq_map_queue,
.init_request = mtip_init_cmd,
.exit_request = mtip_free_cmd,
+ .complete = mtip_softirq_done_fn,
+ .timeout = mtip_cmd_timeout,
};
/*
@@ -3850,7 +3967,6 @@ static int mtip_block_initialize(struct driver_data *dd)
mtip_hw_debugfs_init(dd);
-skip_create_disk:
memset(&dd->tags, 0, sizeof(dd->tags));
dd->tags.ops = &mtip_mq_ops;
dd->tags.nr_hw_queues = 1;
@@ -3860,12 +3976,13 @@ skip_create_disk:
dd->tags.numa_node = dd->numa_node;
dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
dd->tags.driver_data = dd;
+ dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
rv = blk_mq_alloc_tag_set(&dd->tags);
if (rv) {
dev_err(&dd->pdev->dev,
"Unable to allocate request queue\n");
- goto block_queue_alloc_init_error;
+ goto block_queue_alloc_tag_error;
}
/* Allocate the request queue. */
@@ -3880,6 +3997,7 @@ skip_create_disk:
dd->disk->queue = dd->queue;
dd->queue->queuedata = dd;
+skip_create_disk:
/* Initialize the protocol layer. */
wait_for_rebuild = mtip_hw_get_identify(dd);
if (wait_for_rebuild < 0) {
@@ -3976,8 +4094,9 @@ kthread_run_error:
read_capacity_error:
init_hw_cmds_error:
blk_cleanup_queue(dd->queue);
- blk_mq_free_tag_set(&dd->tags);
block_queue_alloc_init_error:
+ blk_mq_free_tag_set(&dd->tags);
+block_queue_alloc_tag_error:
mtip_hw_debugfs_exit(dd);
disk_index_error:
spin_lock(&rssd_index_lock);
@@ -3994,6 +4113,22 @@ protocol_init_error:
return rv;
}
+static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
+{
+ struct driver_data *dd = (struct driver_data *)data;
+ struct mtip_cmd *cmd;
+
+ if (likely(!reserv))
+ blk_mq_complete_request(rq, -ENODEV);
+ else if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &dd->port->flags)) {
+
+ cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
+ if (cmd->comp_func)
+ cmd->comp_func(dd->port, MTIP_TAG_INTERNAL,
+ cmd, -ENODEV);
+ }
+}
+
/*
* Block layer deinitialization function.
*
@@ -4025,12 +4160,23 @@ static int mtip_block_remove(struct driver_data *dd)
}
}
- if (!dd->sr)
- mtip_standby_drive(dd);
+ if (!dd->sr) {
+ /*
+ * Explicitly wait here for IOs to quiesce,
+ * as mtip_standby_drive usually won't wait for IOs.
+ */
+ if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS,
+ GFP_KERNEL))
+ mtip_standby_drive(dd);
+ }
else
dev_info(&dd->pdev->dev, "device %s surprise removal\n",
dd->disk->disk_name);
+ blk_mq_freeze_queue_start(dd->queue);
+ blk_mq_stop_hw_queues(dd->queue);
+ blk_mq_all_tag_busy_iter(dd->tags.tags[0], mtip_no_dev_cleanup, dd);
+
/*
* Delete our gendisk structure. This also removes the device
* from /dev
@@ -4040,7 +4186,8 @@ static int mtip_block_remove(struct driver_data *dd)
dd->bdev = NULL;
}
if (dd->disk) {
- del_gendisk(dd->disk);
+ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
+ del_gendisk(dd->disk);
if (dd->disk->queue) {
blk_cleanup_queue(dd->queue);
blk_mq_free_tag_set(&dd->tags);
@@ -4081,7 +4228,8 @@ static int mtip_block_shutdown(struct driver_data *dd)
dev_info(&dd->pdev->dev,
"Shutting down %s ...\n", dd->disk->disk_name);
- del_gendisk(dd->disk);
+ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
+ del_gendisk(dd->disk);
if (dd->disk->queue) {
blk_cleanup_queue(dd->queue);
blk_mq_free_tag_set(&dd->tags);
@@ -4426,7 +4574,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
struct driver_data *dd = pci_get_drvdata(pdev);
unsigned long flags, to;
- set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
+ set_bit(MTIP_DDF_REMOVAL_BIT, &dd->dd_flag);
spin_lock_irqsave(&dev_lock, flags);
list_del_init(&dd->online_list);
@@ -4443,12 +4591,17 @@ static void mtip_pci_remove(struct pci_dev *pdev)
} while (atomic_read(&dd->irq_workers_active) != 0 &&
time_before(jiffies, to));
+ if (!dd->sr)
+ fsync_bdev(dd->bdev);
+
if (atomic_read(&dd->irq_workers_active) != 0) {
dev_warn(&dd->pdev->dev,
"Completion workers still active!\n");
}
- blk_mq_stop_hw_queues(dd->queue);
+ blk_set_queue_dying(dd->queue);
+ set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
+
/* Clean up the block layer. */
mtip_block_remove(dd);
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 327478400..7617888f7 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -134,16 +134,24 @@ enum {
MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */
MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */
MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcode */
+ MTIP_PF_TO_ACTIVE_BIT = 9, /* timeout handling */
MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) |
(1 << MTIP_PF_EH_ACTIVE_BIT) |
(1 << MTIP_PF_SE_ACTIVE_BIT) |
- (1 << MTIP_PF_DM_ACTIVE_BIT)),
+ (1 << MTIP_PF_DM_ACTIVE_BIT) |
+ (1 << MTIP_PF_TO_ACTIVE_BIT)),
MTIP_PF_SVC_THD_ACTIVE_BIT = 4,
MTIP_PF_ISSUE_CMDS_BIT = 5,
MTIP_PF_REBUILD_BIT = 6,
MTIP_PF_SVC_THD_STOP_BIT = 8,
+ MTIP_PF_SVC_THD_WORK = ((1 << MTIP_PF_EH_ACTIVE_BIT) |
+ (1 << MTIP_PF_ISSUE_CMDS_BIT) |
+ (1 << MTIP_PF_REBUILD_BIT) |
+ (1 << MTIP_PF_SVC_THD_STOP_BIT) |
+ (1 << MTIP_PF_TO_ACTIVE_BIT)),
+
/* below are bit numbers in 'dd_flag' defined in driver_data */
MTIP_DDF_SEC_LOCK_BIT = 0,
MTIP_DDF_REMOVE_PENDING_BIT = 1,
@@ -153,6 +161,7 @@ enum {
MTIP_DDF_RESUME_BIT = 6,
MTIP_DDF_INIT_DONE_BIT = 7,
MTIP_DDF_REBUILD_FAILED_BIT = 8,
+ MTIP_DDF_REMOVAL_BIT = 9,
MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
(1 << MTIP_DDF_SEC_LOCK_BIT) |
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index cda867ada..79129bf08 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -82,6 +82,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0489, 0xe05f) },
{ USB_DEVICE(0x0489, 0xe076) },
{ USB_DEVICE(0x0489, 0xe078) },
+ { USB_DEVICE(0x0489, 0xe095) },
{ USB_DEVICE(0x04c5, 0x1330) },
{ USB_DEVICE(0x04CA, 0x3004) },
{ USB_DEVICE(0x04CA, 0x3005) },
@@ -92,6 +93,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x04CA, 0x300d) },
{ USB_DEVICE(0x04CA, 0x300f) },
{ USB_DEVICE(0x04CA, 0x3010) },
+ { USB_DEVICE(0x04CA, 0x3014) },
{ USB_DEVICE(0x0930, 0x0219) },
{ USB_DEVICE(0x0930, 0x021c) },
{ USB_DEVICE(0x0930, 0x0220) },
@@ -113,10 +115,12 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x13d3, 0x3362) },
{ USB_DEVICE(0x13d3, 0x3375) },
{ USB_DEVICE(0x13d3, 0x3393) },
+ { USB_DEVICE(0x13d3, 0x3395) },
{ USB_DEVICE(0x13d3, 0x3402) },
{ USB_DEVICE(0x13d3, 0x3408) },
{ USB_DEVICE(0x13d3, 0x3423) },
{ USB_DEVICE(0x13d3, 0x3432) },
+ { USB_DEVICE(0x13d3, 0x3472) },
{ USB_DEVICE(0x13d3, 0x3474) },
/* Atheros AR5BBU12 with sflash firmware */
@@ -144,6 +148,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
@@ -154,6 +159,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@@ -175,10 +181,12 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index d897e7454..154b9ecc9 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -196,6 +196,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
@@ -206,6 +207,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@@ -227,10 +229,12 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 45cc39aab..252142524 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -136,11 +136,13 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
chip->cdev.owner = chip->pdev->driver->owner;
chip->cdev.kobj.parent = &chip->dev.kobj;
+ devm_add_action(dev, (void (*)(void *)) put_device, &chip->dev);
+
return chip;
}
EXPORT_SYMBOL_GPL(tpmm_chip_alloc);
-static int tpm_dev_add_device(struct tpm_chip *chip)
+static int tpm_add_char_device(struct tpm_chip *chip)
{
int rc;
@@ -151,7 +153,6 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
chip->devname, MAJOR(chip->dev.devt),
MINOR(chip->dev.devt), rc);
- device_unregister(&chip->dev);
return rc;
}
@@ -162,16 +163,17 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
chip->devname, MAJOR(chip->dev.devt),
MINOR(chip->dev.devt), rc);
+ cdev_del(&chip->cdev);
return rc;
}
return rc;
}
-static void tpm_dev_del_device(struct tpm_chip *chip)
+static void tpm_del_char_device(struct tpm_chip *chip)
{
cdev_del(&chip->cdev);
- device_unregister(&chip->dev);
+ device_del(&chip->dev);
}
static int tpm1_chip_register(struct tpm_chip *chip)
@@ -222,7 +224,7 @@ int tpm_chip_register(struct tpm_chip *chip)
tpm_add_ppi(chip);
- rc = tpm_dev_add_device(chip);
+ rc = tpm_add_char_device(chip);
if (rc)
goto out_err;
@@ -274,6 +276,6 @@ void tpm_chip_unregister(struct tpm_chip *chip)
sysfs_remove_link(&chip->pdev->kobj, "ppi");
tpm1_chip_unregister(chip);
- tpm_dev_del_device(chip);
+ tpm_del_char_device(chip);
}
EXPORT_SYMBOL_GPL(tpm_chip_unregister);
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 8342cf51f..26bab5a29 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -302,11 +302,11 @@ static int crb_acpi_remove(struct acpi_device *device)
struct device *dev = &device->dev;
struct tpm_chip *chip = dev_get_drvdata(dev);
- tpm_chip_unregister(chip);
-
if (chip->flags & TPM_CHIP_FLAG_TPM2)
tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ tpm_chip_unregister(chip);
+
return 0;
}
diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
index bd72fb042..4e6940acf 100644
--- a/drivers/char/tpm/tpm_eventlog.c
+++ b/drivers/char/tpm/tpm_eventlog.c
@@ -232,7 +232,7 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
{
struct tcpa_event *event = v;
struct tcpa_event temp_event;
- char *tempPtr;
+ char *temp_ptr;
int i;
memcpy(&temp_event, event, sizeof(struct tcpa_event));
@@ -242,10 +242,16 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
temp_event.event_type = do_endian_conversion(event->event_type);
temp_event.event_size = do_endian_conversion(event->event_size);
- tempPtr = (char *)&temp_event;
+ temp_ptr = (char *) &temp_event;
- for (i = 0; i < sizeof(struct tcpa_event) + temp_event.event_size; i++)
- seq_putc(m, tempPtr[i]);
+ for (i = 0; i < (sizeof(struct tcpa_event) - 1) ; i++)
+ seq_putc(m, temp_ptr[i]);
+
+ temp_ptr = (char *) v;
+
+ for (i = (sizeof(struct tcpa_event) - 1);
+ i < (sizeof(struct tcpa_event) + temp_event.event_size); i++)
+ seq_putc(m, temp_ptr[i]);
return 0;
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 015e687ff..9f4df8f64 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -1107,13 +1107,15 @@ static int bcm2835_pll_divider_set_rate(struct clk_hw *hw,
struct bcm2835_pll_divider *divider = bcm2835_pll_divider_from_hw(hw);
struct bcm2835_cprman *cprman = divider->cprman;
const struct bcm2835_pll_divider_data *data = divider->data;
- u32 cm;
- int ret;
+ u32 cm, div, max_div = 1 << A2W_PLL_DIV_BITS;
- ret = clk_divider_ops.set_rate(hw, rate, parent_rate);
- if (ret)
- return ret;
+ div = DIV_ROUND_UP_ULL(parent_rate, rate);
+
+ div = min(div, max_div);
+ if (div == max_div)
+ div = 0;
+ cprman_write(cprman, data->a2w_reg, div);
cm = cprman_read(cprman, data->cm_reg);
cprman_write(cprman, data->cm_reg, cm | data->load_mask);
cprman_write(cprman, data->cm_reg, cm & ~data->load_mask);
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index 7f7444cbf..05263571c 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -748,6 +748,7 @@ static const char *const rk3188_critical_clocks[] __initconst = {
"hclk_peri",
"pclk_cpu",
"pclk_peri",
+ "hclk_cpubus"
};
static void __init rk3188_common_clk_init(struct device_node *np)
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index 21f3ea909..57acb625c 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -165,7 +165,7 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkb_data = {
.core_reg = RK3368_CLKSEL_CON(0),
.div_core_shift = 0,
.div_core_mask = 0x1f,
- .mux_core_shift = 15,
+ .mux_core_shift = 7,
};
static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
@@ -218,29 +218,29 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
}
static struct rockchip_cpuclk_rate_table rk3368_cpuclkb_rates[] __initdata = {
- RK3368_CPUCLKB_RATE(1512000000, 2, 6, 6),
- RK3368_CPUCLKB_RATE(1488000000, 2, 5, 5),
- RK3368_CPUCLKB_RATE(1416000000, 2, 5, 5),
- RK3368_CPUCLKB_RATE(1200000000, 2, 4, 4),
- RK3368_CPUCLKB_RATE(1008000000, 2, 4, 4),
- RK3368_CPUCLKB_RATE( 816000000, 2, 3, 3),
- RK3368_CPUCLKB_RATE( 696000000, 2, 3, 3),
- RK3368_CPUCLKB_RATE( 600000000, 2, 2, 2),
- RK3368_CPUCLKB_RATE( 408000000, 2, 2, 2),
- RK3368_CPUCLKB_RATE( 312000000, 2, 2, 2),
+ RK3368_CPUCLKB_RATE(1512000000, 1, 5, 5),
+ RK3368_CPUCLKB_RATE(1488000000, 1, 4, 4),
+ RK3368_CPUCLKB_RATE(1416000000, 1, 4, 4),
+ RK3368_CPUCLKB_RATE(1200000000, 1, 3, 3),
+ RK3368_CPUCLKB_RATE(1008000000, 1, 3, 3),
+ RK3368_CPUCLKB_RATE( 816000000, 1, 2, 2),
+ RK3368_CPUCLKB_RATE( 696000000, 1, 2, 2),
+ RK3368_CPUCLKB_RATE( 600000000, 1, 1, 1),
+ RK3368_CPUCLKB_RATE( 408000000, 1, 1, 1),
+ RK3368_CPUCLKB_RATE( 312000000, 1, 1, 1),
};
static struct rockchip_cpuclk_rate_table rk3368_cpuclkl_rates[] __initdata = {
- RK3368_CPUCLKL_RATE(1512000000, 2, 7, 7),
- RK3368_CPUCLKL_RATE(1488000000, 2, 6, 6),
- RK3368_CPUCLKL_RATE(1416000000, 2, 6, 6),
- RK3368_CPUCLKL_RATE(1200000000, 2, 5, 5),
- RK3368_CPUCLKL_RATE(1008000000, 2, 5, 5),
- RK3368_CPUCLKL_RATE( 816000000, 2, 4, 4),
- RK3368_CPUCLKL_RATE( 696000000, 2, 3, 3),
- RK3368_CPUCLKL_RATE( 600000000, 2, 3, 3),
- RK3368_CPUCLKL_RATE( 408000000, 2, 2, 2),
- RK3368_CPUCLKL_RATE( 312000000, 2, 2, 2),
+ RK3368_CPUCLKL_RATE(1512000000, 1, 6, 6),
+ RK3368_CPUCLKL_RATE(1488000000, 1, 5, 5),
+ RK3368_CPUCLKL_RATE(1416000000, 1, 5, 5),
+ RK3368_CPUCLKL_RATE(1200000000, 1, 4, 4),
+ RK3368_CPUCLKL_RATE(1008000000, 1, 4, 4),
+ RK3368_CPUCLKL_RATE( 816000000, 1, 3, 3),
+ RK3368_CPUCLKL_RATE( 696000000, 1, 2, 2),
+ RK3368_CPUCLKL_RATE( 600000000, 1, 2, 2),
+ RK3368_CPUCLKL_RATE( 408000000, 1, 1, 1),
+ RK3368_CPUCLKL_RATE( 312000000, 1, 1, 1),
};
static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
@@ -384,10 +384,10 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
* Clock-Architecture Diagram 3
*/
- COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_usb_p, 0,
+ COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_npll_usb_p, 0,
RK3368_CLKSEL_CON(15), 6, 2, MFLAGS, 0, 5, DFLAGS,
RK3368_CLKGATE_CON(4), 6, GFLAGS),
- COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb_p, 0,
+ COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_npll_usb_p, 0,
RK3368_CLKSEL_CON(15), 14, 2, MFLAGS, 8, 5, DFLAGS,
RK3368_CLKGATE_CON(4), 7, GFLAGS),
@@ -442,7 +442,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
GATE(SCLK_HDMI_HDCP, "sclk_hdmi_hdcp", "xin24m", 0,
RK3368_CLKGATE_CON(4), 13, GFLAGS),
GATE(SCLK_HDMI_CEC, "sclk_hdmi_cec", "xin32k", 0,
- RK3368_CLKGATE_CON(5), 12, GFLAGS),
+ RK3368_CLKGATE_CON(4), 12, GFLAGS),
COMPOSITE_NODIV(0, "vip_src", mux_pll_src_cpll_gpll_p, 0,
RK3368_CLKSEL_CON(21), 15, 1, MFLAGS,
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e979ec78b..da8b7b9f4 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -25,6 +25,7 @@
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
@@ -1900,6 +1901,12 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
}
out:
+ if (likely(retval != -EINVAL)) {
+ if (target_freq == policy->max)
+ cpu_nonscaling(policy->cpu);
+ else
+ cpu_scaling(policy->cpu);
+ }
return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 606ad74ab..ce8a7e14d 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -15,8 +15,14 @@
#include "cpufreq_governor.h"
/* Conservative governor macros */
+#ifdef CONFIG_SCHED_BFS
+#define DEF_FREQUENCY_UP_THRESHOLD (63)
+#define DEF_FREQUENCY_DOWN_THRESHOLD (26)
+#else
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
+#endif
+
#define DEF_FREQUENCY_STEP (5)
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (10)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 9e6fe9e86..38fbd8771 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -19,7 +19,12 @@
#include "cpufreq_governor.h"
/* On-demand governor macros */
+#ifdef CONFIG_SCHED_BFS
+#define DEF_FREQUENCY_UP_THRESHOLD (63)
+#else
#define DEF_FREQUENCY_UP_THRESHOLD (80)
+#endif
+
#ifdef CONFIG_PCK_INTERACTIVE
#define DEF_SAMPLING_DOWN_FACTOR (10)
#else
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cd83d477e..0248282d4 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -575,8 +575,13 @@ static void atom_set_pstate(struct cpudata *cpudata, int pstate)
vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
vid = ceiling_fp(vid_fp);
- if (pstate > cpudata->pstate.max_pstate)
- vid = cpudata->vid.turbo;
+ if (pstate < cpudata->pstate.max_pstate)
+ cpu_scaling(cpudata->cpu);
+ else {
+ if (pstate > cpudata->pstate.max_pstate)
+ vid = cpudata->vid.turbo;
+ cpu_nonscaling(cpudata->cpu);
+ }
val |= vid;
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 3eb3f1279..7de007abe 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -2085,9 +2085,9 @@ static int atmel_aes_probe(struct platform_device *pdev)
}
aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
- if (!aes_dd->io_base) {
+ if (IS_ERR(aes_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
- err = -ENOMEM;
+ err = PTR_ERR(aes_dd->io_base);
goto res_err;
}
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 8bf9914d4..68d47a2da 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -1404,9 +1404,9 @@ static int atmel_sha_probe(struct platform_device *pdev)
}
sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
- if (!sha_dd->io_base) {
+ if (IS_ERR(sha_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
- err = -ENOMEM;
+ err = PTR_ERR(sha_dd->io_base);
goto res_err;
}
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 2c7a628d0..bf467d7be 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1417,9 +1417,9 @@ static int atmel_tdes_probe(struct platform_device *pdev)
}
tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
- if (!tdes_dd->io_base) {
+ if (IS_ERR(tdes_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
- err = -ENOMEM;
+ err = PTR_ERR(tdes_dd->io_base);
goto res_err;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index d89f20c04..3d9acc53d 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -220,6 +220,39 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
return ccp_aes_cmac_finup(req);
}
+static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
+{
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_aes_cmac_exp_ctx state;
+
+ state.null_msg = rctx->null_msg;
+ memcpy(state.iv, rctx->iv, sizeof(state.iv));
+ state.buf_count = rctx->buf_count;
+ memcpy(state.buf, rctx->buf, sizeof(state.buf));
+
+ /* 'out' may not be aligned so memcpy from local variable */
+ memcpy(out, &state, sizeof(state));
+
+ return 0;
+}
+
+static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
+{
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_aes_cmac_exp_ctx state;
+
+ /* 'in' may not be aligned so memcpy to local variable */
+ memcpy(&state, in, sizeof(state));
+
+ memset(rctx, 0, sizeof(*rctx));
+ rctx->null_msg = state.null_msg;
+ memcpy(rctx->iv, state.iv, sizeof(rctx->iv));
+ rctx->buf_count = state.buf_count;
+ memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
+
+ return 0;
+}
+
static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int key_len)
{
@@ -352,10 +385,13 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
alg->final = ccp_aes_cmac_final;
alg->finup = ccp_aes_cmac_finup;
alg->digest = ccp_aes_cmac_digest;
+ alg->export = ccp_aes_cmac_export;
+ alg->import = ccp_aes_cmac_import;
alg->setkey = ccp_aes_cmac_setkey;
halg = &alg->halg;
halg->digestsize = AES_BLOCK_SIZE;
+ halg->statesize = sizeof(struct ccp_aes_cmac_exp_ctx);
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index d14b3f28e..8ef06fad8 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -207,6 +207,43 @@ static int ccp_sha_digest(struct ahash_request *req)
return ccp_sha_finup(req);
}
+static int ccp_sha_export(struct ahash_request *req, void *out)
+{
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_sha_exp_ctx state;
+
+ state.type = rctx->type;
+ state.msg_bits = rctx->msg_bits;
+ state.first = rctx->first;
+ memcpy(state.ctx, rctx->ctx, sizeof(state.ctx));
+ state.buf_count = rctx->buf_count;
+ memcpy(state.buf, rctx->buf, sizeof(state.buf));
+
+ /* 'out' may not be aligned so memcpy from local variable */
+ memcpy(out, &state, sizeof(state));
+
+ return 0;
+}
+
+static int ccp_sha_import(struct ahash_request *req, const void *in)
+{
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_sha_exp_ctx state;
+
+ /* 'in' may not be aligned so memcpy to local variable */
+ memcpy(&state, in, sizeof(state));
+
+ memset(rctx, 0, sizeof(*rctx));
+ rctx->type = state.type;
+ rctx->msg_bits = state.msg_bits;
+ rctx->first = state.first;
+ memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx));
+ rctx->buf_count = state.buf_count;
+ memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
+
+ return 0;
+}
+
static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int key_len)
{
@@ -403,9 +440,12 @@ static int ccp_register_sha_alg(struct list_head *head,
alg->final = ccp_sha_final;
alg->finup = ccp_sha_finup;
alg->digest = ccp_sha_digest;
+ alg->export = ccp_sha_export;
+ alg->import = ccp_sha_import;
halg = &alg->halg;
halg->digestsize = def->digest_size;
+ halg->statesize = sizeof(struct ccp_sha_exp_ctx);
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 76a96f0f4..a326ec20b 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -129,6 +129,15 @@ struct ccp_aes_cmac_req_ctx {
struct ccp_cmd cmd;
};
+struct ccp_aes_cmac_exp_ctx {
+ unsigned int null_msg;
+
+ u8 iv[AES_BLOCK_SIZE];
+
+ unsigned int buf_count;
+ u8 buf[AES_BLOCK_SIZE];
+};
+
/***** SHA related defines *****/
#define MAX_SHA_CONTEXT_SIZE SHA256_DIGEST_SIZE
#define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
@@ -171,6 +180,19 @@ struct ccp_sha_req_ctx {
struct ccp_cmd cmd;
};
+struct ccp_sha_exp_ctx {
+ enum ccp_sha_type type;
+
+ u64 msg_bits;
+
+ unsigned int first;
+
+ u8 ctx[MAX_SHA_CONTEXT_SIZE];
+
+ unsigned int buf_count;
+ u8 buf[MAX_SHA_BLOCK_SIZE];
+};
+
/***** Common Context Structure *****/
struct ccp_ctx {
int (*complete)(struct crypto_async_request *req, int ret);
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index c0656e7f3..80239ae69 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -420,7 +420,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
cesa->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(cesa->regs))
- return -ENOMEM;
+ return PTR_ERR(cesa->regs);
ret = mv_cesa_dev_dma_init(cesa);
if (ret)
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 4c243c1ff..790f7cadc 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1440,9 +1440,9 @@ static int ux500_cryp_probe(struct platform_device *pdev)
device_data->phybase = res->start;
device_data->base = devm_ioremap_resource(dev, res);
- if (!device_data->base) {
+ if (IS_ERR(device_data->base)) {
dev_err(dev, "[%s]: ioremap failed!", __func__);
- ret = -ENOMEM;
+ ret = PTR_ERR(device_data->base);
goto out;
}
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index d6fdc583c..574e87c7f 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1659,9 +1659,9 @@ static int ux500_hash_probe(struct platform_device *pdev)
device_data->phybase = res->start;
device_data->base = devm_ioremap_resource(dev, res);
- if (!device_data->base) {
+ if (IS_ERR(device_data->base)) {
dev_err(dev, "%s: ioremap() failed!\n", __func__);
- ret = -ENOMEM;
+ ret = PTR_ERR(device_data->base);
goto out;
}
spin_lock_init(&device_data->ctx_lock);
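The atmel-aes/sha/tdes, marvell cesa and ux500 hunks above all fix the same pattern: devm_ioremap_resource() signals failure with an ERR_PTR()-encoded pointer rather than NULL, so the check and the propagated error code must use IS_ERR()/PTR_ERR(). The idiom, shown as a short fragment (variable names illustrative, not a buildable module):

        void __iomem *base;

        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);   /* propagate the encoded errno */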
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 9eee13ef8..d87a47547 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1452,7 +1452,7 @@ static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
u64 chan_off;
u64 dram_base = get_dram_base(pvt, range);
u64 hole_off = f10_dhar_offset(pvt);
- u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
+ u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
if (hi_rng) {
/*
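The amd64_edac hunk above adds a (u64) cast before shifting dct_sel_hi: without it the shift is evaluated in 32 bits and the high bits of the computed base offset are silently dropped before the widening to u64. A standalone demonstration of the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t dct_sel_hi = 0xFFFFFC00;       /* illustrative register value */

        /* Shift evaluated in 32 bits, then widened: upper bits already gone. */
        uint64_t truncated = (dct_sel_hi & 0xFFFFFC00) << 16;
        /* Widen first, then shift: the full value survives the shift. */
        uint64_t correct   = (uint64_t)(dct_sel_hi & 0xFFFFFC00) << 16;

        printf("truncated=0x%llx correct=0x%llx\n",
               (unsigned long long)truncated, (unsigned long long)correct);
        return 0;
}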
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index f5c6b97c8..93f0d4120 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -1839,8 +1839,8 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
n_tads, gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
- (u32)TAD_SOCK(reg),
- (u32)TAD_CH(reg),
+ (u32)(1 << TAD_SOCK(reg)),
+ (u32)TAD_CH(reg) + 1,
(u32)TAD_TGT0(reg),
(u32)TAD_TGT1(reg),
(u32)TAD_TGT2(reg),
@@ -2118,7 +2118,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
}
ch_way = TAD_CH(reg) + 1;
- sck_way = TAD_SOCK(reg) + 1;
+ sck_way = 1 << TAD_SOCK(reg);
if (ch_way == 3)
idx = addr >> 6;
@@ -2175,7 +2175,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
n_tads,
addr,
limit,
- (u32)TAD_SOCK(reg),
+ sck_way,
ch_way,
offset,
idx,
@@ -2190,18 +2190,12 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
offset, addr);
return -EINVAL;
}
- addr -= offset;
- /* Store the low bits [0:6] of the addr */
- ch_addr = addr & 0x7f;
- /* Remove socket wayness and remove 6 bits */
- addr >>= 6;
- addr = div_u64(addr, sck_xch);
-#if 0
- /* Divide by channel way */
- addr = addr / ch_way;
-#endif
- /* Recover the last 6 bits */
- ch_addr |= addr << 6;
+
+ ch_addr = addr - offset;
+ ch_addr >>= (6 + shiftup);
+ ch_addr /= ch_way * sck_way;
+ ch_addr <<= (6 + shiftup);
+ ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
/*
* Step 3) Decode rank
diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
index 0c2f0a61b..0b631e5b5 100644
--- a/drivers/firmware/broadcom/bcm47xx_nvram.c
+++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
@@ -94,15 +94,14 @@ static int nvram_find_and_copy(void __iomem *iobase, u32 lim)
found:
__ioread32_copy(nvram_buf, header, sizeof(*header) / 4);
- header = (struct nvram_header *)nvram_buf;
- nvram_len = header->len;
+ nvram_len = ((struct nvram_header *)(nvram_buf))->len;
if (nvram_len > size) {
pr_err("The nvram size according to the header seems to be bigger than the partition on flash\n");
nvram_len = size;
}
if (nvram_len >= NVRAM_SPACE) {
pr_err("nvram on flash (%i bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
- header->len, NVRAM_SPACE - 1);
+ nvram_len, NVRAM_SPACE - 1);
nvram_len = NVRAM_SPACE - 1;
}
/* proceed reading data after header */
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 23196c5fc..99b375c95 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -367,9 +367,11 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
memcpy(reg_val, chip->reg_output, NBANK(chip));
mutex_lock(&chip->i2c_lock);
for(bank=0; bank<NBANK(chip); bank++) {
- unsigned bankmask = mask[bank/4] >> ((bank % 4) * 8);
+ unsigned bankmask = mask[bank / sizeof(*mask)] >>
+ ((bank % sizeof(*mask)) * 8);
if(bankmask) {
- unsigned bankval = bits[bank/4] >> ((bank % 4) * 8);
+ unsigned bankval = bits[bank / sizeof(*bits)] >>
+ ((bank % sizeof(*bits)) * 8);
reg_val[bank] = (reg_val[bank] & ~bankmask) | bankval;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 3c895863f..81dc6b654 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -63,6 +63,10 @@ bool amdgpu_has_atpx(void) {
return amdgpu_atpx_priv.atpx_detected;
}
+bool amdgpu_has_atpx_dgpu_power_cntl(void) {
+ return amdgpu_atpx_priv.atpx.functions.power_cntl;
+}
+
/**
* amdgpu_atpx_call - call an ATPX method
*
@@ -142,10 +146,6 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
*/
static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
{
- /* make sure required functions are enabled */
- /* dGPU power control is required */
- atpx->functions.power_cntl = true;
-
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 51bfc1145..d6c68d00c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -62,6 +62,12 @@ static const char *amdgpu_asic_name[] = {
"LAST",
};
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool amdgpu_has_atpx_dgpu_power_cntl(void);
+#else
+static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
+#endif
+
bool amdgpu_device_is_px(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
@@ -1511,7 +1517,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (amdgpu_runtime_pm == 1)
runtime = true;
- if (amdgpu_device_is_px(ddev))
+ if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
runtime = true;
vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
if (runtime)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index ed90e3b64..8c01e1565 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -32,8 +32,8 @@
#include "oss/oss_2_4_d.h"
#include "oss/oss_2_4_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index cf01177ca..2ea012e88 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -241,6 +241,11 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DynamicUVDState);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDDPM);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_VCEDPM);
+
cz_hwmgr->cc6_settings.cpu_cc6_disable = false;
cz_hwmgr->cc6_settings.cpu_pstate_disable = false;
cz_hwmgr->cc6_settings.nb_pstate_switch_disable = false;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 01b20e14a..6104d7d74 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -892,8 +892,6 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
else
args.v1.ucLaneNum = 4;
- if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
@@ -910,6 +908,10 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+
+ if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+
break;
case 2:
case 3:
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index c4b4f298a..9bc408c9f 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -62,6 +62,10 @@ bool radeon_has_atpx(void) {
return radeon_atpx_priv.atpx_detected;
}
+bool radeon_has_atpx_dgpu_power_cntl(void) {
+ return radeon_atpx_priv.atpx.functions.power_cntl;
+}
+
/**
* radeon_atpx_call - call an ATPX method
*
@@ -141,10 +145,6 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
*/
static int radeon_atpx_validate(struct radeon_atpx *atpx)
{
- /* make sure required functions are enabled */
- /* dGPU power control is required */
- atpx->functions.power_cntl = true;
-
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 4197ca1bb..e2396336f 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -103,6 +103,12 @@ static const char radeon_family_name[][16] = {
"LAST",
};
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_has_atpx_dgpu_power_cntl(void);
+#else
+static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+#endif
+
#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
@@ -1433,7 +1439,7 @@ int radeon_device_init(struct radeon_device *rdev,
* ignore it */
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
- if (rdev->flags & RADEON_IS_PX)
+ if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
runtime = true;
vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
if (runtime)
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 2d9196a44..bfcef4db8 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1683,10 +1683,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
/* setup afmt */
radeon_afmt_init(rdev);
- if (!list_empty(&rdev->ddev->mode_config.connector_list)) {
- radeon_fbdev_init(rdev);
- drm_kms_helper_poll_init(rdev->ddev);
- }
+ radeon_fbdev_init(rdev);
+ drm_kms_helper_poll_init(rdev->ddev);
/* do pm late init */
ret = radeon_pm_late_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index df7a1719c..9d210bbca 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -525,17 +525,9 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
drm_mode_set_crtcinfo(adjusted_mode, 0);
{
struct radeon_connector_atom_dig *dig_connector;
- int ret;
-
dig_connector = mst_enc->connector->con_priv;
- ret = radeon_dp_get_dp_link_config(&mst_enc->connector->base,
- dig_connector->dpcd, adjusted_mode->clock,
- &dig_connector->dp_lane_count,
- &dig_connector->dp_clock);
- if (ret) {
- dig_connector->dp_lane_count = 0;
- dig_connector->dp_clock = 0;
- }
+ dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
+ dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd);
DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
dig_connector->dp_lane_count, dig_connector->dp_clock);
}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index d2e628eea..d17959633 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -292,7 +292,8 @@ out_unref:
void radeon_fb_output_poll_changed(struct radeon_device *rdev)
{
- drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
+ if (rdev->mode_info.rfbdev)
+ drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
}
static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
@@ -325,6 +326,10 @@ int radeon_fbdev_init(struct radeon_device *rdev)
int bpp_sel = 32;
int ret;
+ /* don't enable fbdev if no connectors */
+ if (list_empty(&rdev->ddev->mode_config.connector_list))
+ return 0;
+
/* select 8 bpp console on RN50 or 16MB cards */
if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
bpp_sel = 8;
@@ -377,11 +382,15 @@ void radeon_fbdev_fini(struct radeon_device *rdev)
void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
{
- fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
+ if (rdev->mode_info.rfbdev)
+ fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
}
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
{
+ if (!rdev->mode_info.rfbdev)
+ return false;
+
if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
return true;
return false;
@@ -389,12 +398,14 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector)
{
- drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+ if (rdev->mode_info.rfbdev)
+ drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
}
void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector)
{
- drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+ if (rdev->mode_info.rfbdev)
+ drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
}
void radeon_fbdev_restore_mode(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 22278bcfc..ac8eafea6 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -499,11 +499,12 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(bo))
return PTR_ERR(bo);
- ret = copy_from_user(bo->base.vaddr,
+ if (copy_from_user(bo->base.vaddr,
(void __user *)(uintptr_t)args->data,
- args->size);
- if (ret != 0)
+ args->size)) {
+ ret = -EFAULT;
goto fail;
+ }
/* Clear the rest of the memory from allocating from the BO
* cache.
*/
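The vc4 hunk above reflects that copy_from_user() returns the number of bytes left uncopied, not an errno, so any non-zero result must be turned into -EFAULT by the caller. The usual shape, as a fragment with placeholder names for the destination, source and length:

        if (copy_from_user(dst, src, len)) {
                ret = -EFAULT;  /* non-zero return = bytes left uncopied */
                goto fail;
        }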
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 7e89288b1..99446ffd7 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1891,6 +1891,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
@@ -2615,9 +2616,10 @@ int hid_add_device(struct hid_device *hdev)
/*
* Scan generic devices for group information
*/
- if (hid_ignore_special_drivers ||
- (!hdev->group &&
- !hid_match_id(hdev, hid_have_special_driver))) {
+ if (hid_ignore_special_drivers) {
+ hdev->group = HID_GROUP_GENERIC;
+ } else if (!hdev->group &&
+ !hid_match_id(hdev, hid_have_special_driver)) {
ret = hid_scan_report(hdev);
if (ret)
hid_warn(hdev, "bad device descriptor (%d)\n", ret);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 296d49915..a20fc604f 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -396,6 +396,11 @@ static void mt_feature_mapping(struct hid_device *hdev,
td->is_buttonpad = true;
break;
+ case 0xff0000c5:
+ /* Retrieve the Win8 blob once to enable some devices */
+ if (usage->usage_index == 0)
+ mt_get_feature(hdev, field->report);
+ break;
}
}
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index b9216938a..bb897497f 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -283,17 +283,21 @@ static int i2c_hid_set_or_send_report(struct i2c_client *client, u8 reportType,
u16 dataRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
u16 outputRegister = le16_to_cpu(ihid->hdesc.wOutputRegister);
u16 maxOutputLength = le16_to_cpu(ihid->hdesc.wMaxOutputLength);
+ u16 size;
+ int args_len;
+ int index = 0;
+
+ i2c_hid_dbg(ihid, "%s\n", __func__);
+
+ if (data_len > ihid->bufsize)
+ return -EINVAL;
- /* hid_hw_* already checked that data_len < HID_MAX_BUFFER_SIZE */
- u16 size = 2 /* size */ +
+ size = 2 /* size */ +
(reportID ? 1 : 0) /* reportID */ +
data_len /* buf */;
- int args_len = (reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
+ args_len = (reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
2 /* dataRegister */ +
size /* args */;
- int index = 0;
-
- i2c_hid_dbg(ihid, "%s\n", __func__);
if (!use_data && maxOutputLength == 0)
return -ENOSYS;
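
Two things happen in the i2c-hid hunk: the local declarations are hoisted to the top of the block (kernel style keeps declarations before statements, and the early return now has to precede the size computation), and an explicit data_len > ihid->bufsize bound is checked before the request is assembled, since the removed comment's assumption that callers already limited the size no longer covers every path. A simplified sketch of the length computation, with field meanings borrowed from the hunk (the helper name is illustrative):

    /* Sketch only: how the report size and argument length are derived.
     * A report ID >= 0x0F needs an extra command byte, and the 2-byte
     * size prefix counts itself plus the optional report ID and payload. */
    static int example_report_lengths(u8 report_id, size_t data_len,
                                      size_t bufsize, u16 *size, int *args_len)
    {
            if (data_len > bufsize)         /* reject oversized reports early */
                    return -EINVAL;

            *size = 2 /* size field */ +
                    (report_id ? 1 : 0) /* optional report ID */ +
                    data_len /* payload */;
            *args_len = (report_id >= 0x0F ? 1 : 0) /* extended report ID byte */ +
                        2 /* data register */ +
                        *size;
            return 0;
    }
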
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index cd4510a63..146eed70b 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -65,7 +65,7 @@
#include <asm/mwait.h>
#include <asm/msr.h>
-#define INTEL_IDLE_VERSION "0.4"
+#define INTEL_IDLE_VERSION "0.4.1"
#define PREFIX "intel_idle: "
static struct cpuidle_driver intel_idle_driver = {
@@ -994,36 +994,92 @@ static void intel_idle_cpuidle_devices_uninit(void)
}
/*
- * intel_idle_state_table_update()
- *
- * Update the default state_table for this CPU-id
+ * ivt_idle_state_table_update(void)
*
- * Currently used to access tuned IVT multi-socket targets
+ * Tune IVT multi-socket targets
* Assumption: num_sockets == (max_package_num + 1)
*/
-void intel_idle_state_table_update(void)
+static void ivt_idle_state_table_update(void)
{
/* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
- if (boot_cpu_data.x86_model == 0x3e) { /* IVT */
- int cpu, package_num, num_sockets = 1;
-
- for_each_online_cpu(cpu) {
- package_num = topology_physical_package_id(cpu);
- if (package_num + 1 > num_sockets) {
- num_sockets = package_num + 1;
-
- if (num_sockets > 4) {
- cpuidle_state_table = ivt_cstates_8s;
- return;
- }
+ int cpu, package_num, num_sockets = 1;
+
+ for_each_online_cpu(cpu) {
+ package_num = topology_physical_package_id(cpu);
+ if (package_num + 1 > num_sockets) {
+ num_sockets = package_num + 1;
+
+ if (num_sockets > 4) {
+ cpuidle_state_table = ivt_cstates_8s;
+ return;
}
}
+ }
+
+ if (num_sockets > 2)
+ cpuidle_state_table = ivt_cstates_4s;
+
+ /* else, 1 and 2 socket systems use default ivt_cstates */
+}
+/*
+ * sklh_idle_state_table_update(void)
+ *
+ * On SKL-H (model 0x5e) disable C8 and C9 if:
+ * C10 is enabled and SGX disabled
+ */
+static void sklh_idle_state_table_update(void)
+{
+ unsigned long long msr;
+ unsigned int eax, ebx, ecx, edx;
+
+
+ /* if PC10 disabled via cmdline intel_idle.max_cstate=7 or shallower */
+ if (max_cstate <= 7)
+ return;
+
+ /* if PC10 not present in CPUID.MWAIT.EDX */
+ if ((mwait_substates & (0xF << 28)) == 0)
+ return;
+
+ rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr);
+
+ /* PC10 is not enabled in PKG C-state limit */
+ if ((msr & 0xF) != 8)
+ return;
+
+ ecx = 0;
+ cpuid(7, &eax, &ebx, &ecx, &edx);
+
+ /* if SGX is present */
+ if (ebx & (1 << 2)) {
- if (num_sockets > 2)
- cpuidle_state_table = ivt_cstates_4s;
- /* else, 1 and 2 socket systems use default ivt_cstates */
+ rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
+
+ /* if SGX is enabled */
+ if (msr & (1 << 18))
+ return;
+ }
+
+ skl_cstates[5].disabled = 1; /* C8-SKL */
+ skl_cstates[6].disabled = 1; /* C9-SKL */
+}
+/*
+ * intel_idle_state_table_update()
+ *
+ * Update the default state_table for this CPU-id
+ */
+
+static void intel_idle_state_table_update(void)
+{
+ switch (boot_cpu_data.x86_model) {
+
+ case 0x3e: /* IVT */
+ ivt_idle_state_table_update();
+ break;
+ case 0x5e: /* SKL-H */
+ sklh_idle_state_table_update();
+ break;
}
- return;
}
/*
@@ -1063,6 +1119,14 @@ static int __init intel_idle_cpuidle_driver_init(void)
if (num_substates == 0)
continue;
+ /* if state marked as disabled, skip it */
+ if (cpuidle_state_table[cstate].disabled != 0) {
+ pr_debug(PREFIX "state %s is disabled",
+ cpuidle_state_table[cstate].name);
+ continue;
+ }
+
+
if (((mwait_cstate + 1) > 2) &&
!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
mark_tsc_unstable("TSC halts in idle"
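
The new sklh_idle_state_table_update() packs several conditions into a short function; restating them helps review. C8/C9 are only disabled when PC10 is actually reachable — not capped by intel_idle.max_cstate, advertised in CPUID.MWAIT.EDX, and allowed by the package C-state limit field of MSR_NHM_SNB_PKG_CST_CFG_CTL — and when SGX is either absent (CPUID.7:EBX bit 2 clear) or present but not enabled (MSR_IA32_FEATURE_CONTROL bit 18 clear). A hedged sketch of that decision as a standalone predicate, using only facts visible in the hunk (max_cstate and mwait_substates are the driver's module-level variables):

    /* Sketch: should SKL-H drop C8/C9 from the table?  Mirrors the checks
     * in sklh_idle_state_table_update(); not the driver code itself. */
    static bool example_sklh_should_disable_c8_c9(void)
    {
            unsigned long long msr;
            unsigned int eax, ebx, ecx, edx;

            if (max_cstate <= 7)                            /* PC10 capped on the cmdline */
                    return false;
            if ((mwait_substates & (0xF << 28)) == 0)       /* no PC10 in CPUID.MWAIT.EDX */
                    return false;

            rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr);
            if ((msr & 0xF) != 8)                           /* PKG C-state limit below PC10 */
                    return false;

            ecx = 0;
            cpuid(7, &eax, &ebx, &ecx, &edx);
            if (ebx & (1 << 2)) {                           /* SGX supported... */
                    rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
                    if (msr & (1 << 18))                    /* ...and enabled: keep C8/C9 */
                            return false;
            }
            return true;
    }
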
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index f121e6129..0e1a802c3 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -65,6 +65,7 @@ isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
static void isert_release_work(struct work_struct *work);
+static void isert_wait4flush(struct isert_conn *isert_conn);
static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
@@ -820,12 +821,31 @@ isert_put_conn(struct isert_conn *isert_conn)
kref_put(&isert_conn->kref, isert_release_kref);
}
+static void
+isert_handle_unbound_conn(struct isert_conn *isert_conn)
+{
+ struct isert_np *isert_np = isert_conn->cm_id->context;
+
+ mutex_lock(&isert_np->mutex);
+ if (!list_empty(&isert_conn->node)) {
+ /*
+ * This means iscsi doesn't know this connection
+ * so schedule a cleanup ourselves
+ */
+ list_del_init(&isert_conn->node);
+ isert_put_conn(isert_conn);
+ complete(&isert_conn->wait);
+ queue_work(isert_release_wq, &isert_conn->release_work);
+ }
+ mutex_unlock(&isert_np->mutex);
+}
+
/**
* isert_conn_terminate() - Initiate connection termination
* @isert_conn: isert connection struct
*
* Notes:
- * In case the connection state is FULL_FEATURE, move state
+ * In case the connection state is BOUND, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
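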
* In case the connection state is UP, complete flush as well.
*
@@ -837,23 +857,19 @@ isert_conn_terminate(struct isert_conn *isert_conn)
{
int err;
- switch (isert_conn->state) {
- case ISER_CONN_TERMINATING:
- break;
- case ISER_CONN_UP:
- case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
- isert_info("Terminating conn %p state %d\n",
- isert_conn, isert_conn->state);
- isert_conn->state = ISER_CONN_TERMINATING;
- err = rdma_disconnect(isert_conn->cm_id);
- if (err)
- isert_warn("Failed rdma_disconnect isert_conn %p\n",
- isert_conn);
- break;
- default:
- isert_warn("conn %p teminating in state %d\n",
- isert_conn, isert_conn->state);
- }
+ if (isert_conn->state >= ISER_CONN_TERMINATING)
+ return;
+
+ isert_info("Terminating conn %p state %d\n",
+ isert_conn, isert_conn->state);
+ isert_conn->state = ISER_CONN_TERMINATING;
+ err = rdma_disconnect(isert_conn->cm_id);
+ if (err)
+ isert_warn("Failed rdma_disconnect isert_conn %p\n",
+ isert_conn);
+
+ isert_info("conn %p completing wait\n", isert_conn);
+ complete(&isert_conn->wait);
}
static int
@@ -887,35 +903,27 @@ static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
enum rdma_cm_event_type event)
{
- struct isert_np *isert_np = cma_id->context;
- struct isert_conn *isert_conn;
- bool terminating = false;
-
- if (isert_np->cm_id == cma_id)
- return isert_np_cma_handler(cma_id->context, event);
-
- isert_conn = cma_id->qp->qp_context;
+ struct isert_conn *isert_conn = cma_id->qp->qp_context;
mutex_lock(&isert_conn->mutex);
- terminating = (isert_conn->state == ISER_CONN_TERMINATING);
- isert_conn_terminate(isert_conn);
- mutex_unlock(&isert_conn->mutex);
-
- isert_info("conn %p completing wait\n", isert_conn);
- complete(&isert_conn->wait);
-
- if (terminating)
- goto out;
-
- mutex_lock(&isert_np->mutex);
- if (!list_empty(&isert_conn->node)) {
- list_del_init(&isert_conn->node);
- isert_put_conn(isert_conn);
- queue_work(isert_release_wq, &isert_conn->release_work);
+ switch (isert_conn->state) {
+ case ISER_CONN_TERMINATING:
+ break;
+ case ISER_CONN_UP:
+ isert_conn_terminate(isert_conn);
+ isert_wait4flush(isert_conn);
+ isert_handle_unbound_conn(isert_conn);
+ break;
+ case ISER_CONN_BOUND:
+ case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ break;
+ default:
+ isert_warn("conn %p teminating in state %d\n",
+ isert_conn, isert_conn->state);
}
- mutex_unlock(&isert_np->mutex);
+ mutex_unlock(&isert_conn->mutex);
-out:
return 0;
}
@@ -934,12 +942,16 @@ isert_connect_error(struct rdma_cm_id *cma_id)
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
+ struct isert_np *isert_np = cma_id->context;
int ret = 0;
isert_info("%s (%d): status %d id %p np %p\n",
rdma_event_msg(event->event), event->event,
event->status, cma_id, cma_id->context);
+ if (isert_np->cm_id == cma_id)
+ return isert_np_cma_handler(cma_id->context, event->event);
+
switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST:
ret = isert_connect_request(cma_id, event);
@@ -985,13 +997,10 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
rx_wr--;
rx_wr->next = NULL; /* mark end of work requests list */
- isert_conn->post_recv_buf_count += count;
ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
&rx_wr_failed);
- if (ret) {
+ if (ret)
isert_err("ib_post_recv() failed with ret: %d\n", ret);
- isert_conn->post_recv_buf_count -= count;
- }
return ret;
}
@@ -1007,12 +1016,9 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
rx_wr.num_sge = 1;
rx_wr.next = NULL;
- isert_conn->post_recv_buf_count++;
ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
- if (ret) {
+ if (ret)
isert_err("ib_post_recv() failed with ret: %d\n", ret);
- isert_conn->post_recv_buf_count--;
- }
return ret;
}
@@ -1132,12 +1138,9 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
rx_wr.sg_list = &sge;
rx_wr.num_sge = 1;
- isert_conn->post_recv_buf_count++;
ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
- if (ret) {
+ if (ret)
isert_err("ib_post_recv() failed: %d\n", ret);
- isert_conn->post_recv_buf_count--;
- }
return ret;
}
@@ -1633,7 +1636,6 @@ isert_rcv_completion(struct iser_rx_desc *desc,
ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
DMA_FROM_DEVICE);
- isert_conn->post_recv_buf_count--;
}
static int
@@ -2048,7 +2050,8 @@ is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
void *start = isert_conn->rx_descs;
int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);
- if (wr_id >= start && wr_id < start + len)
+ if ((wr_id >= start && wr_id < start + len) ||
+ (wr_id == isert_conn->login_req_buf))
return false;
return true;
@@ -2072,10 +2075,6 @@ isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
isert_unmap_tx_desc(desc, ib_dev);
else
isert_completion_put(desc, isert_cmd, ib_dev, true);
- } else {
- isert_conn->post_recv_buf_count--;
- if (!isert_conn->post_recv_buf_count)
- iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
}
}
@@ -3214,6 +3213,7 @@ accept_wait:
conn->context = isert_conn;
isert_conn->conn = conn;
+ isert_conn->state = ISER_CONN_BOUND;
isert_set_conn_info(np, conn, isert_conn);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 8d50453ee..1aa019ab9 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -84,6 +84,7 @@ enum iser_ib_op_code {
enum iser_conn_state {
ISER_CONN_INIT,
ISER_CONN_UP,
+ ISER_CONN_BOUND,
ISER_CONN_FULL_FEATURE,
ISER_CONN_TERMINATING,
ISER_CONN_DOWN,
@@ -179,7 +180,6 @@ struct isert_device;
struct isert_conn {
enum iser_conn_state state;
- int post_recv_buf_count;
u32 responder_resources;
u32 initiator_depth;
bool pi_support;
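
The new ISER_CONN_BOUND value is inserted between UP and FULL_FEATURE, and the rewritten isert_conn_terminate() relies on that ordering with a single state >= ISER_CONN_TERMINATING test instead of the old switch. A small illustration of why the enum position matters (names here are illustrative, not the driver's):

    /* Illustrative only: the comparison is valid only because the enum is
     * declared in connection-lifetime order, so any state at or past
     * TERMINATING means teardown has already been initiated. */
    enum example_conn_state {
            EX_CONN_INIT,
            EX_CONN_UP,
            EX_CONN_BOUND,          /* np accept handed the conn to iscsit */
            EX_CONN_FULL_FEATURE,
            EX_CONN_TERMINATING,
            EX_CONN_DOWN,
    };

    static bool example_already_terminating(enum example_conn_state s)
    {
            return s >= EX_CONN_TERMINATING;
    }
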
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 0c37fee36..4328679a6 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1670,47 +1670,6 @@ send_sense:
return -1;
}
-/**
- * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
- * @ch: RDMA channel of the task management request.
- * @fn: Task management function to perform.
- * @req_tag: Tag of the SRP task management request.
- * @mgmt_ioctx: I/O context of the task management request.
- *
- * Returns zero if the target core will process the task management
- * request asynchronously.
- *
- * Note: It is assumed that the initiator serializes tag-based task management
- * requests.
- */
-static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
-{
- struct srpt_device *sdev;
- struct srpt_rdma_ch *ch;
- struct srpt_send_ioctx *target;
- int ret, i;
-
- ret = -EINVAL;
- ch = ioctx->ch;
- BUG_ON(!ch);
- BUG_ON(!ch->sport);
- sdev = ch->sport->sdev;
- BUG_ON(!sdev);
- spin_lock_irq(&sdev->spinlock);
- for (i = 0; i < ch->rq_size; ++i) {
- target = ch->ioctx_ring[i];
- if (target->cmd.se_lun == ioctx->cmd.se_lun &&
- target->cmd.tag == tag &&
- srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
- ret = 0;
- /* now let the target core abort &target->cmd; */
- break;
- }
- }
- spin_unlock_irq(&sdev->spinlock);
- return ret;
-}
-
static int srp_tmr_to_tcm(int fn)
{
switch (fn) {
@@ -1745,7 +1704,6 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
struct se_cmd *cmd;
struct se_session *sess = ch->sess;
uint64_t unpacked_lun;
- uint32_t tag = 0;
int tcm_tmr;
int rc;
@@ -1761,25 +1719,10 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
send_ioctx->cmd.tag = srp_tsk->tag;
tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
- if (tcm_tmr < 0) {
- send_ioctx->cmd.se_tmr_req->response =
- TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
- goto fail;
- }
unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
sizeof(srp_tsk->lun));
-
- if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
- rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
- if (rc < 0) {
- send_ioctx->cmd.se_tmr_req->response =
- TMR_TASK_DOES_NOT_EXIST;
- goto fail;
- }
- tag = srp_tsk->task_tag;
- }
rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
- srp_tsk, tcm_tmr, GFP_KERNEL, tag,
+ srp_tsk, tcm_tmr, GFP_KERNEL, srp_tsk->task_tag,
TARGET_SCF_ACK_KREF);
if (rc != 0) {
send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index cfd58e87d..1c5914cae 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -817,26 +817,49 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
ar2->udev = udev;
+ /* Sanity check, first interface must have an endpoint */
+ if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
+ dev_err(&interface->dev,
+ "%s(): interface 0 must have an endpoint\n", __func__);
+ r = -ENODEV;
+ goto fail1;
+ }
ar2->intf[0] = interface;
ar2->ep[0] = &alt->endpoint[0].desc;
+ /* Sanity check, the device must have two interfaces */
ar2->intf[1] = usb_ifnum_to_if(udev, 1);
+ if ((udev->actconfig->desc.bNumInterfaces < 2) || !ar2->intf[1]) {
+ dev_err(&interface->dev, "%s(): need 2 interfaces, found %d\n",
+ __func__, udev->actconfig->desc.bNumInterfaces);
+ r = -ENODEV;
+ goto fail1;
+ }
+
r = usb_driver_claim_interface(&ati_remote2_driver, ar2->intf[1], ar2);
if (r)
goto fail1;
+
+ /* Sanity check, second interface must have an endpoint */
alt = ar2->intf[1]->cur_altsetting;
+ if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
+ dev_err(&interface->dev,
+ "%s(): interface 1 must have an endpoint\n", __func__);
+ r = -ENODEV;
+ goto fail2;
+ }
ar2->ep[1] = &alt->endpoint[0].desc;
r = ati_remote2_urb_init(ar2);
if (r)
- goto fail2;
+ goto fail3;
ar2->channel_mask = channel_mask;
ar2->mode_mask = mode_mask;
r = ati_remote2_setup(ar2, ar2->channel_mask);
if (r)
- goto fail2;
+ goto fail3;
usb_make_path(udev, ar2->phys, sizeof(ar2->phys));
strlcat(ar2->phys, "/input0", sizeof(ar2->phys));
@@ -845,11 +868,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
r = sysfs_create_group(&udev->dev.kobj, &ati_remote2_attr_group);
if (r)
- goto fail2;
+ goto fail3;
r = ati_remote2_input_init(ar2);
if (r)
- goto fail3;
+ goto fail4;
usb_set_intfdata(interface, ar2);
@@ -857,10 +880,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
return 0;
- fail3:
+ fail4:
sysfs_remove_group(&udev->dev.kobj, &ati_remote2_attr_group);
- fail2:
+ fail3:
ati_remote2_urb_cleanup(ar2);
+ fail2:
usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]);
fail1:
kfree(ar2);
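
The ati_remote2 hunks above and the ims-pcu/powermate hunks that follow are the same class of hardening: never trust a USB device's descriptors, so validate bNumInterfaces, bNumEndpoints and the endpoint array before dereferencing them in probe(). A compact, hedged sketch of the probe-time checks (the function is illustrative, not the exact driver code):

    /* Sketch: descriptor sanity checks before touching endpoint[0].
     * A malicious or broken device can present fewer endpoints or
     * interfaces than the driver was written to expect. */
    static int example_probe_checks(struct usb_interface *intf)
    {
            struct usb_host_interface *alt = intf->cur_altsetting;
            struct usb_device *udev = interface_to_usbdev(intf);

            if (alt->desc.bNumEndpoints < 1 || !alt->endpoint)
                    return -ENODEV;         /* no endpoint to use */

            if (udev->actconfig->desc.bNumInterfaces < 2 ||
                !usb_ifnum_to_if(udev, 1))
                    return -ENODEV;         /* second interface missing */

            if (!usb_endpoint_is_int_in(&alt->endpoint[0].desc))
                    return -EIO;            /* wrong endpoint type */

            return 0;
    }
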
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index 43d082530..f47851868 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1663,6 +1663,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
pcu->ctrl_intf = usb_ifnum_to_if(pcu->udev,
union_desc->bMasterInterface0);
+ if (!pcu->ctrl_intf)
+ return -EINVAL;
alt = pcu->ctrl_intf->cur_altsetting;
pcu->ep_ctrl = &alt->endpoint[0].desc;
@@ -1670,6 +1672,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
pcu->data_intf = usb_ifnum_to_if(pcu->udev,
union_desc->bSlaveInterface0);
+ if (!pcu->data_intf)
+ return -EINVAL;
alt = pcu->data_intf->cur_altsetting;
if (alt->desc.bNumEndpoints != 2) {
diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
index 63b539d3d..84909a12f 100644
--- a/drivers/input/misc/powermate.c
+++ b/drivers/input/misc/powermate.c
@@ -307,6 +307,9 @@ static int powermate_probe(struct usb_interface *intf, const struct usb_device_i
int error = -ENOMEM;
interface = intf->cur_altsetting;
+ if (interface->desc.bNumEndpoints < 1)
+ return -EINVAL;
+
endpoint = &interface->endpoint[0].desc;
if (!usb_endpoint_is_int_in(endpoint))
return -EIO;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index e678212f1..a5c2eb291 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -862,8 +862,9 @@ static void synaptics_report_ext_buttons(struct psmouse *psmouse,
if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap))
return;
- /* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */
- if (SYN_ID_FULL(priv->identity) == 0x801 &&
+ /* Bug in FW 8.1 & 8.2, buttons are reported only when ExtBit is 1 */
+ if ((SYN_ID_FULL(priv->identity) == 0x801 ||
+ SYN_ID_FULL(priv->identity) == 0x802) &&
!((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02))
return;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 8d0ead98e..a296425a7 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1015,8 +1015,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
*/
atomic_set(&dc->count, 1);
- if (bch_cached_dev_writeback_start(dc))
+ /* Block writeback thread, but spawn it */
+ down_write(&dc->writeback_lock);
+ if (bch_cached_dev_writeback_start(dc)) {
+ up_write(&dc->writeback_lock);
return -ENOMEM;
+ }
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
bch_sectors_dirty_init(dc);
@@ -1028,6 +1032,9 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
bch_cached_dev_run(dc);
bcache_device_link(&dc->disk, c, "bdev");
+ /* Allow the writeback thread to proceed */
+ up_write(&dc->writeback_lock);
+
pr_info("Caching %s as %s on set %pU",
bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
dc->disk.c->sb.set_uuid);
@@ -1366,6 +1373,9 @@ static void cache_set_flush(struct closure *cl)
struct btree *b;
unsigned i;
+ if (!c)
+ closure_return(cl);
+
bch_cache_accounting_destroy(&c->accounting);
kobject_put(&c->internal);
@@ -1828,11 +1838,12 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
return 0;
}
-static void register_cache(struct cache_sb *sb, struct page *sb_page,
+static int register_cache(struct cache_sb *sb, struct page *sb_page,
struct block_device *bdev, struct cache *ca)
{
char name[BDEVNAME_SIZE];
- const char *err = "cannot allocate memory";
+ const char *err = NULL;
+ int ret = 0;
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca->bdev = bdev;
@@ -1847,27 +1858,35 @@ static void register_cache(struct cache_sb *sb, struct page *sb_page,
if (blk_queue_discard(bdev_get_queue(ca->bdev)))
ca->discard = CACHE_DISCARD(&ca->sb);
- if (cache_alloc(sb, ca) != 0)
+ ret = cache_alloc(sb, ca);
+ if (ret != 0)
goto err;
- err = "error creating kobject";
- if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
- goto err;
+ if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
+ err = "error calling kobject_add";
+ ret = -ENOMEM;
+ goto out;
+ }
mutex_lock(&bch_register_lock);
err = register_cache_set(ca);
mutex_unlock(&bch_register_lock);
- if (err)
- goto err;
+ if (err) {
+ ret = -ENODEV;
+ goto out;
+ }
pr_info("registered cache device %s", bdevname(bdev, name));
+
out:
kobject_put(&ca->kobj);
- return;
+
err:
- pr_notice("error opening %s: %s", bdevname(bdev, name), err);
- goto out;
+ if (err)
+ pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+
+ return ret;
}
/* Global interfaces/init */
@@ -1965,7 +1984,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!ca)
goto err_close;
- register_cache(sb, sb_page, bdev, ca);
+ if (register_cache(sb, sb_page, bdev, ca) != 0)
+ goto err_close;
}
out:
if (sb_page)
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index f6543f3a9..27f2ef300 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -867,19 +867,40 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
return 0;
}
-#define WRITE_LOCK(cmd) \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
+#define WRITE_LOCK(cmd) \
+ down_write(&cmd->root_lock); \
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+ up_write(&cmd->root_lock); \
return -EINVAL; \
- down_write(&cmd->root_lock)
+ }
#define WRITE_LOCK_VOID(cmd) \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
+ down_write(&cmd->root_lock); \
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+ up_write(&cmd->root_lock); \
return; \
- down_write(&cmd->root_lock)
+ }
#define WRITE_UNLOCK(cmd) \
up_write(&cmd->root_lock)
+#define READ_LOCK(cmd) \
+ down_read(&cmd->root_lock); \
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+ up_read(&cmd->root_lock); \
+ return -EINVAL; \
+ }
+
+#define READ_LOCK_VOID(cmd) \
+ down_read(&cmd->root_lock); \
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
+ up_read(&cmd->root_lock); \
+ return; \
+ }
+
+#define READ_UNLOCK(cmd) \
+ up_read(&cmd->root_lock)
+
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
{
int r;
@@ -1015,22 +1036,20 @@ int dm_cache_load_discards(struct dm_cache_metadata *cmd,
{
int r;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = __load_discards(cmd, fn, context);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
-dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd)
+int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result)
{
- dm_cblock_t r;
+ READ_LOCK(cmd);
+ *result = cmd->cache_blocks;
+ READ_UNLOCK(cmd);
- down_read(&cmd->root_lock);
- r = cmd->cache_blocks;
- up_read(&cmd->root_lock);
-
- return r;
+ return 0;
}
static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
@@ -1188,9 +1207,9 @@ int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
{
int r;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = __load_mappings(cmd, policy, fn, context);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
@@ -1215,18 +1234,18 @@ static int __dump_mappings(struct dm_cache_metadata *cmd)
void dm_cache_dump(struct dm_cache_metadata *cmd)
{
- down_read(&cmd->root_lock);
+ READ_LOCK_VOID(cmd);
__dump_mappings(cmd);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
}
int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
{
int r;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = cmd->changed;
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
@@ -1276,9 +1295,9 @@ int dm_cache_set_dirty(struct dm_cache_metadata *cmd,
void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
struct dm_cache_statistics *stats)
{
- down_read(&cmd->root_lock);
+ READ_LOCK_VOID(cmd);
*stats = cmd->stats;
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
}
void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
@@ -1312,9 +1331,9 @@ int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
{
int r = -EINVAL;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = dm_sm_get_nr_free(cmd->metadata_sm, result);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
@@ -1324,9 +1343,9 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
{
int r = -EINVAL;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
@@ -1417,7 +1436,13 @@ int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
{
- return blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
+ int r;
+
+ READ_LOCK(cmd);
+ r = blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
+ READ_UNLOCK(cmd);
+
+ return r;
}
void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd)
@@ -1440,10 +1465,7 @@ int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
struct dm_block *sblock;
struct cache_disk_superblock *disk_super;
- /*
- * We ignore fail_io for this function.
- */
- down_write(&cmd->root_lock);
+ WRITE_LOCK(cmd);
set_bit(NEEDS_CHECK, &cmd->flags);
r = superblock_lock(cmd, &sblock);
@@ -1458,19 +1480,17 @@ int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
dm_bm_unlock(sblock);
out:
- up_write(&cmd->root_lock);
+ WRITE_UNLOCK(cmd);
return r;
}
-bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd)
+int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result)
{
- bool needs_check;
+ READ_LOCK(cmd);
+ *result = !!test_bit(NEEDS_CHECK, &cmd->flags);
+ READ_UNLOCK(cmd);
- down_read(&cmd->root_lock);
- needs_check = !!test_bit(NEEDS_CHECK, &cmd->flags);
- up_read(&cmd->root_lock);
-
- return needs_check;
+ return 0;
}
int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
index 2ffee21f3..852874419 100644
--- a/drivers/md/dm-cache-metadata.h
+++ b/drivers/md/dm-cache-metadata.h
@@ -66,7 +66,7 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
* origin blocks to map to.
*/
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
-dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd);
+int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result);
int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
sector_t discard_block_size,
@@ -137,7 +137,7 @@ int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
*/
int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);
-bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd);
+int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result);
int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd);
void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd);
void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd);
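
The READ_LOCK()/READ_LOCK_VOID()/WRITE_LOCK() macros above deserve a note: they take the semaphore first and only then test fail_io / read-only, dropping the lock again before returning -EINVAL (or plain return in the *_VOID variants). That closes the window in the old ordering, where the flags were tested before the lock was held. Because the macros contain a hidden return, every accessor that adopts them must be able to return an error, which is exactly why dm_cache_size() and dm_cache_metadata_needs_check() change their prototypes to return int with an out-parameter. A hedged sketch of a getter under the new convention (mirroring dm_cache_size() in the hunk):

    /* Sketch: a read-side accessor under the lock-then-check convention.
     * READ_LOCK() may return -EINVAL on the caller's behalf, so even a
     * simple getter needs an int return and an out-parameter. */
    int example_get_cache_blocks(struct dm_cache_metadata *cmd, dm_cblock_t *result)
    {
            READ_LOCK(cmd);         /* returns -EINVAL if fail_io or read-only */
            *result = cmd->cache_blocks;
            READ_UNLOCK(cmd);

            return 0;
    }
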
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 5780accff..bb9b92ebb 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -984,9 +984,14 @@ static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mod
static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
{
- bool needs_check = dm_cache_metadata_needs_check(cache->cmd);
+ bool needs_check;
enum cache_metadata_mode old_mode = get_cache_mode(cache);
+ if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
+ DMERR("unable to read needs_check flag, setting failure mode");
+ new_mode = CM_FAIL;
+ }
+
if (new_mode == CM_WRITE && needs_check) {
DMERR("%s: unable to switch cache to write mode until repaired.",
cache_device_name(cache));
@@ -3510,6 +3515,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
char buf[BDEVNAME_SIZE];
struct cache *cache = ti->private;
dm_cblock_t residency;
+ bool needs_check;
switch (type) {
case STATUSTYPE_INFO:
@@ -3583,7 +3589,9 @@ static void cache_status(struct dm_target *ti, status_type_t type,
else
DMEMIT("rw ");
- if (dm_cache_metadata_needs_check(cache->cmd))
+ r = dm_cache_metadata_needs_check(cache->cmd, &needs_check);
+
+ if (r || needs_check)
DMEMIT("needs_check ");
else
DMEMIT("- ");
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 376638608..e4d1bafe7 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1105,6 +1105,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
int i;
int r = -EINVAL;
char *origin_path, *cow_path;
+ dev_t origin_dev, cow_dev;
unsigned args_used, num_flush_bios = 1;
fmode_t origin_mode = FMODE_READ;
@@ -1135,11 +1136,19 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->error = "Cannot get origin device";
goto bad_origin;
}
+ origin_dev = s->origin->bdev->bd_dev;
cow_path = argv[0];
argv++;
argc--;
+ cow_dev = dm_get_dev_t(cow_path);
+ if (cow_dev && cow_dev == origin_dev) {
+ ti->error = "COW device cannot be the same as origin device";
+ r = -EINVAL;
+ goto bad_cow;
+ }
+
r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
if (r) {
ti->error = "Cannot get COW device";
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 061152a43..cb5d0daf5 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -365,6 +365,26 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
}
/*
+ * Convert the path to a device
+ */
+dev_t dm_get_dev_t(const char *path)
+{
+ dev_t uninitialized_var(dev);
+ struct block_device *bdev;
+
+ bdev = lookup_bdev(path);
+ if (IS_ERR(bdev))
+ dev = name_to_dev_t(path);
+ else {
+ dev = bdev->bd_dev;
+ bdput(bdev);
+ }
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(dm_get_dev_t);
+
+/*
* Add a device to the list, or just increment the usage count if
* it's already present.
*/
@@ -372,23 +392,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
struct dm_dev **result)
{
int r;
- dev_t uninitialized_var(dev);
+ dev_t dev;
struct dm_dev_internal *dd;
struct dm_table *t = ti->table;
- struct block_device *bdev;
BUG_ON(!t);
- /* convert the path to a device */
- bdev = lookup_bdev(path);
- if (IS_ERR(bdev)) {
- dev = name_to_dev_t(path);
- if (!dev)
- return -ENODEV;
- } else {
- dev = bdev->bd_dev;
- bdput(bdev);
- }
+ dev = dm_get_dev_t(path);
+ if (!dev)
+ return -ENODEV;
dd = find_device(&t->devices, dev);
if (!dd) {
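
dm_get_dev_t() factors the path-to-dev_t conversion out of dm_get_device() and exports it, and the dm-snap hunk earlier uses it to reject a COW device that resolves to the same dev_t as the origin before any table reference is taken. A hedged usage sketch (the error string follows the dm-snap hunk; the helper name here is illustrative):

    /* Sketch: pre-flight check that two table arguments do not name the
     * same underlying block device, using the newly exported helper. */
    static int example_check_cow_vs_origin(struct dm_target *ti,
                                           dev_t origin_dev, const char *cow_path)
    {
            dev_t cow_dev = dm_get_dev_t(cow_path);

            /* dm_get_dev_t() yields 0 when the path cannot be resolved; in
             * that case the later dm_get_device() call reports the real error. */
            if (cow_dev && cow_dev == origin_dev) {
                    ti->error = "COW device cannot be the same as origin device";
                    return -EINVAL;
            }
            return 0;
    }
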
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index f962d6453..185010d9c 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1981,5 +1981,8 @@ bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
{
- dm_tm_issue_prefetches(pmd->tm);
+ down_read(&pmd->root_lock);
+ if (!pmd->fail_io)
+ dm_tm_issue_prefetches(pmd->tm);
+ up_read(&pmd->root_lock);
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index dd834927b..c338aebb4 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1109,12 +1109,8 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
* back into ->request_fn() could deadlock attempting to grab the
* queue lock again.
*/
- if (run_queue) {
- if (md->queue->mq_ops)
- blk_mq_run_hw_queues(md->queue, true);
- else
- blk_run_queue_async(md->queue);
- }
+ if (!md->queue->mq_ops && run_queue)
+ blk_run_queue_async(md->queue);
/*
* dm_put() must be at the end of this function. See the comment above
@@ -1214,9 +1210,9 @@ static void dm_requeue_original_request(struct mapped_device *md,
{
int rw = rq_data_dir(rq);
+ rq_end_stats(md, rq);
dm_unprep_request(rq);
- rq_end_stats(md, rq);
if (!rq->q->mq_ops)
old_requeue_request(rq);
else {
@@ -1336,7 +1332,10 @@ static void dm_complete_request(struct request *rq, int error)
struct dm_rq_target_io *tio = tio_from_request(rq);
tio->error = error;
- blk_complete_request(rq);
+ if (!rq->q->mq_ops)
+ blk_complete_request(rq);
+ else
+ blk_mq_complete_request(rq, error);
}
/*
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 0a72ab6e6..dd483bb2e 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -129,7 +129,9 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
}
multipath = conf->multipaths + mp_bh->path;
- mp_bh->bio = *bio;
+ bio_init(&mp_bh->bio);
+ __bio_clone_fast(&mp_bh->bio, bio);
+
mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
mp_bh->bio.bi_bdev = multipath->rdev->bdev;
mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4e3843f7d..bb5bce059 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2274,6 +2274,7 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
if (fail) {
spin_lock_irq(&conf->device_lock);
list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
+ conf->nr_queued++;
spin_unlock_irq(&conf->device_lock);
md_wakeup_thread(conf->mddev->thread);
} else {
@@ -2391,8 +2392,10 @@ static void raid1d(struct md_thread *thread)
LIST_HEAD(tmp);
spin_lock_irqsave(&conf->device_lock, flags);
if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
- list_add(&tmp, &conf->bio_end_io_list);
- list_del_init(&conf->bio_end_io_list);
+ while (!list_empty(&conf->bio_end_io_list)) {
+ list_move(conf->bio_end_io_list.prev, &tmp);
+ conf->nr_queued--;
+ }
}
spin_unlock_irqrestore(&conf->device_lock, flags);
while (!list_empty(&tmp)) {
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 1c1447dd3..e3fd725d5 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2664,6 +2664,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
if (fail) {
spin_lock_irq(&conf->device_lock);
list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
+ conf->nr_queued++;
spin_unlock_irq(&conf->device_lock);
md_wakeup_thread(conf->mddev->thread);
} else {
@@ -2691,8 +2692,10 @@ static void raid10d(struct md_thread *thread)
LIST_HEAD(tmp);
spin_lock_irqsave(&conf->device_lock, flags);
if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
- list_add(&tmp, &conf->bio_end_io_list);
- list_del_init(&conf->bio_end_io_list);
+ while (!list_empty(&conf->bio_end_io_list)) {
+ list_move(conf->bio_end_io_list.prev, &tmp);
+ conf->nr_queued--;
+ }
}
spin_unlock_irqrestore(&conf->device_lock, flags);
while (!list_empty(&tmp)) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b4f02c995..32d52878f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -340,8 +340,7 @@ static void release_inactive_stripe_list(struct r5conf *conf,
int hash)
{
int size;
- unsigned long do_wakeup = 0;
- int i = 0;
+ bool do_wakeup = false;
unsigned long flags;
if (hash == NR_STRIPE_HASH_LOCKS) {
@@ -362,19 +361,15 @@ static void release_inactive_stripe_list(struct r5conf *conf,
!list_empty(list))
atomic_dec(&conf->empty_inactive_list_nr);
list_splice_tail_init(list, conf->inactive_list + hash);
- do_wakeup |= 1 << hash;
+ do_wakeup = true;
spin_unlock_irqrestore(conf->hash_locks + hash, flags);
}
size--;
hash--;
}
- for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
- if (do_wakeup & (1 << i))
- wake_up(&conf->wait_for_stripe[i]);
- }
-
if (do_wakeup) {
+ wake_up(&conf->wait_for_stripe);
if (atomic_read(&conf->active_stripes) == 0)
wake_up(&conf->wait_for_quiescent);
if (conf->retry_read_aligned)
@@ -687,15 +682,14 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
if (!sh) {
set_bit(R5_INACTIVE_BLOCKED,
&conf->cache_state);
- wait_event_exclusive_cmd(
- conf->wait_for_stripe[hash],
+ wait_event_lock_irq(
+ conf->wait_for_stripe,
!list_empty(conf->inactive_list + hash) &&
(atomic_read(&conf->active_stripes)
< (conf->max_nr_stripes * 3 / 4)
|| !test_bit(R5_INACTIVE_BLOCKED,
&conf->cache_state)),
- spin_unlock_irq(conf->hash_locks + hash),
- spin_lock_irq(conf->hash_locks + hash));
+ *(conf->hash_locks + hash));
clear_bit(R5_INACTIVE_BLOCKED,
&conf->cache_state);
} else {
@@ -720,9 +714,6 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
}
} while (sh == NULL);
- if (!list_empty(conf->inactive_list + hash))
- wake_up(&conf->wait_for_stripe[hash]);
-
spin_unlock_irq(conf->hash_locks + hash);
return sh;
}
@@ -2089,6 +2080,14 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
unsigned long cpu;
int err = 0;
+ /*
+ * Never shrink. And mddev_suspend() could deadlock if this is called
+ * from raid5d. In that case, scribble_disks and scribble_sectors
+ * should equal new_disks and new_sectors
+ */
+ if (conf->scribble_disks >= new_disks &&
+ conf->scribble_sectors >= new_sectors)
+ return 0;
mddev_suspend(conf->mddev);
get_online_cpus();
for_each_present_cpu(cpu) {
@@ -2110,6 +2109,10 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
}
put_online_cpus();
mddev_resume(conf->mddev);
+ if (!err) {
+ conf->scribble_disks = new_disks;
+ conf->scribble_sectors = new_sectors;
+ }
return err;
}
@@ -2190,7 +2193,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
cnt = 0;
list_for_each_entry(nsh, &newstripes, lru) {
lock_device_hash_lock(conf, hash);
- wait_event_exclusive_cmd(conf->wait_for_stripe[hash],
+ wait_event_cmd(conf->wait_for_stripe,
!list_empty(conf->inactive_list + hash),
unlock_device_hash_lock(conf, hash),
lock_device_hash_lock(conf, hash));
@@ -4236,7 +4239,6 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
(1 << STRIPE_SYNCING) |
(1 << STRIPE_REPLACED) |
- (1 << STRIPE_PREREAD_ACTIVE) |
(1 << STRIPE_DELAYED) |
(1 << STRIPE_BIT_DELAY) |
(1 << STRIPE_FULL_WRITE) |
@@ -4251,6 +4253,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
(1 << STRIPE_REPLACED)));
set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
+ (1 << STRIPE_PREREAD_ACTIVE) |
(1 << STRIPE_DEGRADED)),
head_sh->state & (1 << STRIPE_INSYNC));
@@ -6413,6 +6416,12 @@ static int raid5_alloc_percpu(struct r5conf *conf)
}
put_online_cpus();
+ if (!err) {
+ conf->scribble_disks = max(conf->raid_disks,
+ conf->previous_raid_disks);
+ conf->scribble_sectors = max(conf->chunk_sectors,
+ conf->prev_chunk_sectors);
+ }
return err;
}
@@ -6503,9 +6512,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
seqcount_init(&conf->gen_lock);
mutex_init(&conf->cache_size_mutex);
init_waitqueue_head(&conf->wait_for_quiescent);
- for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
- init_waitqueue_head(&conf->wait_for_stripe[i]);
- }
+ init_waitqueue_head(&conf->wait_for_stripe);
init_waitqueue_head(&conf->wait_for_overlap);
INIT_LIST_HEAD(&conf->handle_list);
INIT_LIST_HEAD(&conf->hold_list);
@@ -7014,8 +7021,8 @@ static int raid5_run(struct mddev *mddev)
}
if (discard_supported &&
- mddev->queue->limits.max_discard_sectors >= stripe &&
- mddev->queue->limits.discard_granularity >= stripe)
+ mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
+ mddev->queue->limits.discard_granularity >= stripe)
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
mddev->queue);
else
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index a415e1cd3..517d4b68a 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -510,6 +510,8 @@ struct r5conf {
* conversions
*/
} __percpu *percpu;
+ int scribble_disks;
+ int scribble_sectors;
#ifdef CONFIG_HOTPLUG_CPU
struct notifier_block cpu_notify;
#endif
@@ -522,7 +524,7 @@ struct r5conf {
atomic_t empty_inactive_list_nr;
struct llist_head released_stripes;
wait_queue_head_t wait_for_quiescent;
- wait_queue_head_t wait_for_stripe[NR_STRIPE_HASH_LOCKS];
+ wait_queue_head_t wait_for_stripe;
wait_queue_head_t wait_for_overlap;
unsigned long cache_state;
#define R5_INACTIVE_BLOCKED 1 /* release of inactive stripes blocked,
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index 471fd23b5..08d2c6bf7 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -1161,12 +1161,23 @@ static void adv7511_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd, in
}
}
+static void adv7511_notify_no_edid(struct v4l2_subdev *sd)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ struct adv7511_edid_detect ed;
+
+ /* We failed to read the EDID, so send an event for this. */
+ ed.present = false;
+ ed.segment = adv7511_rd(sd, 0xc4);
+ v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
+ v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x0);
+}
+
static void adv7511_edid_handler(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct adv7511_state *state = container_of(dwork, struct adv7511_state, edid_handler);
struct v4l2_subdev *sd = &state->sd;
- struct adv7511_edid_detect ed;
v4l2_dbg(1, debug, sd, "%s:\n", __func__);
@@ -1191,9 +1202,7 @@ static void adv7511_edid_handler(struct work_struct *work)
}
/* We failed to read the EDID, so send an event for this. */
- ed.present = false;
- ed.segment = adv7511_rd(sd, 0xc4);
- v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
+ adv7511_notify_no_edid(sd);
v4l2_dbg(1, debug, sd, "%s: no edid found\n", __func__);
}
@@ -1264,7 +1273,6 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
/* update read only ctrls */
v4l2_ctrl_s_ctrl(state->hotplug_ctrl, adv7511_have_hotplug(sd) ? 0x1 : 0x0);
v4l2_ctrl_s_ctrl(state->rx_sense_ctrl, adv7511_have_rx_sense(sd) ? 0x1 : 0x0);
- v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
if ((status & MASK_ADV7511_HPD_DETECT) && ((status & MASK_ADV7511_MSEN_DETECT) || state->edid.segments)) {
v4l2_dbg(1, debug, sd, "%s: hotplug and (rx-sense or edid)\n", __func__);
@@ -1294,6 +1302,7 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
}
adv7511_s_power(sd, false);
memset(&state->edid, 0, sizeof(struct adv7511_state_edid));
+ adv7511_notify_no_edid(sd);
}
}
@@ -1370,6 +1379,7 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
}
/* one more segment read ok */
state->edid.segments = segment + 1;
+ v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x1);
if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) {
/* Request next EDID segment */
v4l2_dbg(1, debug, sd, "%s: request segment %d\n", __func__, state->edid.segments);
@@ -1389,7 +1399,6 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
ed.present = true;
ed.segment = 0;
state->edid_detect_counter++;
- v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
return ed.present;
}
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 9400e9960..bedbd51fb 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -2334,6 +2334,19 @@ static int bttv_g_fmt_vid_overlay(struct file *file, void *priv,
return 0;
}
+static void bttv_get_width_mask_vid_cap(const struct bttv_format *fmt,
+ unsigned int *width_mask,
+ unsigned int *width_bias)
+{
+ if (fmt->flags & FORMAT_FLAGS_PLANAR) {
+ *width_mask = ~15; /* width must be a multiple of 16 pixels */
+ *width_bias = 8; /* nearest */
+ } else {
+ *width_mask = ~3; /* width must be a multiple of 4 pixels */
+ *width_bias = 2; /* nearest */
+ }
+}
+
static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
@@ -2343,6 +2356,7 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
enum v4l2_field field;
__s32 width, height;
__s32 height2;
+ unsigned int width_mask, width_bias;
int rc;
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
@@ -2375,9 +2389,9 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
width = f->fmt.pix.width;
height = f->fmt.pix.height;
+ bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
rc = limit_scaled_size_lock(fh, &width, &height, field,
- /* width_mask: 4 pixels */ ~3,
- /* width_bias: nearest */ 2,
+ width_mask, width_bias,
/* adjust_size */ 1,
/* adjust_crop */ 0);
if (0 != rc)
@@ -2410,6 +2424,7 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
struct bttv_fh *fh = priv;
struct bttv *btv = fh->btv;
__s32 width, height;
+ unsigned int width_mask, width_bias;
enum v4l2_field field;
retval = bttv_switch_type(fh, f->type);
@@ -2424,9 +2439,10 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
height = f->fmt.pix.height;
field = f->fmt.pix.field;
+ fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+ bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
retval = limit_scaled_size_lock(fh, &width, &height, f->fmt.pix.field,
- /* width_mask: 4 pixels */ ~3,
- /* width_bias: nearest */ 2,
+ width_mask, width_bias,
/* adjust_size */ 1,
/* adjust_crop */ 1);
if (0 != retval)
@@ -2434,8 +2450,6 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.field = field;
- fmt = format_by_fourcc(f->fmt.pix.pixelformat);
-
/* update our state information */
fh->fmt = fmt;
fh->cap.field = f->fmt.pix.field;
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index a63c1366a..1293563b7 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1219,10 +1219,13 @@ static int saa7134_g_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.height = dev->height;
f->fmt.pix.field = dev->field;
f->fmt.pix.pixelformat = dev->fmt->fourcc;
- f->fmt.pix.bytesperline =
- (f->fmt.pix.width * dev->fmt->depth) >> 3;
+ if (dev->fmt->planar)
+ f->fmt.pix.bytesperline = f->fmt.pix.width;
+ else
+ f->fmt.pix.bytesperline =
+ (f->fmt.pix.width * dev->fmt->depth) / 8;
f->fmt.pix.sizeimage =
- f->fmt.pix.height * f->fmt.pix.bytesperline;
+ (f->fmt.pix.height * f->fmt.pix.width * dev->fmt->depth) / 8;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
}
@@ -1298,10 +1301,13 @@ static int saa7134_try_fmt_vid_cap(struct file *file, void *priv,
if (f->fmt.pix.height > maxh)
f->fmt.pix.height = maxh;
f->fmt.pix.width &= ~0x03;
- f->fmt.pix.bytesperline =
- (f->fmt.pix.width * fmt->depth) >> 3;
+ if (fmt->planar)
+ f->fmt.pix.bytesperline = f->fmt.pix.width;
+ else
+ f->fmt.pix.bytesperline =
+ (f->fmt.pix.width * fmt->depth) / 8;
f->fmt.pix.sizeimage =
- f->fmt.pix.height * f->fmt.pix.bytesperline;
+ (f->fmt.pix.height * f->fmt.pix.width * fmt->depth) / 8;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
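
The saa7134 fix separates two quantities that only coincide for packed formats: bytesperline is the stride of the first (luma) plane, which for planar YUV is simply the width in bytes, while sizeimage must still account for the chroma planes via the total depth. A short numeric sketch, assuming a 720x576 frame (the helper is illustrative):

    /* Sketch: planar vs. packed sizing for a 720x576 frame.
     * planar YUV 4:2:0 (depth 12): bytesperline = 720,  sizeimage = 720*576*12/8 = 622080
     * packed YUYV      (depth 16): bytesperline = 1440, sizeimage = 1440*576     = 829440 */
    static void example_fill_sizes(unsigned int width, unsigned int height,
                                   unsigned int depth, bool planar,
                                   unsigned int *bytesperline, unsigned int *sizeimage)
    {
            *bytesperline = planar ? width : width * depth / 8;
            *sizeimage = height * width * depth / 8;
    }
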
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 7d28899f8..6efe9d002 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -1342,7 +1342,7 @@ static void coda_finish_encode(struct coda_ctx *ctx)
/* Calculate bytesused field */
if (dst_buf->sequence == 0) {
- vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr +
ctx->vpu_header_size[0] +
ctx->vpu_header_size[1] +
ctx->vpu_header_size[2]);
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 086cf1c7b..18aed5dd3 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -91,6 +91,7 @@ static const struct usb_device_id pwc_device_table [] = {
{ USB_DEVICE(0x0471, 0x0312) },
{ USB_DEVICE(0x0471, 0x0313) }, /* the 'new' 720K */
{ USB_DEVICE(0x0471, 0x0329) }, /* Philips SPC 900NC PC Camera */
+ { USB_DEVICE(0x0471, 0x032C) }, /* Philips SPC 880NC PC Camera */
{ USB_DEVICE(0x069A, 0x0001) }, /* Askey */
{ USB_DEVICE(0x046D, 0x08B0) }, /* Logitech QuickCam Pro 3000 */
{ USB_DEVICE(0x046D, 0x08B1) }, /* Logitech QuickCam Notebook Pro */
@@ -810,6 +811,11 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
name = "Philips SPC 900NC webcam";
type_id = 740;
break;
+ case 0x032C:
+ PWC_INFO("Philips SPC 880NC USB webcam detected.\n");
+ name = "Philips SPC 880NC webcam";
+ type_id = 740;
+ break;
default:
return -ENODEV;
break;
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 8fd84a674..019644ff6 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -415,7 +415,8 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
get_user(kp->index, &up->index) ||
get_user(kp->type, &up->type) ||
get_user(kp->flags, &up->flags) ||
- get_user(kp->memory, &up->memory))
+ get_user(kp->memory, &up->memory) ||
+ get_user(kp->length, &up->length))
return -EFAULT;
if (V4L2_TYPE_IS_OUTPUT(kp->type))
@@ -427,9 +428,6 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
return -EFAULT;
if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
- if (get_user(kp->length, &up->length))
- return -EFAULT;
-
num_planes = kp->length;
if (num_planes == 0) {
kp->m.planes = NULL;
@@ -462,16 +460,14 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
} else {
switch (kp->memory) {
case V4L2_MEMORY_MMAP:
- if (get_user(kp->length, &up->length) ||
- get_user(kp->m.offset, &up->m.offset))
+ if (get_user(kp->m.offset, &up->m.offset))
return -EFAULT;
break;
case V4L2_MEMORY_USERPTR:
{
compat_long_t tmp;
- if (get_user(kp->length, &up->length) ||
- get_user(tmp, &up->m.userptr))
+ if (get_user(tmp, &up->m.userptr))
return -EFAULT;
kp->m.userptr = (unsigned long)compat_ptr(tmp);
@@ -513,7 +509,8 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
put_user(kp->sequence, &up->sequence) ||
put_user(kp->reserved2, &up->reserved2) ||
- put_user(kp->reserved, &up->reserved))
+ put_user(kp->reserved, &up->reserved) ||
+ put_user(kp->length, &up->length))
return -EFAULT;
if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
@@ -536,13 +533,11 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
} else {
switch (kp->memory) {
case V4L2_MEMORY_MMAP:
- if (put_user(kp->length, &up->length) ||
- put_user(kp->m.offset, &up->m.offset))
+ if (put_user(kp->m.offset, &up->m.offset))
return -EFAULT;
break;
case V4L2_MEMORY_USERPTR:
- if (put_user(kp->length, &up->length) ||
- put_user(kp->m.userptr, &up->m.userptr))
+ if (put_user(kp->m.userptr, &up->m.userptr))
return -EFAULT;
break;
case V4L2_MEMORY_OVERLAY:
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 0b05aa938..1a173d0af 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -53,6 +53,11 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
bus = cl->dev;
mutex_lock(&bus->device_lock);
+ if (bus->dev_state != MEI_DEV_ENABLED) {
+ rets = -ENODEV;
+ goto out;
+ }
+
if (!mei_cl_is_connected(cl)) {
rets = -ENODEV;
goto out;
@@ -109,6 +114,10 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
bus = cl->dev;
mutex_lock(&bus->device_lock);
+ if (bus->dev_state != MEI_DEV_ENABLED) {
+ rets = -ENODEV;
+ goto out;
+ }
cb = mei_cl_read_cb(cl, NULL);
if (cb)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index fe207e542..5fbffdb6b 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -589,6 +589,14 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
struct mmc_card *card;
int err = 0, ioc_err = 0;
+ /*
+ * The caller must have CAP_SYS_RAWIO, and must be calling this on the
+ * whole block device, not on a partition. This prevents overspray
+ * between sibling partitions.
+ */
+ if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+ return -EPERM;
+
idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
if (IS_ERR(idata))
return PTR_ERR(idata);
@@ -631,6 +639,14 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
int i, err = 0, ioc_err = 0;
__u64 num_of_cmds;
+ /*
+ * The caller must have CAP_SYS_RAWIO, and must be calling this on the
+ * whole block device, not on a partition. This prevents overspray
+ * between sibling partitions.
+ */
+ if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+ return -EPERM;
+
if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
sizeof(num_of_cmds)))
return -EFAULT;
@@ -688,14 +704,6 @@ cmd_err:
static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- /*
- * The caller must have CAP_SYS_RAWIO, and must be calling this on the
- * whole block device, not on a partition. This prevents overspray
- * between sibling partitions.
- */
- if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
- return -EPERM;
-
switch (cmd) {
case MMC_IOC_CMD:
return mmc_blk_ioctl_cmd(bdev,
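
Moving the CAP_SYS_RAWIO / whole-device test out of mmc_blk_ioctl() and into mmc_blk_ioctl_cmd() and mmc_blk_ioctl_multi_cmd() means the permission check now guards only the MMC-specific ioctls rather than being applied before the switch, so other ioctls reaching the same entry point are no longer rejected with -EPERM up front (the motivation is not stated in the diff; this is the usual reading of such a reordering). A hedged sketch of the shared guard, using only the calls visible in the hunks:

    /* Sketch: the privilege/whole-device guard now lives at the top of
     * each MMC ioctl handler instead of the common dispatcher. */
    static int example_mmc_ioctl_permissions(struct block_device *bdev)
    {
            /* Raw MMC commands bypass the block layer, so require
             * CAP_SYS_RAWIO and forbid per-partition use. */
            if (!capable(CAP_SYS_RAWIO) || bdev != bdev->bd_contains)
                    return -EPERM;
            return 0;
    }
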
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 851ccd9ac..25c179592 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -2443,7 +2443,7 @@ static int atmci_configure_dma(struct atmel_mci *host)
struct mci_platform_data *pdata = host->pdev->dev.platform_data;
dma_cap_mask_t mask;
- if (!pdata->dma_filter)
+ if (!pdata || !pdata->dma_filter)
return -ENODEV;
dma_cap_zero(mask);
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 3446097a4..e77d79c8c 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1442,6 +1442,12 @@ static int mmc_spi_probe(struct spi_device *spi)
host->pdata->cd_debounce);
if (status != 0)
goto fail_add_host;
+
+ /* The platform has a CD GPIO signal that may support
+ * interrupts, so let mmc_gpiod_request_cd_irq() decide
+ * if polling is needed or not.
+ */
+ mmc->caps &= ~MMC_CAP_NEEDS_POLL;
mmc_gpiod_request_cd_irq(mmc);
}
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index f5edf9d3a..c7f27fe48 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -137,6 +137,10 @@ static int armada_38x_quirks(struct platform_device *pdev,
host->quirks &= ~SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
+
+ host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+ host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"conf-sdio3");
if (res) {
@@ -150,7 +154,6 @@ static int armada_38x_quirks(struct platform_device *pdev,
* Configuration register, if the adjustment is not done,
* remove them from the capabilities.
*/
- host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50);
dev_warn(&pdev->dev, "conf-sdio3 register not found: disabling SDR50 and DDR50 modes.\nConsider updating your dtb\n");
@@ -161,7 +164,6 @@ static int armada_38x_quirks(struct platform_device *pdev,
* controller has different capabilities than the ones shown
* in its registers
*/
- host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
if (of_property_read_bool(np, "no-1-8-v")) {
host->caps &= ~SDHCI_CAN_VDD_180;
host->mmc->caps &= ~MMC_CAP_1_8V_DDR;
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 83c4bf7bc..0004721cd 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -147,10 +147,16 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
/* Advertise UHS modes as supported by host */
if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
+ else
+ misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR50;
if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
+ else
+ misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_DDR50;
if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
+ else
+ misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR104;
sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
@@ -188,7 +194,7 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
unsigned long host_clk;
if (!clock)
- return;
+ return sdhci_set_clock(host, clock);
host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
clk_set_rate(pltfm_host->clk, host_clk);
@@ -335,6 +341,10 @@ static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
.pdata = &sdhci_tegra114_pdata,
+};
+
+static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
+ .pdata = &sdhci_tegra114_pdata,
.nvquirks = NVQUIRK_ENABLE_SDR50 |
NVQUIRK_ENABLE_DDR50 |
NVQUIRK_ENABLE_SDR104,
@@ -357,7 +367,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
static const struct of_device_id sdhci_tegra_dt_match[] = {
{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
- { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra114 },
+ { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index add9fdfd1..8059d7248 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -465,8 +465,6 @@ static void sdhci_adma_mark_end(void *desc)
static int sdhci_adma_table_pre(struct sdhci_host *host,
struct mmc_data *data)
{
- int direction;
-
void *desc;
void *align;
dma_addr_t addr;
@@ -483,20 +481,9 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
* We currently guess that it is LE.
*/
- if (data->flags & MMC_DATA_READ)
- direction = DMA_FROM_DEVICE;
- else
- direction = DMA_TO_DEVICE;
-
- host->align_addr = dma_map_single(mmc_dev(host->mmc),
- host->align_buffer, host->align_buffer_sz, direction);
- if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
- goto fail;
- BUG_ON(host->align_addr & SDHCI_ADMA2_MASK);
-
host->sg_count = sdhci_pre_dma_transfer(host, data);
if (host->sg_count < 0)
- goto unmap_align;
+ return -EINVAL;
desc = host->adma_table;
align = host->align_buffer;
@@ -570,22 +557,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
/* nop, end, valid */
sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
}
-
- /*
- * Resync align buffer as we might have changed it.
- */
- if (data->flags & MMC_DATA_WRITE) {
- dma_sync_single_for_device(mmc_dev(host->mmc),
- host->align_addr, host->align_buffer_sz, direction);
- }
-
return 0;
-
-unmap_align:
- dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
- host->align_buffer_sz, direction);
-fail:
- return -EINVAL;
}
static void sdhci_adma_table_post(struct sdhci_host *host,
@@ -605,9 +577,6 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
else
direction = DMA_TO_DEVICE;
- dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
- host->align_buffer_sz, direction);
-
/* Do a quick scan of the SG list for any unaligned mappings */
has_unaligned = false;
for_each_sg(data->sg, sg, host->sg_count, i)
@@ -666,9 +635,20 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
if (!data)
target_timeout = cmd->busy_timeout * 1000;
else {
- target_timeout = data->timeout_ns / 1000;
- if (host->clock)
- target_timeout += data->timeout_clks / host->clock;
+ target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
+ if (host->clock && data->timeout_clks) {
+ unsigned long long val;
+
+ /*
+ * data->timeout_clks is in units of clock cycles.
+ * host->clock is in Hz. target_timeout is in us.
+ * Hence, us = 1000000 * cycles / Hz. Round up.
+ */
+ val = 1000000 * data->timeout_clks;
+ if (do_div(val, host->clock))
+ target_timeout++;
+ target_timeout += val;
+ }
}
/*
@@ -1003,6 +983,9 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
WARN_ON(host->cmd);
+ /* Initially, a command has no error */
+ cmd->error = 0;
+
/* Wait max 10 ms */
timeout = 10;
@@ -1097,8 +1080,6 @@ static void sdhci_finish_command(struct sdhci_host *host)
}
}
- host->cmd->error = 0;
-
/* Finished CMD23, now send actual command. */
if (host->cmd == host->mrq->sbc) {
host->cmd = NULL;
@@ -2114,14 +2095,13 @@ static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
struct sdhci_host *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
- if (host->flags & SDHCI_REQ_USE_DMA) {
- if (data->host_cookie == COOKIE_GIVEN ||
- data->host_cookie == COOKIE_MAPPED)
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- data->flags & MMC_DATA_WRITE ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- data->host_cookie = COOKIE_UNMAPPED;
- }
+ if (data->host_cookie == COOKIE_GIVEN ||
+ data->host_cookie == COOKIE_MAPPED)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ data->flags & MMC_DATA_WRITE ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ data->host_cookie = COOKIE_UNMAPPED;
}
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
@@ -2238,6 +2218,22 @@ static void sdhci_tasklet_finish(unsigned long param)
mrq = host->mrq;
/*
+ * Always unmap the data buffers if they were mapped by
+ * sdhci_prepare_data() whenever we finish with a request.
+ * This avoids leaking DMA mappings on error.
+ */
+ if (host->flags & SDHCI_REQ_USE_DMA) {
+ struct mmc_data *data = mrq->data;
+
+ if (data && data->host_cookie == COOKIE_MAPPED) {
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ (data->flags & MMC_DATA_READ) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ data->host_cookie = COOKIE_UNMAPPED;
+ }
+ }
+
+ /*
* The controller needs a reset of internal state machines
* upon error conditions.
*/
@@ -2322,13 +2318,30 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
return;
}
- if (intmask & SDHCI_INT_TIMEOUT)
- host->cmd->error = -ETIMEDOUT;
- else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
- SDHCI_INT_INDEX))
- host->cmd->error = -EILSEQ;
+ if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
+ SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
+ if (intmask & SDHCI_INT_TIMEOUT)
+ host->cmd->error = -ETIMEDOUT;
+ else
+ host->cmd->error = -EILSEQ;
+
+ /*
+ * If this command initiates a data phase and a response
+ * CRC error is signalled, the card can start transferring
+ * data - the card may have received the command without
+ * error. We must not terminate the mmc_request early.
+ *
+ * If the card did not receive the command or returned an
+ * error which prevented it sending data, the data phase
+ * will time out.
+ */
+ if (host->cmd->data &&
+ (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
+ SDHCI_INT_CRC) {
+ host->cmd = NULL;
+ return;
+ }
- if (host->cmd->error) {
tasklet_schedule(&host->finish_tasklet);
return;
}
@@ -2967,14 +2980,21 @@ int sdhci_add_host(struct sdhci_host *host)
&host->adma_addr,
GFP_KERNEL);
host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
- host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
+ host->align_buffer = dma_alloc_coherent(mmc_dev(mmc),
+ host->align_buffer_sz,
+ &host->align_addr,
+ GFP_KERNEL);
if (!host->adma_table || !host->align_buffer) {
if (host->adma_table)
dma_free_coherent(mmc_dev(mmc),
host->adma_table_sz,
host->adma_table,
host->adma_addr);
- kfree(host->align_buffer);
+ if (host->align_buffer)
+ dma_free_coherent(mmc_dev(mmc),
+ host->align_buffer_sz,
+ host->align_buffer,
+ host->align_addr);
pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
mmc_hostname(mmc));
host->flags &= ~SDHCI_USE_ADMA;
@@ -2986,10 +3006,14 @@ int sdhci_add_host(struct sdhci_host *host)
host->flags &= ~SDHCI_USE_ADMA;
dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
host->adma_table, host->adma_addr);
- kfree(host->align_buffer);
+ dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz,
+ host->align_buffer, host->align_addr);
host->adma_table = NULL;
host->align_buffer = NULL;
}
+
+ /* dma_alloc_coherent returns page aligned and sized buffers */
+ BUG_ON(host->align_addr & SDHCI_ADMA2_MASK);
}
/*
@@ -3072,14 +3096,14 @@ int sdhci_add_host(struct sdhci_host *host)
if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
host->timeout_clk *= 1000;
+ if (override_timeout_clk)
+ host->timeout_clk = override_timeout_clk;
+
mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
host->ops->get_max_timeout_count(host) : 1 << 27;
mmc->max_busy_timeout /= host->timeout_clk;
}
- if (override_timeout_clk)
- host->timeout_clk = override_timeout_clk;
-
mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
@@ -3452,7 +3476,9 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
if (host->adma_table)
dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
host->adma_table, host->adma_addr);
- kfree(host->align_buffer);
+ if (host->align_buffer)
+ dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz,
+ host->align_buffer, host->align_addr);
host->adma_table = NULL;
host->align_buffer = NULL;
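The sdhci.c hunks above also rework the data timeout calculation: data->timeout_ns is now rounded up with DIV_ROUND_UP, and data->timeout_clks (clock cycles) is converted to microseconds with an explicit round-up via do_div() instead of the old truncating division. A stand-alone sketch of that cycles-to-microseconds conversion, using plain 64-bit division for illustration (the kernel patch uses do_div() and adds one microsecond when there is a remainder):

#include <stdio.h>

/* Illustrative only: convert a card timeout given in clock cycles into
 * microseconds, rounding up, mirroring the arithmetic the patched
 * sdhci_calc_timeout() performs with do_div(). 'clock_hz' is the bus
 * clock in Hz; us = 1000000 * cycles / Hz, rounded up.
 */
static unsigned long long timeout_clks_to_us(unsigned long long timeout_clks,
					     unsigned int clock_hz)
{
	unsigned long long us = 1000000ULL * timeout_clks;

	return (us + clock_hz - 1) / clock_hz;
}

int main(void)
{
	/* e.g. 255 cycles at 400 kHz is 637.5 us, which rounds up to 638 */
	printf("%llu\n", timeout_clks_to_us(255, 400000));
	return 0;
}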
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 43b3392ff..652d01832 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -2599,6 +2599,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
*/
static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
+ struct onenand_chip *this = mtd->priv;
int ret;
ret = onenand_block_isbad(mtd, ofs);
@@ -2610,7 +2611,7 @@ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
}
onenand_get_device(mtd, FL_WRITING);
- ret = mtd_block_markbad(mtd, ofs);
+ ret = this->block_markbad(mtd, ofs);
onenand_release_device(mtd);
return ret;
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index b0ae69f84..acb1c5b2b 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3720,7 +3720,7 @@ static int mvneta_probe(struct platform_device *pdev)
dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
- dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
err = register_netdev(dev);
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 696852eb2..7a3f990c1 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -430,16 +430,6 @@ static int irtty_open(struct tty_struct *tty)
/* Module stuff handled via irda_ldisc.owner - Jean II */
- /* First make sure we're not already connected. */
- if (tty->disc_data != NULL) {
- priv = tty->disc_data;
- if (priv && priv->magic == IRTTY_MAGIC) {
- ret = -EEXIST;
- goto out;
- }
- tty->disc_data = NULL; /* ### */
- }
-
/* stop the underlying driver */
irtty_stop_receiver(tty, TRUE);
if (tty->ops->stop)
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 01f08a775..e7034c55e 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -280,7 +280,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
struct net_device *ndev = dev_id;
struct rionet_private *rnet = netdev_priv(ndev);
- spin_lock(&rnet->lock);
+ spin_lock(&rnet->tx_lock);
if (netif_msg_intr(rnet))
printk(KERN_INFO
@@ -299,7 +299,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
netif_wake_queue(ndev);
- spin_unlock(&rnet->lock);
+ spin_unlock(&rnet->tx_lock);
}
static int rionet_open(struct net_device *ndev)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
index 2ca783fa5..7e269f9aa 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
@@ -32,7 +32,7 @@
#define BRCMF_FLOWRING_LOW (BRCMF_FLOWRING_HIGH - 256)
#define BRCMF_FLOWRING_INVALID_IFIDX 0xff
-#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] + fifo + ifidx * 16)
+#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] * 2 + fifo + ifidx * 16)
#define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16)
static const u8 brcmf_flowring_prio2fifo[] = {
@@ -68,7 +68,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
u8 prio, u8 ifidx)
{
struct brcmf_flowring_hash *hash;
- u8 hash_idx;
+ u16 hash_idx;
u32 i;
bool found;
bool sta;
@@ -88,6 +88,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
}
hash_idx = sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
+ hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
found = false;
hash = flow->hash;
for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
@@ -98,6 +99,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
break;
}
hash_idx++;
+ hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
}
if (found)
return hash[hash_idx].flowid;
@@ -111,7 +113,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
{
struct brcmf_flowring_ring *ring;
struct brcmf_flowring_hash *hash;
- u8 hash_idx;
+ u16 hash_idx;
u32 i;
bool found;
u8 fifo;
@@ -131,6 +133,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
}
hash_idx = sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
+ hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
found = false;
hash = flow->hash;
for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
@@ -140,6 +143,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
break;
}
hash_idx++;
+ hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
}
if (found) {
for (i = 0; i < flow->nrofrings; i++) {
@@ -169,7 +173,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
}
-u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid)
+u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid)
{
struct brcmf_flowring_ring *ring;
@@ -179,7 +183,7 @@ u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid)
}
-static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid,
+static void brcmf_flowring_block(struct brcmf_flowring *flow, u16 flowid,
bool blocked)
{
struct brcmf_flowring_ring *ring;
@@ -228,10 +232,10 @@ static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid,
}
-void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
+void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid)
{
struct brcmf_flowring_ring *ring;
- u8 hash_idx;
+ u16 hash_idx;
struct sk_buff *skb;
ring = flow->rings[flowid];
@@ -253,7 +257,7 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
}
-u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
+u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid,
struct sk_buff *skb)
{
struct brcmf_flowring_ring *ring;
@@ -279,7 +283,7 @@ u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
}
-struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid)
+struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid)
{
struct brcmf_flowring_ring *ring;
struct sk_buff *skb;
@@ -300,7 +304,7 @@ struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid)
}
-void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
+void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid,
struct sk_buff *skb)
{
struct brcmf_flowring_ring *ring;
@@ -311,7 +315,7 @@ void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
}
-u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid)
+u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid)
{
struct brcmf_flowring_ring *ring;
@@ -326,7 +330,7 @@ u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid)
}
-void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid)
+void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid)
{
struct brcmf_flowring_ring *ring;
@@ -340,10 +344,10 @@ void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid)
}
-u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid)
+u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid)
{
struct brcmf_flowring_ring *ring;
- u8 hash_idx;
+ u16 hash_idx;
ring = flow->rings[flowid];
hash_idx = ring->hash_id;
@@ -384,7 +388,7 @@ void brcmf_flowring_detach(struct brcmf_flowring *flow)
struct brcmf_pub *drvr = bus_if->drvr;
struct brcmf_flowring_tdls_entry *search;
struct brcmf_flowring_tdls_entry *remove;
- u8 flowid;
+ u16 flowid;
for (flowid = 0; flowid < flow->nrofrings; flowid++) {
if (flow->rings[flowid])
@@ -408,7 +412,7 @@ void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
struct brcmf_pub *drvr = bus_if->drvr;
u32 i;
- u8 flowid;
+ u16 flowid;
if (flow->addr_mode[ifidx] != addr_mode) {
for (i = 0; i < ARRAY_SIZE(flow->hash); i++) {
@@ -434,7 +438,7 @@ void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx,
struct brcmf_flowring_tdls_entry *prev;
struct brcmf_flowring_tdls_entry *search;
u32 i;
- u8 flowid;
+ u16 flowid;
bool sta;
sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
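The flowring rework widens flow and hash indices from u8 to u16 and doubles BRCMF_FLOWRING_HASHSIZE to 512, which is kept a power of two so that wrap-around during the linear probe can be done with hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1). A minimal, stand-alone sketch of that probing pattern (the table layout and names here are illustrative, not the driver's structures):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HASHSIZE 512	/* must stay a power of two, as the patch notes */

struct hash_slot {
	uint8_t mac[6];
	uint16_t flowid;
	bool used;
};

/* Linear probing with power-of-two wrap-around: masking with (HASHSIZE - 1)
 * is equivalent to "% HASHSIZE" but cheaper, and it also clamps a starting
 * hash that is larger than the table into range.
 */
static int hash_lookup(const struct hash_slot *table, uint16_t start,
		       const uint8_t mac[6])
{
	uint16_t idx = start & (HASHSIZE - 1);
	unsigned int i;

	for (i = 0; i < HASHSIZE; i++) {
		if (table[idx].used && !memcmp(table[idx].mac, mac, 6))
			return table[idx].flowid;
		idx = (idx + 1) & (HASHSIZE - 1);
	}
	return -1;	/* not found */
}

int main(void)
{
	static struct hash_slot table[HASHSIZE];
	const uint8_t mac[6] = { 0, 1, 2, 3, 4, 5 };

	table[600 & (HASHSIZE - 1)].used = true;
	memcpy(table[600 & (HASHSIZE - 1)].mac, mac, 6);
	table[600 & (HASHSIZE - 1)].flowid = 7;

	printf("flowid %d\n", hash_lookup(table, 600, mac));	/* prints 7 */
	return 0;
}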
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h
index 95fd1c967..068e68d94 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h
@@ -16,7 +16,7 @@
#define BRCMFMAC_FLOWRING_H
-#define BRCMF_FLOWRING_HASHSIZE 256
+#define BRCMF_FLOWRING_HASHSIZE 512 /* has to be 2^x */
#define BRCMF_FLOWRING_INVALID_ID 0xFFFFFFFF
@@ -24,7 +24,7 @@ struct brcmf_flowring_hash {
u8 mac[ETH_ALEN];
u8 fifo;
u8 ifidx;
- u8 flowid;
+ u16 flowid;
};
enum ring_status {
@@ -61,16 +61,16 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
u8 prio, u8 ifidx);
u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
u8 prio, u8 ifidx);
-void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid);
-void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid);
-u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid);
-u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
+void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid);
+void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid);
+u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid);
+u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid,
struct sk_buff *skb);
-struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid);
-void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
+struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid);
+void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid,
struct sk_buff *skb);
-u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid);
-u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid);
+u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid);
+u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid);
struct brcmf_flowring *brcmf_flowring_attach(struct device *dev, u16 nrofrings);
void brcmf_flowring_detach(struct brcmf_flowring *flow);
void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index c2bdb9174..922966734 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -677,7 +677,7 @@ static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
}
-static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u8 flowid)
+static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid)
{
struct brcmf_flowring *flow = msgbuf->flow;
struct brcmf_commonring *commonring;
@@ -1310,7 +1310,7 @@ int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
}
-void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid)
+void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
{
struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
struct msgbuf_tx_flowring_delete_req *delete;
@@ -1415,6 +1415,13 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
u32 count;
if_msgbuf = drvr->bus_if->msgbuf;
+
+ if (if_msgbuf->nrof_flowrings >= BRCMF_FLOWRING_HASHSIZE) {
+ brcmf_err("driver not configured for this many flowrings %d\n",
+ if_msgbuf->nrof_flowrings);
+ if_msgbuf->nrof_flowrings = BRCMF_FLOWRING_HASHSIZE - 1;
+ }
+
msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL);
if (!msgbuf)
goto fail;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
index 3d513e407..ee6906a3c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
@@ -33,7 +33,7 @@
int brcmf_proto_msgbuf_rx_trigger(struct device *dev);
-void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid);
+void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid);
int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr);
void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr);
#else
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index ee7d97482..e6a14a45f 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -1026,6 +1026,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0411, 0x01a2) },
{ USB_DEVICE(0x0411, 0x01ee) },
{ USB_DEVICE(0x0411, 0x01a8) },
+ { USB_DEVICE(0x0411, 0x01fd) },
/* Corega */
{ USB_DEVICE(0x07aa, 0x002f) },
{ USB_DEVICE(0x07aa, 0x003c) },
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 5d28e9405..576eb7013 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -513,10 +513,10 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
/* fail write commands (when read-only) */
if (read_only)
- switch (ioctl_cmd) {
- case ND_IOCTL_VENDOR:
- case ND_IOCTL_SET_CONFIG_DATA:
- case ND_IOCTL_ARS_START:
+ switch (cmd) {
+ case ND_CMD_VENDOR:
+ case ND_CMD_SET_CONFIG_DATA:
+ case ND_CMD_ARS_START:
dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n",
nvdimm ? nvdimm_cmd_name(cmd)
: nvdimm_bus_cmd_name(cmd));
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 8d0b54670..544b802a5 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -66,22 +66,25 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
unsigned int len, unsigned int off, int rw,
sector_t sector)
{
+ int rc = 0;
void *mem = kmap_atomic(page);
phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
void __pmem *pmem_addr = pmem->virt_addr + pmem_off;
if (rw == READ) {
if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
- return -EIO;
- memcpy_from_pmem(mem + off, pmem_addr, len);
- flush_dcache_page(page);
+ rc = -EIO;
+ else {
+ memcpy_from_pmem(mem + off, pmem_addr, len);
+ flush_dcache_page(page);
+ }
} else {
flush_dcache_page(page);
memcpy_to_pmem(pmem_addr, mem + off, len);
}
kunmap_atomic(mem);
- return 0;
+ return rc;
}
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
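The pmem_do_bvec() change replaces the early return on a bad-block read with an rc variable so that kunmap_atomic() always runs before the single exit point. A small illustrative sketch of the same pattern, with the mapping and bad-block check reduced to placeholders:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Sketch of the single-exit pattern the patched pmem_do_bvec() uses: record
 * the error in 'rc' instead of returning early, so the cleanup step
 * (kunmap_atomic() in the real driver) can never be skipped. The bad-block
 * flag and buffers here are purely illustrative.
 */
static int do_bvec_read(char *dst, const char *media, size_t len, int bad_block)
{
	int rc = 0;

	if (bad_block)
		rc = -EIO;
	else
		memcpy(dst, media, len);

	/* unmap/cleanup that must always run would sit here */
	return rc;
}

int main(void)
{
	char dst[4];

	printf("%d\n", do_bvec_read(dst, "abcd", sizeof(dst), 0));	/* 0 */
	printf("%d\n", do_bvec_read(dst, "abcd", sizeof(dst), 1));	/* -EIO */
	return 0;
}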
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 1a3556a9e..ed01c0172 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -32,11 +32,13 @@ int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
phys_addr_t *res_base)
{
+ phys_addr_t base;
/*
* We use __memblock_alloc_base() because memblock_alloc_base()
* panic()s on allocation failure.
*/
- phys_addr_t base = __memblock_alloc_base(size, align, end);
+ end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
+ base = __memblock_alloc_base(size, align, end);
if (!base)
return -ENOMEM;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 6d7ab9bb0..6b0056e9c 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -179,6 +179,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
u16 orig_cmd;
struct pci_bus_region region, inverted_region;
+ if (dev->non_compliant_bars)
+ return 0;
+
mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
/* No printks while decoding is disabled! */
@@ -1171,6 +1174,7 @@ static void pci_msi_setup_pci_dev(struct pci_dev *dev)
int pci_setup_device(struct pci_dev *dev)
{
u32 class;
+ u16 cmd;
u8 hdr_type;
int pos = 0;
struct pci_bus_region region;
@@ -1214,6 +1218,16 @@ int pci_setup_device(struct pci_dev *dev)
/* device class may be changed after fixup */
class = dev->class >> 8;
+ if (dev->non_compliant_bars) {
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
+ dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
+ cmd &= ~PCI_COMMAND_IO;
+ cmd &= ~PCI_COMMAND_MEMORY;
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ }
+
switch (dev->hdr_type) { /* header type */
case PCI_HEADER_TYPE_NORMAL: /* standard header */
if (class == PCI_CLASS_BRIDGE_PCI)
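For devices flagged non_compliant_bars, the probe.c hunks skip BAR probing entirely and clear the I/O and memory decode enables in the PCI command register. A minimal sketch of that bit manipulation on a command-register value, using the standard PCI_COMMAND bit definitions:

#include <stdint.h>
#include <stdio.h>

#define PCI_COMMAND_IO		0x1	/* respond in I/O space */
#define PCI_COMMAND_MEMORY	0x2	/* respond in memory space */

/* Return a PCI_COMMAND value with I/O and memory decoding disabled - the
 * same bit manipulation the patch applies for non-compliant BARs before
 * writing the register back.
 */
static uint16_t disable_decoding(uint16_t cmd)
{
	cmd &= ~PCI_COMMAND_IO;
	cmd &= ~PCI_COMMAND_MEMORY;
	return cmd;
}

int main(void)
{
	printf("0x%04x\n", disable_decoding(0x0007));	/* prints 0x0004 */
	return 0;
}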
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 0f5997ceb..08b1d93da 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -779,7 +779,7 @@ static int bcm2835_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
}
if (num_pulls) {
err = of_property_read_u32_index(np, "brcm,pull",
- (num_funcs > 1) ? i : 0, &pull);
+ (num_pulls > 1) ? i : 0, &pull);
if (err)
goto out;
err = bcm2835_pctl_dt_node_to_map_pull(pc, np, pin,
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index d78ee151c..be3bc2f4e 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -865,6 +865,20 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
},
},
{
+ .ident = "Lenovo ideapad Y700-15ISK",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-15ISK"),
+ },
+ },
+ {
+ .ident = "Lenovo ideapad Y700 Touch-15ISK",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700 Touch-15ISK"),
+ },
+ },
+ {
.ident = "Lenovo ideapad Y700-17ISK",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index d72867257..3eff2a69f 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -760,7 +760,7 @@ static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance)
struct NCR5380_cmd *ncmd;
struct scsi_cmnd *cmd;
- if (list_empty(&hostdata->autosense)) {
+ if (hostdata->sensing || list_empty(&hostdata->autosense)) {
list_for_each_entry(ncmd, &hostdata->unissued, list) {
cmd = NCR5380_to_scmd(ncmd);
dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n",
@@ -793,7 +793,7 @@ static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
struct NCR5380_hostdata *hostdata = shost_priv(instance);
struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
- if (hostdata->sensing) {
+ if (hostdata->sensing == cmd) {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
list_add(&ncmd->list, &hostdata->autosense);
hostdata->sensing = NULL;
@@ -815,15 +815,17 @@ static void NCR5380_main(struct work_struct *work)
struct NCR5380_hostdata *hostdata =
container_of(work, struct NCR5380_hostdata, main_task);
struct Scsi_Host *instance = hostdata->host;
- struct scsi_cmnd *cmd;
int done;
do {
done = 1;
spin_lock_irq(&hostdata->lock);
- while (!hostdata->connected &&
- (cmd = dequeue_next_cmd(instance))) {
+ while (!hostdata->connected && !hostdata->selecting) {
+ struct scsi_cmnd *cmd = dequeue_next_cmd(instance);
+
+ if (!cmd)
+ break;
dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd);
@@ -840,8 +842,7 @@ static void NCR5380_main(struct work_struct *work)
* entire unit.
*/
- cmd = NCR5380_select(instance, cmd);
- if (!cmd) {
+ if (!NCR5380_select(instance, cmd)) {
dsprintk(NDEBUG_MAIN, instance, "main: select complete\n");
} else {
dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance,
@@ -1056,6 +1057,11 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
/* Reselection interrupt */
goto out;
}
+ if (!hostdata->selecting) {
+ /* Command was aborted */
+ NCR5380_write(MODE_REG, MR_BASE);
+ goto out;
+ }
if (err < 0) {
NCR5380_write(MODE_REG, MR_BASE);
shost_printk(KERN_ERR, instance,
@@ -1759,9 +1765,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
unsigned char msgout = NOP;
int sink = 0;
int len;
-#if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL)
int transfersize;
-#endif
unsigned char *data;
unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
struct scsi_cmnd *cmd;
@@ -1798,6 +1802,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
do_abort(instance);
cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd);
+ hostdata->connected = NULL;
return;
#endif
case PHASE_DATAIN:
@@ -1847,20 +1852,23 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
sink = 1;
do_abort(instance);
cmd->result = DID_ERROR << 16;
- complete_cmd(instance, cmd);
/* XXX - need to source or sink data here, as appropriate */
} else
cmd->SCp.this_residual -= transfersize - len;
} else
#endif /* defined(PSEUDO_DMA) || defined(REAL_DMA_POLL) */
{
- spin_unlock_irq(&hostdata->lock);
- NCR5380_transfer_pio(instance, &phase,
- (int *)&cmd->SCp.this_residual,
+ /* Break up transfer into 3 ms chunks,
+ * presuming 6 accesses per handshake.
+ */
+ transfersize = min((unsigned long)cmd->SCp.this_residual,
+ hostdata->accesses_per_ms / 2);
+ len = transfersize;
+ NCR5380_transfer_pio(instance, &phase, &len,
(unsigned char **)&cmd->SCp.ptr);
- spin_lock_irq(&hostdata->lock);
+ cmd->SCp.this_residual -= transfersize - len;
}
- break;
+ return;
case PHASE_MSGIN:
len = 1;
data = &tmp;
@@ -2292,14 +2300,17 @@ static bool list_del_cmd(struct list_head *haystack,
* [disconnected -> connected ->]...
* [autosense -> connected ->] done
*
- * If cmd is unissued then just remove it.
- * If cmd is disconnected, try to select the target.
- * If cmd is connected, try to send an abort message.
- * If cmd is waiting for autosense, give it a chance to complete but check
- * that it isn't left connected.
* If cmd was not found at all then presumably it has already been completed,
* in which case return SUCCESS to try to avoid further EH measures.
+ *
* If the command has not completed yet, we must not fail to find it.
+ * We have no option but to forget the aborted command (even if it still
+ * lacks sense data). The mid-layer may re-issue a command that is in error
+ * recovery (see scsi_send_eh_cmnd), but the logic and data structures in
+ * this driver are such that a command can appear on one queue only.
+ *
+ * The lock protects driver data structures, but EH handlers also use it
+ * to serialize their own execution and prevent their own re-entry.
*/
static int NCR5380_abort(struct scsi_cmnd *cmd)
@@ -2322,6 +2333,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
"abort: removed %p from issue queue\n", cmd);
cmd->result = DID_ABORT << 16;
cmd->scsi_done(cmd); /* No tag or busy flag to worry about */
+ goto out;
}
if (hostdata->selecting == cmd) {
@@ -2336,58 +2348,21 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
if (list_del_cmd(&hostdata->disconnected, cmd)) {
dsprintk(NDEBUG_ABORT, instance,
"abort: removed %p from disconnected list\n", cmd);
- cmd->result = DID_ERROR << 16;
- if (!hostdata->connected)
- NCR5380_select(instance, cmd);
- if (hostdata->connected != cmd) {
- complete_cmd(instance, cmd);
- result = FAILED;
- goto out;
- }
+ /* Can't call NCR5380_select() and send ABORT because that
+ * means releasing the lock. Need a bus reset.
+ */
+ set_host_byte(cmd, DID_ERROR);
+ complete_cmd(instance, cmd);
+ result = FAILED;
+ goto out;
}
if (hostdata->connected == cmd) {
dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
hostdata->connected = NULL;
- if (do_abort(instance)) {
- set_host_byte(cmd, DID_ERROR);
- complete_cmd(instance, cmd);
- result = FAILED;
- goto out;
- }
- set_host_byte(cmd, DID_ABORT);
#ifdef REAL_DMA
hostdata->dma_len = 0;
#endif
- if (cmd->cmnd[0] == REQUEST_SENSE)
- complete_cmd(instance, cmd);
- else {
- struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
-
- /* Perform autosense for this command */
- list_add(&ncmd->list, &hostdata->autosense);
- }
- }
-
- if (list_find_cmd(&hostdata->autosense, cmd)) {
- dsprintk(NDEBUG_ABORT, instance,
- "abort: found %p on sense queue\n", cmd);
- spin_unlock_irqrestore(&hostdata->lock, flags);
- queue_work(hostdata->work_q, &hostdata->main_task);
- msleep(1000);
- spin_lock_irqsave(&hostdata->lock, flags);
- if (list_del_cmd(&hostdata->autosense, cmd)) {
- dsprintk(NDEBUG_ABORT, instance,
- "abort: removed %p from sense queue\n", cmd);
- set_host_byte(cmd, DID_ABORT);
- complete_cmd(instance, cmd);
- goto out;
- }
- }
-
- if (hostdata->connected == cmd) {
- dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
- hostdata->connected = NULL;
if (do_abort(instance)) {
set_host_byte(cmd, DID_ERROR);
complete_cmd(instance, cmd);
@@ -2395,9 +2370,14 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
goto out;
}
set_host_byte(cmd, DID_ABORT);
-#ifdef REAL_DMA
- hostdata->dma_len = 0;
-#endif
+ complete_cmd(instance, cmd);
+ goto out;
+ }
+
+ if (list_del_cmd(&hostdata->autosense, cmd)) {
+ dsprintk(NDEBUG_ABORT, instance,
+ "abort: removed %p from sense queue\n", cmd);
+ set_host_byte(cmd, DID_ERROR);
complete_cmd(instance, cmd);
}
@@ -2450,7 +2430,16 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
* commands!
*/
- hostdata->selecting = NULL;
+ if (list_del_cmd(&hostdata->unissued, cmd)) {
+ cmd->result = DID_RESET << 16;
+ cmd->scsi_done(cmd);
+ }
+
+ if (hostdata->selecting) {
+ hostdata->selecting->result = DID_RESET << 16;
+ complete_cmd(instance, hostdata->selecting);
+ hostdata->selecting = NULL;
+ }
list_for_each_entry(ncmd, &hostdata->disconnected, list) {
struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
@@ -2458,6 +2447,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
set_host_byte(cmd, DID_RESET);
cmd->scsi_done(cmd);
}
+ INIT_LIST_HEAD(&hostdata->disconnected);
list_for_each_entry(ncmd, &hostdata->autosense, list) {
struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
@@ -2465,6 +2455,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
set_host_byte(cmd, DID_RESET);
cmd->scsi_done(cmd);
}
+ INIT_LIST_HEAD(&hostdata->autosense);
if (hostdata->connected) {
set_host_byte(hostdata->connected, DID_RESET);
@@ -2472,12 +2463,6 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
hostdata->connected = NULL;
}
- if (hostdata->sensing) {
- set_host_byte(hostdata->connected, DID_RESET);
- complete_cmd(instance, hostdata->sensing);
- hostdata->sensing = NULL;
- }
-
for (i = 0; i < 8; ++i)
hostdata->busy[i] = 0;
#ifdef REAL_DMA
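Both NCR5380 cores now keep the lock held across PIO transfers and instead cap each burst: assuming roughly six register accesses per byte handshake, accesses_per_ms / 2 bytes take about 3 ms. A stand-alone sketch of that chunk-size calculation and the residual bookkeeping (the values are made up for illustration):

#include <stdio.h>

/* Illustrative chunking used by the patched information-transfer loop: with
 * roughly six register accesses per byte handshake, accesses_per_ms / 2
 * bytes take about 3 ms, so each PIO burst is capped at that many bytes.
 */
static unsigned long pio_chunk(unsigned long residual,
			       unsigned long accesses_per_ms)
{
	unsigned long chunk = accesses_per_ms / 2;

	return residual < chunk ? residual : chunk;
}

int main(void)
{
	unsigned long residual = 4096;		/* bytes left in this phase */
	unsigned long accesses_per_ms = 1200;	/* measured at init time */

	while (residual) {
		unsigned long transfersize = pio_chunk(residual, accesses_per_ms);
		unsigned long len = transfersize;

		/* the real NCR5380_transfer_pio() would update 'len' to the
		 * bytes it did NOT move; assume the whole chunk went through */
		len = 0;
		residual -= transfersize - len;
		printf("transferred %lu, %lu left\n", transfersize, residual);
	}
	return 0;
}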
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 074878b55..d044f3f27 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -944,6 +944,7 @@ struct fib {
*/
struct list_head fiblink;
void *data;
+ u32 vector_no;
struct hw_fib *hw_fib_va; /* Actual shared object */
dma_addr_t hw_fib_pa; /* physical address of hw_fib*/
};
@@ -2113,6 +2114,7 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
int aac_acquire_irq(struct aac_dev *dev);
void aac_free_irq(struct aac_dev *dev);
const char *aac_driverinfo(struct Scsi_Host *);
+void aac_fib_vector_assign(struct aac_dev *dev);
struct fib *aac_fib_alloc(struct aac_dev *dev);
int aac_fib_setup(struct aac_dev *dev);
void aac_fib_map_free(struct aac_dev *dev);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index a1f90fe84..4cbf54928 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -83,13 +83,38 @@ static int fib_map_alloc(struct aac_dev *dev)
void aac_fib_map_free(struct aac_dev *dev)
{
- pci_free_consistent(dev->pdev,
- dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
- dev->hw_fib_va, dev->hw_fib_pa);
+ if (dev->hw_fib_va && dev->max_fib_size) {
+ pci_free_consistent(dev->pdev,
+ (dev->max_fib_size *
+ (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)),
+ dev->hw_fib_va, dev->hw_fib_pa);
+ }
dev->hw_fib_va = NULL;
dev->hw_fib_pa = 0;
}
+void aac_fib_vector_assign(struct aac_dev *dev)
+{
+ u32 i = 0;
+ u32 vector = 1;
+ struct fib *fibptr = NULL;
+
+ for (i = 0, fibptr = &dev->fibs[i];
+ i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
+ i++, fibptr++) {
+ if ((dev->max_msix == 1) ||
+ (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
+ - dev->vector_cap))) {
+ fibptr->vector_no = 0;
+ } else {
+ fibptr->vector_no = vector;
+ vector++;
+ if (vector == dev->max_msix)
+ vector = 1;
+ }
+ }
+}
+
/**
* aac_fib_setup - setup the fibs
* @dev: Adapter to set up
@@ -151,6 +176,12 @@ int aac_fib_setup(struct aac_dev * dev)
hw_fib_pa = hw_fib_pa +
dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
}
+
+ /*
+ * Assign vector numbers to fibs
+ */
+ aac_fib_vector_assign(dev);
+
/*
* Add the fib chain to the free list
*/
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 76eaa38ff..8a8e84548 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1404,8 +1404,18 @@ static int aac_acquire_resources(struct aac_dev *dev)
aac_adapter_enable_int(dev);
- if (!dev->sync_mode)
+ /* max msix may change after EEH
+ * Re-assign vectors to fibs
+ */
+ aac_fib_vector_assign(dev);
+
+ if (!dev->sync_mode) {
+ /* After EEH recovery or suspend resume, max_msix count
+ * may change, therefore updating in init as well.
+ */
aac_adapter_start(dev);
+ dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
+ }
return 0;
error_iounmap:
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 2aa34ea8c..bc0203f3d 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -156,8 +156,8 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
break;
if (dev->msi_enabled && dev->max_msix > 1)
atomic_dec(&dev->rrq_outstanding[vector_no]);
- aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
dev->host_rrq[index++] = 0;
+ aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
if (index == (vector_no + 1) * dev->vector_cap)
index = vector_no * dev->vector_cap;
dev->host_rrq_idx[vector_no] = index;
@@ -452,36 +452,20 @@ static int aac_src_deliver_message(struct fib *fib)
#endif
u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);
+ u16 vector_no;
atomic_inc(&q->numpending);
if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest &&
dev->max_msix > 1) {
- u_int16_t vector_no, first_choice = 0xffff;
-
- vector_no = dev->fibs_pushed_no % dev->max_msix;
- do {
- vector_no += 1;
- if (vector_no == dev->max_msix)
- vector_no = 1;
- if (atomic_read(&dev->rrq_outstanding[vector_no]) <
- dev->vector_cap)
- break;
- if (0xffff == first_choice)
- first_choice = vector_no;
- else if (vector_no == first_choice)
- break;
- } while (1);
- if (vector_no == first_choice)
- vector_no = 0;
- atomic_inc(&dev->rrq_outstanding[vector_no]);
- if (dev->fibs_pushed_no == 0xffffffff)
- dev->fibs_pushed_no = 0;
- else
- dev->fibs_pushed_no++;
+ vector_no = fib->vector_no;
fib->hw_fib_va->header.Handle += (vector_no << 16);
+ } else {
+ vector_no = 0;
}
+ atomic_inc(&dev->rrq_outstanding[vector_no]);
+
if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
/* Calculate the amount to the fibsize bits */
fibsize = (hdr_size + 127) / 128 - 1;
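The aacraid changes move MSI-X vector selection from submit time (the removed round-robin search in aac_src_deliver_message()) to setup time: aac_fib_vector_assign() spreads fibs over vectors 1..max_msix-1 and reserves vector 0, and delivery simply reads fib->vector_no. A simplified, stand-alone sketch of that kind of static round-robin assignment (the real cut-off for vector 0 is derived from vector_cap; a plain reserved_tail count is used here instead):

#include <stdio.h>

#define NUM_FIBS	16

/* Spread nfibs entries round-robin over vectors 1..max_msix-1, keeping
 * vector 0 for a trailing block of reserved entries. This mirrors the shape
 * of aac_fib_vector_assign() with the driver state stripped out.
 */
static void assign_vectors(unsigned int vector_no[], unsigned int nfibs,
			   unsigned int max_msix, unsigned int reserved_tail)
{
	unsigned int i, vector = 1;

	for (i = 0; i < nfibs; i++) {
		if (max_msix == 1 || i >= nfibs - reserved_tail) {
			vector_no[i] = 0;
		} else {
			vector_no[i] = vector++;
			if (vector == max_msix)
				vector = 1;
		}
	}
}

int main(void)
{
	unsigned int vec[NUM_FIBS];
	unsigned int i;

	assign_vectors(vec, NUM_FIBS, 4, 2);	/* vectors 1..3, last 2 on 0 */
	for (i = 0; i < NUM_FIBS; i++)
		printf("fib %2u -> vector %u\n", i, vec[i]);
	return 0;
}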
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index b846a4683..fc6a83188 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1336,6 +1336,7 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
case AHC_DEV_Q_TAGGED:
scsi_change_queue_depth(sdev,
dev->openings + dev->active);
+ break;
default:
/*
* We allow the OS to queue 2 untagged transactions to
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index e65478651..389825ba5 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -862,7 +862,7 @@ static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance)
struct NCR5380_cmd *ncmd;
struct scsi_cmnd *cmd;
- if (list_empty(&hostdata->autosense)) {
+ if (hostdata->sensing || list_empty(&hostdata->autosense)) {
list_for_each_entry(ncmd, &hostdata->unissued, list) {
cmd = NCR5380_to_scmd(ncmd);
dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n",
@@ -901,7 +901,7 @@ static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
struct NCR5380_hostdata *hostdata = shost_priv(instance);
struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
- if (hostdata->sensing) {
+ if (hostdata->sensing == cmd) {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
list_add(&ncmd->list, &hostdata->autosense);
hostdata->sensing = NULL;
@@ -923,7 +923,6 @@ static void NCR5380_main(struct work_struct *work)
struct NCR5380_hostdata *hostdata =
container_of(work, struct NCR5380_hostdata, main_task);
struct Scsi_Host *instance = hostdata->host;
- struct scsi_cmnd *cmd;
int done;
/*
@@ -936,8 +935,11 @@ static void NCR5380_main(struct work_struct *work)
done = 1;
spin_lock_irq(&hostdata->lock);
- while (!hostdata->connected &&
- (cmd = dequeue_next_cmd(instance))) {
+ while (!hostdata->connected && !hostdata->selecting) {
+ struct scsi_cmnd *cmd = dequeue_next_cmd(instance);
+
+ if (!cmd)
+ break;
dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd);
@@ -960,8 +962,7 @@ static void NCR5380_main(struct work_struct *work)
#ifdef SUPPORT_TAGS
cmd_get_tag(cmd, cmd->cmnd[0] != REQUEST_SENSE);
#endif
- cmd = NCR5380_select(instance, cmd);
- if (!cmd) {
+ if (!NCR5380_select(instance, cmd)) {
dsprintk(NDEBUG_MAIN, instance, "main: select complete\n");
maybe_release_dma_irq(instance);
} else {
@@ -1257,6 +1258,11 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
/* Reselection interrupt */
goto out;
}
+ if (!hostdata->selecting) {
+ /* Command was aborted */
+ NCR5380_write(MODE_REG, MR_BASE);
+ goto out;
+ }
if (err < 0) {
NCR5380_write(MODE_REG, MR_BASE);
shost_printk(KERN_ERR, instance,
@@ -1838,9 +1844,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
unsigned char msgout = NOP;
int sink = 0;
int len;
-#if defined(REAL_DMA)
int transfersize;
-#endif
unsigned char *data;
unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
struct scsi_cmnd *cmd;
@@ -1909,6 +1913,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
do_abort(instance);
cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd);
+ hostdata->connected = NULL;
return;
#endif
case PHASE_DATAIN:
@@ -1966,7 +1971,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
sink = 1;
do_abort(instance);
cmd->result = DID_ERROR << 16;
- complete_cmd(instance, cmd);
/* XXX - need to source or sink data here, as appropriate */
} else {
#ifdef REAL_DMA
@@ -1983,18 +1987,22 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
} else
#endif /* defined(REAL_DMA) */
{
- spin_unlock_irq(&hostdata->lock);
- NCR5380_transfer_pio(instance, &phase,
- (int *)&cmd->SCp.this_residual,
+ /* Break up transfer into 3 ms chunks,
+ * presuming 6 accesses per handshake.
+ */
+ transfersize = min((unsigned long)cmd->SCp.this_residual,
+ hostdata->accesses_per_ms / 2);
+ len = transfersize;
+ NCR5380_transfer_pio(instance, &phase, &len,
(unsigned char **)&cmd->SCp.ptr);
- spin_lock_irq(&hostdata->lock);
+ cmd->SCp.this_residual -= transfersize - len;
}
#if defined(CONFIG_SUN3) && defined(REAL_DMA)
/* if we had intended to dma that command clear it */
if (sun3_dma_setup_done == cmd)
sun3_dma_setup_done = NULL;
#endif
- break;
+ return;
case PHASE_MSGIN:
len = 1;
data = &tmp;
@@ -2487,14 +2495,17 @@ static bool list_del_cmd(struct list_head *haystack,
* [disconnected -> connected ->]...
* [autosense -> connected ->] done
*
- * If cmd is unissued then just remove it.
- * If cmd is disconnected, try to select the target.
- * If cmd is connected, try to send an abort message.
- * If cmd is waiting for autosense, give it a chance to complete but check
- * that it isn't left connected.
* If cmd was not found at all then presumably it has already been completed,
* in which case return SUCCESS to try to avoid further EH measures.
+ *
* If the command has not completed yet, we must not fail to find it.
+ * We have no option but to forget the aborted command (even if it still
+ * lacks sense data). The mid-layer may re-issue a command that is in error
+ * recovery (see scsi_send_eh_cmnd), but the logic and data structures in
+ * this driver are such that a command can appear on one queue only.
+ *
+ * The lock protects driver data structures, but EH handlers also use it
+ * to serialize their own execution and prevent their own re-entry.
*/
static int NCR5380_abort(struct scsi_cmnd *cmd)
@@ -2517,6 +2528,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
"abort: removed %p from issue queue\n", cmd);
cmd->result = DID_ABORT << 16;
cmd->scsi_done(cmd); /* No tag or busy flag to worry about */
+ goto out;
}
if (hostdata->selecting == cmd) {
@@ -2531,58 +2543,21 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
if (list_del_cmd(&hostdata->disconnected, cmd)) {
dsprintk(NDEBUG_ABORT, instance,
"abort: removed %p from disconnected list\n", cmd);
- cmd->result = DID_ERROR << 16;
- if (!hostdata->connected)
- NCR5380_select(instance, cmd);
- if (hostdata->connected != cmd) {
- complete_cmd(instance, cmd);
- result = FAILED;
- goto out;
- }
+ /* Can't call NCR5380_select() and send ABORT because that
+ * means releasing the lock. Need a bus reset.
+ */
+ set_host_byte(cmd, DID_ERROR);
+ complete_cmd(instance, cmd);
+ result = FAILED;
+ goto out;
}
if (hostdata->connected == cmd) {
dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
hostdata->connected = NULL;
- if (do_abort(instance)) {
- set_host_byte(cmd, DID_ERROR);
- complete_cmd(instance, cmd);
- result = FAILED;
- goto out;
- }
- set_host_byte(cmd, DID_ABORT);
#ifdef REAL_DMA
hostdata->dma_len = 0;
#endif
- if (cmd->cmnd[0] == REQUEST_SENSE)
- complete_cmd(instance, cmd);
- else {
- struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
-
- /* Perform autosense for this command */
- list_add(&ncmd->list, &hostdata->autosense);
- }
- }
-
- if (list_find_cmd(&hostdata->autosense, cmd)) {
- dsprintk(NDEBUG_ABORT, instance,
- "abort: found %p on sense queue\n", cmd);
- spin_unlock_irqrestore(&hostdata->lock, flags);
- queue_work(hostdata->work_q, &hostdata->main_task);
- msleep(1000);
- spin_lock_irqsave(&hostdata->lock, flags);
- if (list_del_cmd(&hostdata->autosense, cmd)) {
- dsprintk(NDEBUG_ABORT, instance,
- "abort: removed %p from sense queue\n", cmd);
- set_host_byte(cmd, DID_ABORT);
- complete_cmd(instance, cmd);
- goto out;
- }
- }
-
- if (hostdata->connected == cmd) {
- dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
- hostdata->connected = NULL;
if (do_abort(instance)) {
set_host_byte(cmd, DID_ERROR);
complete_cmd(instance, cmd);
@@ -2590,9 +2565,14 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
goto out;
}
set_host_byte(cmd, DID_ABORT);
-#ifdef REAL_DMA
- hostdata->dma_len = 0;
-#endif
+ complete_cmd(instance, cmd);
+ goto out;
+ }
+
+ if (list_del_cmd(&hostdata->autosense, cmd)) {
+ dsprintk(NDEBUG_ABORT, instance,
+ "abort: removed %p from sense queue\n", cmd);
+ set_host_byte(cmd, DID_ERROR);
complete_cmd(instance, cmd);
}
@@ -2646,7 +2626,16 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
* commands!
*/
- hostdata->selecting = NULL;
+ if (list_del_cmd(&hostdata->unissued, cmd)) {
+ cmd->result = DID_RESET << 16;
+ cmd->scsi_done(cmd);
+ }
+
+ if (hostdata->selecting) {
+ hostdata->selecting->result = DID_RESET << 16;
+ complete_cmd(instance, hostdata->selecting);
+ hostdata->selecting = NULL;
+ }
list_for_each_entry(ncmd, &hostdata->disconnected, list) {
struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
@@ -2654,6 +2643,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
set_host_byte(cmd, DID_RESET);
cmd->scsi_done(cmd);
}
+ INIT_LIST_HEAD(&hostdata->disconnected);
list_for_each_entry(ncmd, &hostdata->autosense, list) {
struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
@@ -2661,6 +2651,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
set_host_byte(cmd, DID_RESET);
cmd->scsi_done(cmd);
}
+ INIT_LIST_HEAD(&hostdata->autosense);
if (hostdata->connected) {
set_host_byte(hostdata->connected, DID_RESET);
@@ -2668,12 +2659,6 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
hostdata->connected = NULL;
}
- if (hostdata->sensing) {
- set_host_byte(hostdata->connected, DID_RESET);
- complete_cmd(instance, hostdata->sensing);
- hostdata->sensing = NULL;
- }
-
#ifdef SUPPORT_TAGS
free_all_tags(hostdata);
#endif
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index cb9072a84..069e5c50a 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -4468,6 +4468,7 @@ put_shost:
scsi_host_put(phba->shost);
free_kset:
iscsi_boot_destroy_kset(phba->boot_kset);
+ phba->boot_kset = NULL;
return -ENOMEM;
}
diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
index c12696613..ce79de822 100644
--- a/drivers/scsi/scsi_common.c
+++ b/drivers/scsi/scsi_common.c
@@ -278,8 +278,16 @@ int scsi_set_sense_information(u8 *buf, int buf_len, u64 info)
ucp[3] = 0;
put_unaligned_be64(info, &ucp[4]);
} else if ((buf[0] & 0x7f) == 0x70) {
- buf[0] |= 0x80;
- put_unaligned_be64(info, &buf[3]);
+ /*
+ * Only set the 'VALID' bit if we can represent the value
+ * correctly; otherwise just fill out the lower bytes and
+ * clear the 'VALID' flag.
+ */
+ if (info <= 0xffffffffUL)
+ buf[0] |= 0x80;
+ else
+ buf[0] &= 0x7f;
+ put_unaligned_be32((u32)info, &buf[3]);
}
return 0;
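The scsi_set_sense_information() fix accounts for fixed-format sense (response code 0x70) having only a 4-byte INFORMATION field: the VALID bit is set only when the value fits in 32 bits, otherwise it is cleared and the value is truncated to the low 32 bits. A stand-alone sketch of the fixed-format branch with an open-coded big-endian store (equivalent to put_unaligned_be32()):

#include <stdint.h>
#include <stdio.h>

/* Fill the INFORMATION field of a fixed-format sense buffer (response code
 * 0x70). The VALID bit (bit 7 of byte 0) is set only when 'info' fits in the
 * four bytes available at offset 3; otherwise it is cleared and only the low
 * 32 bits are stored, matching the patched helper.
 */
static void set_fixed_sense_information(uint8_t *buf, uint64_t info)
{
	if (info <= 0xffffffffULL)
		buf[0] |= 0x80;		/* VALID */
	else
		buf[0] &= 0x7f;		/* truncated value: clear VALID */

	buf[3] = (uint8_t)(info >> 24);	/* big-endian store of the low 32 bits */
	buf[4] = (uint8_t)(info >> 16);
	buf[5] = (uint8_t)(info >> 8);
	buf[6] = (uint8_t)info;
}

int main(void)
{
	uint8_t sense[18] = { 0x70 };

	set_fixed_sense_information(sense, 0x12345678ULL);
	printf("byte0=0x%02x info=%02x%02x%02x%02x\n",
	       sense[0], sense[3], sense[4], sense[5], sense[6]);
	return 0;
}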
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index d749da765..5a5457ac9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -648,7 +648,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
*/
if (sdkp->lbprz) {
q->limits.discard_alignment = 0;
- q->limits.discard_granularity = 1;
+ q->limits.discard_granularity = logical_block_size;
} else {
q->limits.discard_alignment = sdkp->unmap_alignment *
logical_block_size;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 5e8206744..ae7d9bdf4 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -652,7 +652,8 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
else
hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
hp->dxfer_len = mxsize;
- if (hp->dxfer_direction == SG_DXFER_TO_DEV)
+ if ((hp->dxfer_direction == SG_DXFER_TO_DEV) ||
+ (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV))
hp->dxferp = (char __user *)buf + cmd_size;
else
hp->dxferp = NULL;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 292c04eec..3ddcabb79 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -914,8 +914,9 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
do_work = true;
process_err_fn = storvsc_remove_lun;
break;
- case (SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID):
- if ((asc == 0x2a) && (ascq == 0x9)) {
+ case SRB_STATUS_ABORTED:
+ if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID &&
+ (asc == 0x2a) && (ascq == 0x9)) {
do_work = true;
process_err_fn = storvsc_device_scan;
/*
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
index b8dcf5a26..58d46893e 100644
--- a/drivers/staging/android/ion/ion_test.c
+++ b/drivers/staging/android/ion/ion_test.c
@@ -285,8 +285,8 @@ static int __init ion_test_init(void)
{
ion_test_pdev = platform_device_register_simple("ion-test",
-1, NULL, 0);
- if (!ion_test_pdev)
- return -ENODEV;
+ if (IS_ERR(ion_test_pdev))
+ return PTR_ERR(ion_test_pdev);
return platform_driver_probe(&ion_test_platform_driver, ion_test_probe);
}
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 5e8130a7d..0e9f77924 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -246,24 +246,24 @@ static void ni_writel(struct comedi_device *dev, uint32_t data, int reg)
{
if (dev->mmio)
writel(data, dev->mmio + reg);
-
- outl(data, dev->iobase + reg);
+ else
+ outl(data, dev->iobase + reg);
}
static void ni_writew(struct comedi_device *dev, uint16_t data, int reg)
{
if (dev->mmio)
writew(data, dev->mmio + reg);
-
- outw(data, dev->iobase + reg);
+ else
+ outw(data, dev->iobase + reg);
}
static void ni_writeb(struct comedi_device *dev, uint8_t data, int reg)
{
if (dev->mmio)
writeb(data, dev->mmio + reg);
-
- outb(data, dev->iobase + reg);
+ else
+ outb(data, dev->iobase + reg);
}
static uint32_t ni_readl(struct comedi_device *dev, int reg)
diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
index 437f723bb..823e47910 100644
--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
+++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
@@ -92,7 +92,7 @@ static int ni_tio_input_inttrig(struct comedi_device *dev,
unsigned long flags;
int ret = 0;
- if (trig_num != cmd->start_src)
+ if (trig_num != cmd->start_arg)
return -EINVAL;
spin_lock_irqsave(&counter->lock, flags);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 867bc6d0a..43d8b42c0 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2596,8 +2596,6 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
list_for_each_entry_safe(se_cmd, tmp_cmd,
&se_sess->sess_wait_list, se_cmd_list) {
- list_del_init(&se_cmd->se_cmd_list);
-
pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
" %d\n", se_cmd, se_cmd->t_state,
se_cmd->se_tfo->get_cmd_state(se_cmd));
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index a0a8fd123..d4b54653e 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -454,6 +454,10 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
{
enum thermal_trip_type type;
+ /* Ignore disabled trip points */
+ if (test_bit(trip, &tz->trips_disabled))
+ return;
+
tz->ops->get_trip_type(tz, trip, &type);
if (type == THERMAL_TRIP_CRITICAL || type == THERMAL_TRIP_HOT)
@@ -1800,6 +1804,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
{
struct thermal_zone_device *tz;
enum thermal_trip_type trip_type;
+ int trip_temp;
int result;
int count;
int passive = 0;
@@ -1871,9 +1876,15 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
goto unregister;
for (count = 0; count < trips; count++) {
- tz->ops->get_trip_type(tz, count, &trip_type);
+ if (tz->ops->get_trip_type(tz, count, &trip_type))
+ set_bit(count, &tz->trips_disabled);
if (trip_type == THERMAL_TRIP_PASSIVE)
passive = 1;
+ if (tz->ops->get_trip_temp(tz, count, &trip_temp))
+ set_bit(count, &tz->trips_disabled);
+ /* Check for bogus trip points */
+ if (trip_temp == 0)
+ set_bit(count, &tz->trips_disabled);
}
if (!passive) {
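The thermal core now records trips whose get_trip_type()/get_trip_temp() callbacks fail, or whose reported temperature is 0, in a trips_disabled bitmap at registration time, and handle_thermal_trip() skips those trips later. A small sketch of that bitmap bookkeeping with plain bit operations (the kernel uses set_bit()/test_bit() on the zone's unsigned long field):

#include <stdbool.h>
#include <stdio.h>

static unsigned long trips_disabled;	/* one bit per trip point */

static void mark_trip_disabled(int trip)
{
	trips_disabled |= 1UL << trip;
}

static bool trip_is_disabled(int trip)
{
	return trips_disabled & (1UL << trip);
}

int main(void)
{
	int trip;

	/* pretend trip 2 reported a bogus 0 degree temperature at register time */
	mark_trip_disabled(2);

	for (trip = 0; trip < 4; trip++) {
		if (trip_is_disabled(trip)) {
			printf("trip %d: skipped\n", trip);
			continue;	/* what handle_thermal_trip() now does */
		}
		printf("trip %d: evaluated\n", trip);
	}
	return 0;
}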
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 83aed6193..b1a6ba3c3 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -731,22 +731,16 @@ static int size_fifo(struct uart_8250_port *up)
*/
static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
{
- unsigned char old_dll, old_dlm, old_lcr;
- unsigned int id;
+ unsigned char old_lcr;
+ unsigned int id, old_dl;
old_lcr = serial_in(p, UART_LCR);
serial_out(p, UART_LCR, UART_LCR_CONF_MODE_A);
+ old_dl = serial_dl_read(p);
+ serial_dl_write(p, 0);
+ id = serial_dl_read(p);
+ serial_dl_write(p, old_dl);
- old_dll = serial_in(p, UART_DLL);
- old_dlm = serial_in(p, UART_DLM);
-
- serial_out(p, UART_DLL, 0);
- serial_out(p, UART_DLM, 0);
-
- id = serial_in(p, UART_DLL) | serial_in(p, UART_DLM) << 8;
-
- serial_out(p, UART_DLL, old_dll);
- serial_out(p, UART_DLM, old_dlm);
serial_out(p, UART_LCR, old_lcr);
return id;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index fa4e23930..d37fdcc31 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1114,6 +1114,9 @@ static int acm_probe(struct usb_interface *intf,
if (quirks == NO_UNION_NORMAL) {
data_interface = usb_ifnum_to_if(usb_dev, 1);
control_interface = usb_ifnum_to_if(usb_dev, 0);
+ /* we would crash */
+ if (!data_interface || !control_interface)
+ return -ENODEV;
goto skip_normal_probe;
}
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 56593a9a8..2057d91d8 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -502,11 +502,15 @@ static int usb_unbind_interface(struct device *dev)
int usb_driver_claim_interface(struct usb_driver *driver,
struct usb_interface *iface, void *priv)
{
- struct device *dev = &iface->dev;
+ struct device *dev;
struct usb_device *udev;
int retval = 0;
int lpm_disable_error;
+ if (!iface)
+ return -ENODEV;
+
+ dev = &iface->dev;
if (dev->driver)
return -EBUSY;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 51b436918..84f65743f 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4292,7 +4292,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
{
struct usb_device *hdev = hub->hdev;
struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
- int i, j, retval;
+ int retries, operations, retval, i;
unsigned delay = HUB_SHORT_RESET_TIME;
enum usb_device_speed oldspeed = udev->speed;
const char *speed;
@@ -4394,7 +4394,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
* first 8 bytes of the device descriptor to get the ep0 maxpacket
* value.
*/
- for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
+ for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) {
bool did_new_scheme = false;
if (use_new_scheme(udev, retry_counter)) {
@@ -4421,7 +4421,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
* 255 is for WUSB devices, we actually need to use
* 512 (WUSB1.0[4.8.1]).
*/
- for (j = 0; j < 3; ++j) {
+ for (operations = 0; operations < 3; ++operations) {
buf->bMaxPacketSize0 = 0;
r = usb_control_msg(udev, usb_rcvaddr0pipe(),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
@@ -4441,7 +4441,13 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
r = -EPROTO;
break;
}
- if (r == 0)
+ /*
+ * Some devices time out if they are powered on
+ * when already connected. They need a second
+ * reset. But only on the first attempt,
+ * lest we get into a time out/reset loop
+ */
+ if (r == 0 || (r == -ETIMEDOUT && retries == 0))
break;
}
udev->descriptor.bMaxPacketSize0 =
@@ -4473,7 +4479,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
* authorization will assign the final address.
*/
if (udev->wusb == 0) {
- for (j = 0; j < SET_ADDRESS_TRIES; ++j) {
+ for (operations = 0; operations < SET_ADDRESS_TRIES; ++operations) {
retval = hub_set_address(udev, devnum);
if (retval >= 0)
break;
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index c6bfd13f6..1950e87b4 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -787,6 +787,12 @@ static int iowarrior_probe(struct usb_interface *interface,
iface_desc = interface->cur_altsetting;
dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
+ if (iface_desc->desc.bNumEndpoints < 1) {
+ dev_err(&interface->dev, "Invalid number of endpoints\n");
+ retval = -EINVAL;
+ goto error;
+ }
+
/* set up the endpoint information */
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 73a366de5..9bc0e090b 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -165,6 +165,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
+ { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 01bf53392..244acb129 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -447,6 +447,11 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
struct usb_serial *serial = port->serial;
struct cypress_private *priv;
+ if (!port->interrupt_out_urb || !port->interrupt_in_urb) {
+ dev_err(&port->dev, "required endpoint is missing\n");
+ return -ENODEV;
+ }
+
priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -606,12 +611,6 @@ static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port)
cypress_set_termios(tty, port, &priv->tmp_termios);
/* setup the port and start reading from the device */
- if (!port->interrupt_in_urb) {
- dev_err(&port->dev, "%s - interrupt_in_urb is empty!\n",
- __func__);
- return -1;
- }
-
usb_fill_int_urb(port->interrupt_in_urb, serial->dev,
usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress),
port->interrupt_in_urb->transfer_buffer,
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 12b0e6747..3df7b7ec1 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1251,8 +1251,27 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
static int digi_startup(struct usb_serial *serial)
{
+ struct device *dev = &serial->interface->dev;
struct digi_serial *serial_priv;
int ret;
+ int i;
+
+ /* check whether the device has the expected number of endpoints */
+ if (serial->num_port_pointers < serial->type->num_ports + 1) {
+ dev_err(dev, "OOB endpoints missing\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < serial->type->num_ports + 1 ; i++) {
+ if (!serial->port[i]->read_urb) {
+ dev_err(dev, "bulk-in endpoint missing\n");
+ return -ENODEV;
+ }
+ if (!serial->port[i]->write_urb) {
+ dev_err(dev, "bulk-out endpoint missing\n");
+ return -ENODEV;
+ }
+ }
serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
if (!serial_priv)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8c660ae40..b61f12160 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1004,6 +1004,10 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
+ /* ICP DAS I-756xU devices */
+ { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
+ { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
+ { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index a84df2513..c5d6c1e73 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -872,6 +872,14 @@
#define NOVITUS_BONO_E_PID 0x6010
/*
+ * ICPDAS I-756*U devices
+ */
+#define ICPDAS_VID 0x1b5c
+#define ICPDAS_I7560U_PID 0x0103
+#define ICPDAS_I7561U_PID 0x0104
+#define ICPDAS_I7563U_PID 0x0105
+
+/*
* RT Systems programming cables for various ham radios
*/
#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index fd707d6a1..89726f702 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -376,14 +376,21 @@ static void mct_u232_msr_to_state(struct usb_serial_port *port,
static int mct_u232_port_probe(struct usb_serial_port *port)
{
+ struct usb_serial *serial = port->serial;
struct mct_u232_private *priv;
+ /* check first to simplify error handling */
+ if (!serial->port[1] || !serial->port[1]->interrupt_in_urb) {
+ dev_err(&port->dev, "expected endpoint missing\n");
+ return -ENODEV;
+ }
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
/* Use second interrupt-in endpoint for reading. */
- priv->read_urb = port->serial->port[1]->interrupt_in_urb;
+ priv->read_urb = serial->port[1]->interrupt_in_urb;
priv->read_urb->context = port;
spin_lock_init(&priv->lock);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 348e19834..c6f497f16 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1818,6 +1818,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 9ff9404f9..c90a7e46c 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -812,7 +812,7 @@ static struct scsi_host_template uas_host_template = {
.slave_configure = uas_slave_configure,
.eh_abort_handler = uas_eh_abort_handler,
.eh_bus_reset_handler = uas_eh_bus_reset_handler,
- .can_queue = 65536, /* Is there a limit on the _host_ ? */
+ .can_queue = MAX_CMNDS,
.this_id = -1,
.sg_tablesize = SG_NONE,
.skip_settle_delay = 1,
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
index 71e78ef4b..3a75f3b53 100644
--- a/drivers/watchdog/rc32434_wdt.c
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -237,7 +237,7 @@ static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
return -EINVAL;
/* Fall through */
case WDIOC_GETTIMEOUT:
- return copy_to_user(argp, &timeout, sizeof(int));
+ return copy_to_user(argp, &timeout, sizeof(int)) ? -EFAULT : 0;
default:
return -ENOTTY;
}
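
The rc32434_wdt change above relies on the fact that copy_to_user() returns the number of bytes it failed to copy, not a negative errno; returning that value directly would hand a positive count back to user space on a fault. Mapping any non-zero remainder to -EFAULT is the usual idiom, sketched here with a hypothetical helper:

    #include <linux/uaccess.h>

    /* sketch: copy an int out to user space, reporting faults as -EFAULT */
    static long put_int_sketch(int __user *argp, int value)
    {
            if (copy_to_user(argp, &value, sizeof(value)))
                    return -EFAULT;         /* some bytes were left uncopied */
            return 0;
    }
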
diff --git a/fs/Kconfig b/fs/Kconfig
index dc04844aa..5c1e6f459 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -233,6 +233,7 @@ source "fs/pstore/Kconfig"
source "fs/sysv/Kconfig"
source "fs/ufs/Kconfig"
source "fs/exofs/Kconfig"
+source "fs/aufs/Kconfig"
endif # MISC_FILESYSTEMS
diff --git a/fs/Makefile b/fs/Makefile
index b8da1c2c3..0c61756a2 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -127,3 +127,4 @@ obj-y += exofs/ # Multiple modules
obj-$(CONFIG_CEPH_FS) += ceph/
obj-$(CONFIG_PSTORE) += pstore/
obj-$(CONFIG_EFIVAR_FS) += efivarfs/
+obj-$(CONFIG_AUFS_FS) += aufs/
diff --git a/fs/aufs/Kconfig b/fs/aufs/Kconfig
new file mode 100644
index 000000000..63560ceda
--- /dev/null
+++ b/fs/aufs/Kconfig
@@ -0,0 +1,185 @@
+config AUFS_FS
+ tristate "Aufs (Advanced multi layered unification filesystem) support"
+ help
+ Aufs is a stackable unification filesystem such as Unionfs,
+ which unifies several directories and provides a merged single
+ directory.
+ In its early days, aufs was an entirely re-designed and
+ re-implemented Unionfs Version 1.x series. By introducing many
+ original ideas, approaches and improvements, it has become totally
+ different from Unionfs while keeping the basic features.
+
+if AUFS_FS
+choice
+ prompt "Maximum number of branches"
+ default AUFS_BRANCH_MAX_127
+ help
+ Specifies the maximum number of branches (or member directories)
+ in a single aufs. A larger value consumes more system
+ resources and has a minor impact on performance.
+config AUFS_BRANCH_MAX_127
+ bool "127"
+ help
+ Specifies the maximum number of branches (or member directories)
+ in a single aufs. A larger value consumes more system
+ resources and has a minor impact on performance.
+config AUFS_BRANCH_MAX_511
+ bool "511"
+ help
+ Specifies the maximum number of branches (or member directories)
+ in a single aufs. A larger value consumes more system
+ resources and has a minor impact on performance.
+config AUFS_BRANCH_MAX_1023
+ bool "1023"
+ help
+ Specifies the maximum number of branches (or member directories)
+ in a single aufs. A larger value consumes more system
+ resources and has a minor impact on performance.
+config AUFS_BRANCH_MAX_32767
+ bool "32767"
+ help
+ Specifies the maximum number of branches (or member directories)
+ in a single aufs. A larger value consumes more system
+ resources and has a minor impact on performance.
+endchoice
+
+config AUFS_SBILIST
+ bool
+ depends on AUFS_MAGIC_SYSRQ || PROC_FS
+ default y
+ help
+ Automatic configuration for internal use.
+ Enabled automatically when aufs supports Magic SysRq or /proc.
+
+config AUFS_HNOTIFY
+ bool "Detect direct branch access (bypassing aufs)"
+ help
+ If you want to modify files on branches directly, e.g. bypassing aufs,
+ and want aufs to detect those changes fully, then enable this
+ option and use the 'udba=notify' mount option.
+ Currently there is only one available configuration, "fsnotify".
+ It will have a negative impact on performance.
+ See details in aufs.5.
+
+choice
+ prompt "method" if AUFS_HNOTIFY
+ default AUFS_HFSNOTIFY
+config AUFS_HFSNOTIFY
+ bool "fsnotify"
+ select FSNOTIFY
+endchoice
+
+config AUFS_EXPORT
+ bool "NFS-exportable aufs"
+ depends on EXPORTFS
+ help
+ If you want to export your mounted aufs via NFS, then enable this
+ option. There are several requirements for this configuration.
+ See details in aufs.5.
+
+config AUFS_INO_T_64
+ bool
+ depends on AUFS_EXPORT
+ depends on 64BIT && !(ALPHA || S390)
+ default y
+ help
+ Automatic configuration for internal use.
+ /* typedef unsigned long/int __kernel_ino_t */
+ /* alpha and s390x are int */
+
+config AUFS_XATTR
+ bool "support for XATTR/EA (including Security Labels)"
+ help
+ If your branch fs supports XATTR/EA and you want to make them
+ available in aufs too, then enable this option and specify the
+ branch attributes for EA.
+ See details in aufs.5.
+
+config AUFS_FHSM
+ bool "File-based Hierarchical Storage Management"
+ help
+ Hierarchical Storage Management (or HSM) is a well-known feature
+ in the storage world. Aufs provides this feature as file-based,
+ with multiple branches.
+ These multiple branches are prioritized, i.e. the topmost one
+ should be the fastest drive and be used heavily.
+
+config AUFS_RDU
+ bool "Readdir in userspace"
+ help
+ Aufs has two methods to provide a merged view for a directory,
+ by a user-space library and by kernel-space natively. The latter
+ is always enabled but sometimes large and slow.
+ If you enable this option, install the library from the aufs2-util
+ package, and set some environment variables for your readdir(3),
+ then the work will be handled in user space, which generally
+ shows better performance.
+ See details in aufs.5.
+
+config AUFS_SHWH
+ bool "Show whiteouts"
+ help
+ If you want to make the whiteouts in aufs visible, then enable
+ this option and specify the 'shwh' mount option. Although it may
+ sound philosophical, technically it simply shows the names of the
+ whiteouts while keeping their behaviour.
+
+config AUFS_BR_RAMFS
+ bool "Ramfs (initramfs/rootfs) as an aufs branch"
+ help
+ If you want to use ramfs as an aufs branch fs, then enable this
+ option. Generally tmpfs is recommended.
+ Aufs prohibits ramfs from being a branch fs by default, because
+ initramfs generally becomes unusable after switch_root.
+ If you set initramfs as an aufs branch and boot your
+ system via switch_root, you will easily run into problems since the
+ files in initramfs may become inaccessible.
+ Unless you are going to use ramfs as an aufs branch fs without
+ switch_root, leave it N.
+
+config AUFS_BR_FUSE
+ bool "Fuse fs as an aufs branch"
+ depends on FUSE_FS
+ select AUFS_POLL
+ help
+ If you want to use a fuse-based userspace filesystem as an aufs
+ branch fs, then enable this option.
+ It implements the internal poll(2) operation, which is
+ currently implemented by fuse only.
+
+config AUFS_POLL
+ bool
+ help
+ Automatic configuration for internal use.
+
+config AUFS_BR_HFSPLUS
+ bool "Hfsplus as an aufs branch"
+ depends on HFSPLUS_FS
+ default y
+ help
+ If you want to use hfsplus fs as an aufs branch fs, then enable
+ this option. This option introduces a small overhead when
+ copying up a file on hfsplus.
+
+config AUFS_BDEV_LOOP
+ bool
+ depends on BLK_DEV_LOOP
+ default y
+ help
+ Automatic configuration for internal use.
+ Convert =[ym] into =y.
+
+config AUFS_DEBUG
+ bool "Debug aufs"
+ help
+ Enable this to compile aufs internal debug code.
+ It will have a negative impact on performance.
+
+config AUFS_MAGIC_SYSRQ
+ bool
+ depends on AUFS_DEBUG && MAGIC_SYSRQ
+ default y
+ help
+ Automatic configuration for internal use.
+ Enabled automatically when aufs supports Magic SysRq.
+endif
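
Several of the help texts above refer to mount options such as the branch list and 'udba=notify'. As a rough illustration only (the paths and the exact option syntax are assumptions; aufs.5 is the authoritative reference), mounting an aufs union from C might look like:

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* hypothetical branches: a writable upper layer over a read-only
             * lower one, with udba=notify enabling the AUFS_HNOTIFY detection
             * described in the help text above */
            const char *opts = "br=/tmp/rw=rw:/tmp/ro=ro,udba=notify";

            if (mount("none", "/mnt/union", "aufs", 0, opts))
                    perror("mount");
            return 0;
    }
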
diff --git a/fs/aufs/Makefile b/fs/aufs/Makefile
new file mode 100644
index 000000000..c7efb62b5
--- /dev/null
+++ b/fs/aufs/Makefile
@@ -0,0 +1,36 @@
+
+include ${srctree}/${src}/magic.mk
+
+# cf. include/linux/kernel.h
+# enable pr_debug
+ccflags-y += -DDEBUG
+# sparse requires the full pathname
+ccflags-y += -include ${srctree}/include/uapi/linux/aufs_type.h
+
+obj-$(CONFIG_AUFS_FS) += aufs.o
+aufs-y := module.o sbinfo.o super.o branch.o xino.o sysaufs.o opts.o \
+ wkq.o vfsub.o dcsub.o \
+ cpup.o whout.o wbr_policy.o \
+ dinfo.o dentry.o \
+ dynop.o \
+ finfo.o file.o f_op.o \
+ dir.o vdir.o \
+ iinfo.o inode.o i_op.o i_op_add.o i_op_del.o i_op_ren.o \
+ mvdown.o ioctl.o
+
+# all are boolean
+aufs-$(CONFIG_PROC_FS) += procfs.o plink.o
+aufs-$(CONFIG_SYSFS) += sysfs.o
+aufs-$(CONFIG_DEBUG_FS) += dbgaufs.o
+aufs-$(CONFIG_AUFS_BDEV_LOOP) += loop.o
+aufs-$(CONFIG_AUFS_HNOTIFY) += hnotify.o
+aufs-$(CONFIG_AUFS_HFSNOTIFY) += hfsnotify.o
+aufs-$(CONFIG_AUFS_EXPORT) += export.o
+aufs-$(CONFIG_AUFS_XATTR) += xattr.o
+aufs-$(CONFIG_FS_POSIX_ACL) += posix_acl.o
+aufs-$(CONFIG_AUFS_FHSM) += fhsm.o
+aufs-$(CONFIG_AUFS_POLL) += poll.o
+aufs-$(CONFIG_AUFS_RDU) += rdu.o
+aufs-$(CONFIG_AUFS_BR_HFSPLUS) += hfsplus.o
+aufs-$(CONFIG_AUFS_DEBUG) += debug.o
+aufs-$(CONFIG_AUFS_MAGIC_SYSRQ) += sysrq.o
diff --git a/fs/aufs/aufs.h b/fs/aufs/aufs.h
new file mode 100644
index 000000000..49f43b433
--- /dev/null
+++ b/fs/aufs/aufs.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * all header files
+ */
+
+#ifndef __AUFS_H__
+#define __AUFS_H__
+
+#ifdef __KERNEL__
+
+#define AuStub(type, name, body, ...) \
+ static inline type name(__VA_ARGS__) { body; }
+
+#define AuStubVoid(name, ...) \
+ AuStub(void, name, , __VA_ARGS__)
+#define AuStubInt0(name, ...) \
+ AuStub(int, name, return 0, __VA_ARGS__)
+
+#include "debug.h"
+
+#include "branch.h"
+#include "cpup.h"
+#include "dcsub.h"
+#include "dbgaufs.h"
+#include "dentry.h"
+#include "dir.h"
+#include "dynop.h"
+#include "file.h"
+#include "fstype.h"
+#include "inode.h"
+#include "loop.h"
+#include "module.h"
+#include "opts.h"
+#include "rwsem.h"
+#include "spl.h"
+#include "super.h"
+#include "sysaufs.h"
+#include "vfsub.h"
+#include "whout.h"
+#include "wkq.h"
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_H__ */
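
The AuStub* macros defined in aufs.h above generate empty inline stubs so optional features can be compiled out without an #ifdef at every call site. With hypothetical names, the expansions amount to roughly:

    struct au_foo;      /* hypothetical type used only for illustration */

    /* AuStubVoid(au_foo_fin, struct au_foo *foo) expands to roughly: */
    static inline void au_foo_fin(struct au_foo *foo) { ; }

    /* AuStubInt0(au_foo_init, struct au_foo *foo) expands to roughly: */
    static inline int au_foo_init(struct au_foo *foo) { return 0; }
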
diff --git a/fs/aufs/branch.c b/fs/aufs/branch.c
new file mode 100644
index 000000000..496bdaa99
--- /dev/null
+++ b/fs/aufs/branch.c
@@ -0,0 +1,1394 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * branch management
+ */
+
+#include <linux/compat.h>
+#include <linux/statfs.h>
+#include "aufs.h"
+
+/*
+ * free a single branch
+ */
+static void au_br_do_free(struct au_branch *br)
+{
+ int i;
+ struct au_wbr *wbr;
+ struct au_dykey **key;
+
+ au_hnotify_fin_br(br);
+
+ if (br->br_xino.xi_file)
+ fput(br->br_xino.xi_file);
+ mutex_destroy(&br->br_xino.xi_nondir_mtx);
+
+ AuDebugOn(atomic_read(&br->br_count));
+
+ wbr = br->br_wbr;
+ if (wbr) {
+ for (i = 0; i < AuBrWh_Last; i++)
+ dput(wbr->wbr_wh[i]);
+ AuDebugOn(atomic_read(&wbr->wbr_wh_running));
+ AuRwDestroy(&wbr->wbr_wh_rwsem);
+ }
+
+ if (br->br_fhsm) {
+ au_br_fhsm_fin(br->br_fhsm);
+ kfree(br->br_fhsm);
+ }
+
+ key = br->br_dykey;
+ for (i = 0; i < AuBrDynOp; i++, key++)
+ if (*key)
+ au_dy_put(*key);
+ else
+ break;
+
+ /* recursive lock, s_umount of branch's */
+ lockdep_off();
+ path_put(&br->br_path);
+ lockdep_on();
+ kfree(wbr);
+ kfree(br);
+}
+
+/*
+ * frees all branches
+ */
+void au_br_free(struct au_sbinfo *sbinfo)
+{
+ aufs_bindex_t bmax;
+ struct au_branch **br;
+
+ AuRwMustWriteLock(&sbinfo->si_rwsem);
+
+ bmax = sbinfo->si_bend + 1;
+ br = sbinfo->si_branch;
+ while (bmax--)
+ au_br_do_free(*br++);
+}
+
+/*
+ * find the index of a branch which is specified by @br_id.
+ */
+int au_br_index(struct super_block *sb, aufs_bindex_t br_id)
+{
+ aufs_bindex_t bindex, bend;
+
+ bend = au_sbend(sb);
+ for (bindex = 0; bindex <= bend; bindex++)
+ if (au_sbr_id(sb, bindex) == br_id)
+ return bindex;
+ return -1;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * add a branch
+ */
+
+static int test_overlap(struct super_block *sb, struct dentry *h_adding,
+ struct dentry *h_root)
+{
+ if (unlikely(h_adding == h_root
+ || au_test_loopback_overlap(sb, h_adding)))
+ return 1;
+ if (h_adding->d_sb != h_root->d_sb)
+ return 0;
+ return au_test_subdir(h_adding, h_root)
+ || au_test_subdir(h_root, h_adding);
+}
+
+/*
+ * returns a newly allocated branch. @new_nbranch is the number of branches
+ * after adding the branch.
+ */
+static struct au_branch *au_br_alloc(struct super_block *sb, int new_nbranch,
+ int perm)
+{
+ struct au_branch *add_branch;
+ struct dentry *root;
+ struct inode *inode;
+ int err;
+
+ err = -ENOMEM;
+ root = sb->s_root;
+ add_branch = kzalloc(sizeof(*add_branch), GFP_NOFS);
+ if (unlikely(!add_branch))
+ goto out;
+
+ err = au_hnotify_init_br(add_branch, perm);
+ if (unlikely(err))
+ goto out_br;
+
+ if (au_br_writable(perm)) {
+ /* may be freed separately at changing the branch permission */
+ add_branch->br_wbr = kzalloc(sizeof(*add_branch->br_wbr),
+ GFP_NOFS);
+ if (unlikely(!add_branch->br_wbr))
+ goto out_hnotify;
+ }
+
+ if (au_br_fhsm(perm)) {
+ err = au_fhsm_br_alloc(add_branch);
+ if (unlikely(err))
+ goto out_wbr;
+ }
+
+ err = au_sbr_realloc(au_sbi(sb), new_nbranch);
+ if (!err)
+ err = au_di_realloc(au_di(root), new_nbranch);
+ if (!err) {
+ inode = d_inode(root);
+ err = au_ii_realloc(au_ii(inode), new_nbranch);
+ }
+ if (!err)
+ return add_branch; /* success */
+
+out_wbr:
+ kfree(add_branch->br_wbr);
+out_hnotify:
+ au_hnotify_fin_br(add_branch);
+out_br:
+ kfree(add_branch);
+out:
+ return ERR_PTR(err);
+}
+
+/*
+ * test if the branch permission is legal or not.
+ */
+static int test_br(struct inode *inode, int brperm, char *path)
+{
+ int err;
+
+ err = (au_br_writable(brperm) && IS_RDONLY(inode));
+ if (!err)
+ goto out;
+
+ err = -EINVAL;
+ pr_err("write permission for readonly mount or inode, %s\n", path);
+
+out:
+ return err;
+}
+
+/*
+ * returns:
+ * 0: success, the caller will add it
+ * plus: success, it is already unified, the caller should ignore it
+ * minus: error
+ */
+static int test_add(struct super_block *sb, struct au_opt_add *add, int remount)
+{
+ int err;
+ aufs_bindex_t bend, bindex;
+ struct dentry *root, *h_dentry;
+ struct inode *inode, *h_inode;
+
+ root = sb->s_root;
+ bend = au_sbend(sb);
+ if (unlikely(bend >= 0
+ && au_find_dbindex(root, add->path.dentry) >= 0)) {
+ err = 1;
+ if (!remount) {
+ err = -EINVAL;
+ pr_err("%s duplicated\n", add->pathname);
+ }
+ goto out;
+ }
+
+ err = -ENOSPC; /* -E2BIG; */
+ if (unlikely(AUFS_BRANCH_MAX <= add->bindex
+ || AUFS_BRANCH_MAX - 1 <= bend)) {
+ pr_err("number of branches exceeded %s\n", add->pathname);
+ goto out;
+ }
+
+ err = -EDOM;
+ if (unlikely(add->bindex < 0 || bend + 1 < add->bindex)) {
+ pr_err("bad index %d\n", add->bindex);
+ goto out;
+ }
+
+ inode = d_inode(add->path.dentry);
+ err = -ENOENT;
+ if (unlikely(!inode->i_nlink)) {
+ pr_err("no existence %s\n", add->pathname);
+ goto out;
+ }
+
+ err = -EINVAL;
+ if (unlikely(inode->i_sb == sb)) {
+ pr_err("%s must be outside\n", add->pathname);
+ goto out;
+ }
+
+ if (unlikely(au_test_fs_unsuppoted(inode->i_sb))) {
+ pr_err("unsupported filesystem, %s (%s)\n",
+ add->pathname, au_sbtype(inode->i_sb));
+ goto out;
+ }
+
+ if (unlikely(inode->i_sb->s_stack_depth)) {
+ pr_err("already stacked, %s (%s)\n",
+ add->pathname, au_sbtype(inode->i_sb));
+ goto out;
+ }
+
+ err = test_br(d_inode(add->path.dentry), add->perm, add->pathname);
+ if (unlikely(err))
+ goto out;
+
+ if (bend < 0)
+ return 0; /* success */
+
+ err = -EINVAL;
+ for (bindex = 0; bindex <= bend; bindex++)
+ if (unlikely(test_overlap(sb, add->path.dentry,
+ au_h_dptr(root, bindex)))) {
+ pr_err("%s is overlapped\n", add->pathname);
+ goto out;
+ }
+
+ err = 0;
+ if (au_opt_test(au_mntflags(sb), WARN_PERM)) {
+ h_dentry = au_h_dptr(root, 0);
+ h_inode = d_inode(h_dentry);
+ if ((h_inode->i_mode & S_IALLUGO) != (inode->i_mode & S_IALLUGO)
+ || !uid_eq(h_inode->i_uid, inode->i_uid)
+ || !gid_eq(h_inode->i_gid, inode->i_gid))
+ pr_warn("uid/gid/perm %s %u/%u/0%o, %u/%u/0%o\n",
+ add->pathname,
+ i_uid_read(inode), i_gid_read(inode),
+ (inode->i_mode & S_IALLUGO),
+ i_uid_read(h_inode), i_gid_read(h_inode),
+ (h_inode->i_mode & S_IALLUGO));
+ }
+
+out:
+ return err;
+}
+
+/*
+ * initialize or clean the whiteouts for an adding branch
+ */
+static int au_br_init_wh(struct super_block *sb, struct au_branch *br,
+ int new_perm)
+{
+ int err, old_perm;
+ aufs_bindex_t bindex;
+ struct inode *h_inode;
+ struct au_wbr *wbr;
+ struct au_hinode *hdir;
+ struct dentry *h_dentry;
+
+ err = vfsub_mnt_want_write(au_br_mnt(br));
+ if (unlikely(err))
+ goto out;
+
+ wbr = br->br_wbr;
+ old_perm = br->br_perm;
+ br->br_perm = new_perm;
+ hdir = NULL;
+ h_inode = NULL;
+ bindex = au_br_index(sb, br->br_id);
+ if (0 <= bindex) {
+ hdir = au_hi(d_inode(sb->s_root), bindex);
+ au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT);
+ } else {
+ h_dentry = au_br_dentry(br);
+ h_inode = d_inode(h_dentry);
+ inode_lock_nested(h_inode, AuLsc_I_PARENT);
+ }
+ if (!wbr)
+ err = au_wh_init(br, sb);
+ else {
+ wbr_wh_write_lock(wbr);
+ err = au_wh_init(br, sb);
+ wbr_wh_write_unlock(wbr);
+ }
+ if (hdir)
+ au_hn_imtx_unlock(hdir);
+ else
+ inode_unlock(h_inode);
+ vfsub_mnt_drop_write(au_br_mnt(br));
+ br->br_perm = old_perm;
+
+ if (!err && wbr && !au_br_writable(new_perm)) {
+ kfree(wbr);
+ br->br_wbr = NULL;
+ }
+
+out:
+ return err;
+}
+
+static int au_wbr_init(struct au_branch *br, struct super_block *sb,
+ int perm)
+{
+ int err;
+ struct kstatfs kst;
+ struct au_wbr *wbr;
+
+ wbr = br->br_wbr;
+ au_rw_init(&wbr->wbr_wh_rwsem);
+ atomic_set(&wbr->wbr_wh_running, 0);
+
+ /*
+ * a limit for rmdir/rename a dir
+ * cf. AUFS_MAX_NAMELEN in include/uapi/linux/aufs_type.h
+ */
+ err = vfs_statfs(&br->br_path, &kst);
+ if (unlikely(err))
+ goto out;
+ err = -EINVAL;
+ if (kst.f_namelen >= NAME_MAX)
+ err = au_br_init_wh(sb, br, perm);
+ else
+ pr_err("%pd(%s), unsupported namelen %ld\n",
+ au_br_dentry(br),
+ au_sbtype(au_br_dentry(br)->d_sb), kst.f_namelen);
+
+out:
+ return err;
+}
+
+/* initialize a new branch */
+static int au_br_init(struct au_branch *br, struct super_block *sb,
+ struct au_opt_add *add)
+{
+ int err;
+ struct inode *h_inode;
+
+ err = 0;
+ mutex_init(&br->br_xino.xi_nondir_mtx);
+ br->br_perm = add->perm;
+ br->br_path = add->path; /* set first, path_get() later */
+ spin_lock_init(&br->br_dykey_lock);
+ atomic_set(&br->br_count, 0);
+ atomic_set(&br->br_xino_running, 0);
+ br->br_id = au_new_br_id(sb);
+ AuDebugOn(br->br_id < 0);
+
+ if (au_br_writable(add->perm)) {
+ err = au_wbr_init(br, sb, add->perm);
+ if (unlikely(err))
+ goto out_err;
+ }
+
+ if (au_opt_test(au_mntflags(sb), XINO)) {
+ h_inode = d_inode(add->path.dentry);
+ err = au_xino_br(sb, br, h_inode->i_ino,
+ au_sbr(sb, 0)->br_xino.xi_file, /*do_test*/1);
+ if (unlikely(err)) {
+ AuDebugOn(br->br_xino.xi_file);
+ goto out_err;
+ }
+ }
+
+ sysaufs_br_init(br);
+ path_get(&br->br_path);
+ goto out; /* success */
+
+out_err:
+ memset(&br->br_path, 0, sizeof(br->br_path));
+out:
+ return err;
+}
+
+static void au_br_do_add_brp(struct au_sbinfo *sbinfo, aufs_bindex_t bindex,
+ struct au_branch *br, aufs_bindex_t bend,
+ aufs_bindex_t amount)
+{
+ struct au_branch **brp;
+
+ AuRwMustWriteLock(&sbinfo->si_rwsem);
+
+ brp = sbinfo->si_branch + bindex;
+ memmove(brp + 1, brp, sizeof(*brp) * amount);
+ *brp = br;
+ sbinfo->si_bend++;
+ if (unlikely(bend < 0))
+ sbinfo->si_bend = 0;
+}
+
+static void au_br_do_add_hdp(struct au_dinfo *dinfo, aufs_bindex_t bindex,
+ aufs_bindex_t bend, aufs_bindex_t amount)
+{
+ struct au_hdentry *hdp;
+
+ AuRwMustWriteLock(&dinfo->di_rwsem);
+
+ hdp = dinfo->di_hdentry + bindex;
+ memmove(hdp + 1, hdp, sizeof(*hdp) * amount);
+ au_h_dentry_init(hdp);
+ dinfo->di_bend++;
+ if (unlikely(bend < 0))
+ dinfo->di_bstart = 0;
+}
+
+static void au_br_do_add_hip(struct au_iinfo *iinfo, aufs_bindex_t bindex,
+ aufs_bindex_t bend, aufs_bindex_t amount)
+{
+ struct au_hinode *hip;
+
+ AuRwMustWriteLock(&iinfo->ii_rwsem);
+
+ hip = iinfo->ii_hinode + bindex;
+ memmove(hip + 1, hip, sizeof(*hip) * amount);
+ hip->hi_inode = NULL;
+ au_hn_init(hip);
+ iinfo->ii_bend++;
+ if (unlikely(bend < 0))
+ iinfo->ii_bstart = 0;
+}
+
+static void au_br_do_add(struct super_block *sb, struct au_branch *br,
+ aufs_bindex_t bindex)
+{
+ struct dentry *root, *h_dentry;
+ struct inode *root_inode, *h_inode;
+ aufs_bindex_t bend, amount;
+
+ root = sb->s_root;
+ root_inode = d_inode(root);
+ bend = au_sbend(sb);
+ amount = bend + 1 - bindex;
+ h_dentry = au_br_dentry(br);
+ au_sbilist_lock();
+ au_br_do_add_brp(au_sbi(sb), bindex, br, bend, amount);
+ au_br_do_add_hdp(au_di(root), bindex, bend, amount);
+ au_br_do_add_hip(au_ii(root_inode), bindex, bend, amount);
+ au_set_h_dptr(root, bindex, dget(h_dentry));
+ h_inode = d_inode(h_dentry);
+ au_set_h_iptr(root_inode, bindex, au_igrab(h_inode), /*flags*/0);
+ au_sbilist_unlock();
+}
+
+int au_br_add(struct super_block *sb, struct au_opt_add *add, int remount)
+{
+ int err;
+ aufs_bindex_t bend, add_bindex;
+ struct dentry *root, *h_dentry;
+ struct inode *root_inode;
+ struct au_branch *add_branch;
+
+ root = sb->s_root;
+ root_inode = d_inode(root);
+ IMustLock(root_inode);
+ err = test_add(sb, add, remount);
+ if (unlikely(err < 0))
+ goto out;
+ if (err) {
+ err = 0;
+ goto out; /* success */
+ }
+
+ bend = au_sbend(sb);
+ add_branch = au_br_alloc(sb, bend + 2, add->perm);
+ err = PTR_ERR(add_branch);
+ if (IS_ERR(add_branch))
+ goto out;
+
+ err = au_br_init(add_branch, sb, add);
+ if (unlikely(err)) {
+ au_br_do_free(add_branch);
+ goto out;
+ }
+
+ add_bindex = add->bindex;
+ if (!remount)
+ au_br_do_add(sb, add_branch, add_bindex);
+ else {
+ sysaufs_brs_del(sb, add_bindex);
+ au_br_do_add(sb, add_branch, add_bindex);
+ sysaufs_brs_add(sb, add_bindex);
+ }
+
+ h_dentry = add->path.dentry;
+ if (!add_bindex) {
+ au_cpup_attr_all(root_inode, /*force*/1);
+ sb->s_maxbytes = h_dentry->d_sb->s_maxbytes;
+ } else
+ au_add_nlink(root_inode, d_inode(h_dentry));
+
+ /*
+ * this test/set prevents aufs from handling unnecessary notify events
+ * of xino files, in case of re-adding a writable branch which was
+ * once detached from aufs.
+ */
+ if (au_xino_brid(sb) < 0
+ && au_br_writable(add_branch->br_perm)
+ && !au_test_fs_bad_xino(h_dentry->d_sb)
+ && add_branch->br_xino.xi_file
+ && add_branch->br_xino.xi_file->f_path.dentry->d_parent == h_dentry)
+ au_xino_brid_set(sb, add_branch->br_id);
+
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static unsigned long long au_farray_cb(struct super_block *sb, void *a,
+ unsigned long long max __maybe_unused,
+ void *arg)
+{
+ unsigned long long n;
+ struct file **p, *f;
+ struct au_sphlhead *files;
+ struct au_finfo *finfo;
+
+ n = 0;
+ p = a;
+ files = &au_sbi(sb)->si_files;
+ spin_lock(&files->spin);
+ hlist_for_each_entry(finfo, &files->head, fi_hlist) {
+ f = finfo->fi_file;
+ if (file_count(f)
+ && !special_file(file_inode(f)->i_mode)) {
+ get_file(f);
+ *p++ = f;
+ n++;
+ AuDebugOn(n > max);
+ }
+ }
+ spin_unlock(&files->spin);
+
+ return n;
+}
+
+static struct file **au_farray_alloc(struct super_block *sb,
+ unsigned long long *max)
+{
+ *max = atomic_long_read(&au_sbi(sb)->si_nfiles);
+ return au_array_alloc(max, au_farray_cb, sb, /*arg*/NULL);
+}
+
+static void au_farray_free(struct file **a, unsigned long long max)
+{
+ unsigned long long ull;
+
+ for (ull = 0; ull < max; ull++)
+ if (a[ull])
+ fput(a[ull]);
+ kvfree(a);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * delete a branch
+ */
+
+/* to show the line number, do not make it inlined function */
+#define AuVerbose(do_info, fmt, ...) do { \
+ if (do_info) \
+ pr_info(fmt, ##__VA_ARGS__); \
+} while (0)
+
+static int au_test_ibusy(struct inode *inode, aufs_bindex_t bstart,
+ aufs_bindex_t bend)
+{
+ return (inode && !S_ISDIR(inode->i_mode)) || bstart == bend;
+}
+
+static int au_test_dbusy(struct dentry *dentry, aufs_bindex_t bstart,
+ aufs_bindex_t bend)
+{
+ return au_test_ibusy(d_inode(dentry), bstart, bend);
+}
+
+/*
+ * test if the branch is deletable or not.
+ */
+static int test_dentry_busy(struct dentry *root, aufs_bindex_t bindex,
+ unsigned int sigen, const unsigned int verbose)
+{
+ int err, i, j, ndentry;
+ aufs_bindex_t bstart, bend;
+ struct au_dcsub_pages dpages;
+ struct au_dpage *dpage;
+ struct dentry *d;
+
+ err = au_dpages_init(&dpages, GFP_NOFS);
+ if (unlikely(err))
+ goto out;
+ err = au_dcsub_pages(&dpages, root, NULL, NULL);
+ if (unlikely(err))
+ goto out_dpages;
+
+ for (i = 0; !err && i < dpages.ndpage; i++) {
+ dpage = dpages.dpages + i;
+ ndentry = dpage->ndentry;
+ for (j = 0; !err && j < ndentry; j++) {
+ d = dpage->dentries[j];
+ AuDebugOn(au_dcount(d) <= 0);
+ if (!au_digen_test(d, sigen)) {
+ di_read_lock_child(d, AuLock_IR);
+ if (unlikely(au_dbrange_test(d))) {
+ di_read_unlock(d, AuLock_IR);
+ continue;
+ }
+ } else {
+ di_write_lock_child(d);
+ if (unlikely(au_dbrange_test(d))) {
+ di_write_unlock(d);
+ continue;
+ }
+ err = au_reval_dpath(d, sigen);
+ if (!err)
+ di_downgrade_lock(d, AuLock_IR);
+ else {
+ di_write_unlock(d);
+ break;
+ }
+ }
+
+ /* AuDbgDentry(d); */
+ bstart = au_dbstart(d);
+ bend = au_dbend(d);
+ if (bstart <= bindex
+ && bindex <= bend
+ && au_h_dptr(d, bindex)
+ && au_test_dbusy(d, bstart, bend)) {
+ err = -EBUSY;
+ AuVerbose(verbose, "busy %pd\n", d);
+ AuDbgDentry(d);
+ }
+ di_read_unlock(d, AuLock_IR);
+ }
+ }
+
+out_dpages:
+ au_dpages_free(&dpages);
+out:
+ return err;
+}
+
+static int test_inode_busy(struct super_block *sb, aufs_bindex_t bindex,
+ unsigned int sigen, const unsigned int verbose)
+{
+ int err;
+ unsigned long long max, ull;
+ struct inode *i, **array;
+ aufs_bindex_t bstart, bend;
+
+ array = au_iarray_alloc(sb, &max);
+ err = PTR_ERR(array);
+ if (IS_ERR(array))
+ goto out;
+
+ err = 0;
+ AuDbg("b%d\n", bindex);
+ for (ull = 0; !err && ull < max; ull++) {
+ i = array[ull];
+ if (unlikely(!i))
+ break;
+ if (i->i_ino == AUFS_ROOT_INO)
+ continue;
+
+ /* AuDbgInode(i); */
+ if (au_iigen(i, NULL) == sigen)
+ ii_read_lock_child(i);
+ else {
+ ii_write_lock_child(i);
+ err = au_refresh_hinode_self(i);
+ au_iigen_dec(i);
+ if (!err)
+ ii_downgrade_lock(i);
+ else {
+ ii_write_unlock(i);
+ break;
+ }
+ }
+
+ bstart = au_ibstart(i);
+ bend = au_ibend(i);
+ if (bstart <= bindex
+ && bindex <= bend
+ && au_h_iptr(i, bindex)
+ && au_test_ibusy(i, bstart, bend)) {
+ err = -EBUSY;
+ AuVerbose(verbose, "busy i%lu\n", i->i_ino);
+ AuDbgInode(i);
+ }
+ ii_read_unlock(i);
+ }
+ au_iarray_free(array, max);
+
+out:
+ return err;
+}
+
+static int test_children_busy(struct dentry *root, aufs_bindex_t bindex,
+ const unsigned int verbose)
+{
+ int err;
+ unsigned int sigen;
+
+ sigen = au_sigen(root->d_sb);
+ DiMustNoWaiters(root);
+ IiMustNoWaiters(d_inode(root));
+ di_write_unlock(root);
+ err = test_dentry_busy(root, bindex, sigen, verbose);
+ if (!err)
+ err = test_inode_busy(root->d_sb, bindex, sigen, verbose);
+ di_write_lock_child(root); /* aufs_write_lock() calls ..._child() */
+
+ return err;
+}
+
+static int test_dir_busy(struct file *file, aufs_bindex_t br_id,
+ struct file **to_free, int *idx)
+{
+ int err;
+ unsigned char matched, root;
+ aufs_bindex_t bindex, bend;
+ struct au_fidir *fidir;
+ struct au_hfile *hfile;
+
+ err = 0;
+ root = IS_ROOT(file->f_path.dentry);
+ if (root) {
+ get_file(file);
+ to_free[*idx] = file;
+ (*idx)++;
+ goto out;
+ }
+
+ matched = 0;
+ fidir = au_fi(file)->fi_hdir;
+ AuDebugOn(!fidir);
+ bend = au_fbend_dir(file);
+ for (bindex = au_fbstart(file); bindex <= bend; bindex++) {
+ hfile = fidir->fd_hfile + bindex;
+ if (!hfile->hf_file)
+ continue;
+
+ if (hfile->hf_br->br_id == br_id) {
+ matched = 1;
+ break;
+ }
+ }
+ if (matched)
+ err = -EBUSY;
+
+out:
+ return err;
+}
+
+static int test_file_busy(struct super_block *sb, aufs_bindex_t br_id,
+ struct file **to_free, int opened)
+{
+ int err, idx;
+ unsigned long long ull, max;
+ aufs_bindex_t bstart;
+ struct file *file, **array;
+ struct dentry *root;
+ struct au_hfile *hfile;
+
+ array = au_farray_alloc(sb, &max);
+ err = PTR_ERR(array);
+ if (IS_ERR(array))
+ goto out;
+
+ err = 0;
+ idx = 0;
+ root = sb->s_root;
+ di_write_unlock(root);
+ for (ull = 0; ull < max; ull++) {
+ file = array[ull];
+ if (unlikely(!file))
+ break;
+
+ /* AuDbg("%pD\n", file); */
+ fi_read_lock(file);
+ bstart = au_fbstart(file);
+ if (!d_is_dir(file->f_path.dentry)) {
+ hfile = &au_fi(file)->fi_htop;
+ if (hfile->hf_br->br_id == br_id)
+ err = -EBUSY;
+ } else
+ err = test_dir_busy(file, br_id, to_free, &idx);
+ fi_read_unlock(file);
+ if (unlikely(err))
+ break;
+ }
+ di_write_lock_child(root);
+ au_farray_free(array, max);
+ AuDebugOn(idx > opened);
+
+out:
+ return err;
+}
+
+static void br_del_file(struct file **to_free, unsigned long long opened,
+ aufs_bindex_t br_id)
+{
+ unsigned long long ull;
+ aufs_bindex_t bindex, bstart, bend, bfound;
+ struct file *file;
+ struct au_fidir *fidir;
+ struct au_hfile *hfile;
+
+ for (ull = 0; ull < opened; ull++) {
+ file = to_free[ull];
+ if (unlikely(!file))
+ break;
+
+ /* AuDbg("%pD\n", file); */
+ AuDebugOn(!d_is_dir(file->f_path.dentry));
+ bfound = -1;
+ fidir = au_fi(file)->fi_hdir;
+ AuDebugOn(!fidir);
+ fi_write_lock(file);
+ bstart = au_fbstart(file);
+ bend = au_fbend_dir(file);
+ for (bindex = bstart; bindex <= bend; bindex++) {
+ hfile = fidir->fd_hfile + bindex;
+ if (!hfile->hf_file)
+ continue;
+
+ if (hfile->hf_br->br_id == br_id) {
+ bfound = bindex;
+ break;
+ }
+ }
+ AuDebugOn(bfound < 0);
+ au_set_h_fptr(file, bfound, NULL);
+ if (bfound == bstart) {
+ for (bstart++; bstart <= bend; bstart++)
+ if (au_hf_dir(file, bstart)) {
+ au_set_fbstart(file, bstart);
+ break;
+ }
+ }
+ fi_write_unlock(file);
+ }
+}
+
+static void au_br_do_del_brp(struct au_sbinfo *sbinfo,
+ const aufs_bindex_t bindex,
+ const aufs_bindex_t bend)
+{
+ struct au_branch **brp, **p;
+
+ AuRwMustWriteLock(&sbinfo->si_rwsem);
+
+ brp = sbinfo->si_branch + bindex;
+ if (bindex < bend)
+ memmove(brp, brp + 1, sizeof(*brp) * (bend - bindex));
+ sbinfo->si_branch[0 + bend] = NULL;
+ sbinfo->si_bend--;
+
+ p = krealloc(sbinfo->si_branch, sizeof(*p) * bend, AuGFP_SBILIST);
+ if (p)
+ sbinfo->si_branch = p;
+ /* harmless error */
+}
+
+static void au_br_do_del_hdp(struct au_dinfo *dinfo, const aufs_bindex_t bindex,
+ const aufs_bindex_t bend)
+{
+ struct au_hdentry *hdp, *p;
+
+ AuRwMustWriteLock(&dinfo->di_rwsem);
+
+ hdp = dinfo->di_hdentry;
+ if (bindex < bend)
+ memmove(hdp + bindex, hdp + bindex + 1,
+ sizeof(*hdp) * (bend - bindex));
+ hdp[0 + bend].hd_dentry = NULL;
+ dinfo->di_bend--;
+
+ p = krealloc(hdp, sizeof(*p) * bend, AuGFP_SBILIST);
+ if (p)
+ dinfo->di_hdentry = p;
+ /* harmless error */
+}
+
+static void au_br_do_del_hip(struct au_iinfo *iinfo, const aufs_bindex_t bindex,
+ const aufs_bindex_t bend)
+{
+ struct au_hinode *hip, *p;
+
+ AuRwMustWriteLock(&iinfo->ii_rwsem);
+
+ hip = iinfo->ii_hinode + bindex;
+ if (bindex < bend)
+ memmove(hip, hip + 1, sizeof(*hip) * (bend - bindex));
+ iinfo->ii_hinode[0 + bend].hi_inode = NULL;
+ au_hn_init(iinfo->ii_hinode + bend);
+ iinfo->ii_bend--;
+
+ p = krealloc(iinfo->ii_hinode, sizeof(*p) * bend, AuGFP_SBILIST);
+ if (p)
+ iinfo->ii_hinode = p;
+ /* harmless error */
+}
+
+static void au_br_do_del(struct super_block *sb, aufs_bindex_t bindex,
+ struct au_branch *br)
+{
+ aufs_bindex_t bend;
+ struct au_sbinfo *sbinfo;
+ struct dentry *root, *h_root;
+ struct inode *inode, *h_inode;
+ struct au_hinode *hinode;
+
+ SiMustWriteLock(sb);
+
+ root = sb->s_root;
+ inode = d_inode(root);
+ sbinfo = au_sbi(sb);
+ bend = sbinfo->si_bend;
+
+ h_root = au_h_dptr(root, bindex);
+ hinode = au_hi(inode, bindex);
+ h_inode = au_igrab(hinode->hi_inode);
+ au_hiput(hinode);
+
+ au_sbilist_lock();
+ au_br_do_del_brp(sbinfo, bindex, bend);
+ au_br_do_del_hdp(au_di(root), bindex, bend);
+ au_br_do_del_hip(au_ii(inode), bindex, bend);
+ au_sbilist_unlock();
+
+ dput(h_root);
+ iput(h_inode);
+ au_br_do_free(br);
+}
+
+static unsigned long long empty_cb(struct super_block *sb, void *array,
+ unsigned long long max, void *arg)
+{
+ return max;
+}
+
+int au_br_del(struct super_block *sb, struct au_opt_del *del, int remount)
+{
+ int err, rerr, i;
+ unsigned long long opened;
+ unsigned int mnt_flags;
+ aufs_bindex_t bindex, bend, br_id;
+ unsigned char do_wh, verbose;
+ struct au_branch *br;
+ struct au_wbr *wbr;
+ struct dentry *root;
+ struct file **to_free;
+
+ err = 0;
+ opened = 0;
+ to_free = NULL;
+ root = sb->s_root;
+ bindex = au_find_dbindex(root, del->h_path.dentry);
+ if (bindex < 0) {
+ if (remount)
+ goto out; /* success */
+ err = -ENOENT;
+ pr_err("%s no such branch\n", del->pathname);
+ goto out;
+ }
+ AuDbg("bindex b%d\n", bindex);
+
+ err = -EBUSY;
+ mnt_flags = au_mntflags(sb);
+ verbose = !!au_opt_test(mnt_flags, VERBOSE);
+ bend = au_sbend(sb);
+ if (unlikely(!bend)) {
+ AuVerbose(verbose, "no more branches left\n");
+ goto out;
+ }
+ br = au_sbr(sb, bindex);
+ AuDebugOn(!path_equal(&br->br_path, &del->h_path));
+
+ br_id = br->br_id;
+ opened = atomic_read(&br->br_count);
+ if (unlikely(opened)) {
+ to_free = au_array_alloc(&opened, empty_cb, sb, NULL);
+ err = PTR_ERR(to_free);
+ if (IS_ERR(to_free))
+ goto out;
+
+ err = test_file_busy(sb, br_id, to_free, opened);
+ if (unlikely(err)) {
+ AuVerbose(verbose, "%llu file(s) opened\n", opened);
+ goto out;
+ }
+ }
+
+ wbr = br->br_wbr;
+ do_wh = wbr && (wbr->wbr_whbase || wbr->wbr_plink || wbr->wbr_orph);
+ if (do_wh) {
+ /* instead of WbrWhMustWriteLock(wbr) */
+ SiMustWriteLock(sb);
+ for (i = 0; i < AuBrWh_Last; i++) {
+ dput(wbr->wbr_wh[i]);
+ wbr->wbr_wh[i] = NULL;
+ }
+ }
+
+ err = test_children_busy(root, bindex, verbose);
+ if (unlikely(err)) {
+ if (do_wh)
+ goto out_wh;
+ goto out;
+ }
+
+ err = 0;
+ if (to_free) {
+ /*
+ * now we confirmed the branch is deletable.
+ * let's free the remaining opened dirs on the branch.
+ */
+ di_write_unlock(root);
+ br_del_file(to_free, opened, br_id);
+ di_write_lock_child(root);
+ }
+
+ if (!remount)
+ au_br_do_del(sb, bindex, br);
+ else {
+ sysaufs_brs_del(sb, bindex);
+ au_br_do_del(sb, bindex, br);
+ sysaufs_brs_add(sb, bindex);
+ }
+
+ if (!bindex) {
+ au_cpup_attr_all(d_inode(root), /*force*/1);
+ sb->s_maxbytes = au_sbr_sb(sb, 0)->s_maxbytes;
+ } else
+ au_sub_nlink(d_inode(root), d_inode(del->h_path.dentry));
+ if (au_opt_test(mnt_flags, PLINK))
+ au_plink_half_refresh(sb, br_id);
+
+ if (au_xino_brid(sb) == br_id)
+ au_xino_brid_set(sb, -1);
+ goto out; /* success */
+
+out_wh:
+ /* revert */
+ rerr = au_br_init_wh(sb, br, br->br_perm);
+ if (rerr)
+ pr_warn("failed re-creating base whiteout, %s. (%d)\n",
+ del->pathname, rerr);
+out:
+ if (to_free)
+ au_farray_free(to_free, opened);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_ibusy(struct super_block *sb, struct aufs_ibusy __user *arg)
+{
+ int err;
+ aufs_bindex_t bstart, bend;
+ struct aufs_ibusy ibusy;
+ struct inode *inode, *h_inode;
+
+ err = -EPERM;
+ if (unlikely(!capable(CAP_SYS_ADMIN)))
+ goto out;
+
+ err = copy_from_user(&ibusy, arg, sizeof(ibusy));
+ if (!err)
+ err = !access_ok(VERIFY_WRITE, &arg->h_ino, sizeof(arg->h_ino));
+ if (unlikely(err)) {
+ err = -EFAULT;
+ AuTraceErr(err);
+ goto out;
+ }
+
+ err = -EINVAL;
+ si_read_lock(sb, AuLock_FLUSH);
+ if (unlikely(ibusy.bindex < 0 || ibusy.bindex > au_sbend(sb)))
+ goto out_unlock;
+
+ err = 0;
+ ibusy.h_ino = 0; /* invalid */
+ inode = ilookup(sb, ibusy.ino);
+ if (!inode
+ || inode->i_ino == AUFS_ROOT_INO
+ || is_bad_inode(inode))
+ goto out_unlock;
+
+ ii_read_lock_child(inode);
+ bstart = au_ibstart(inode);
+ bend = au_ibend(inode);
+ if (bstart <= ibusy.bindex && ibusy.bindex <= bend) {
+ h_inode = au_h_iptr(inode, ibusy.bindex);
+ if (h_inode && au_test_ibusy(inode, bstart, bend))
+ ibusy.h_ino = h_inode->i_ino;
+ }
+ ii_read_unlock(inode);
+ iput(inode);
+
+out_unlock:
+ si_read_unlock(sb);
+ if (!err) {
+ err = __put_user(ibusy.h_ino, &arg->h_ino);
+ if (unlikely(err)) {
+ err = -EFAULT;
+ AuTraceErr(err);
+ }
+ }
+out:
+ return err;
+}
+
+long au_ibusy_ioctl(struct file *file, unsigned long arg)
+{
+ return au_ibusy(file->f_path.dentry->d_sb, (void __user *)arg);
+}
+
+#ifdef CONFIG_COMPAT
+long au_ibusy_compat_ioctl(struct file *file, unsigned long arg)
+{
+ return au_ibusy(file->f_path.dentry->d_sb, compat_ptr(arg));
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * change a branch permission
+ */
+
+static void au_warn_ima(void)
+{
+#ifdef CONFIG_IMA
+ /* since it doesn't support mark_files_ro() */
+ AuWarn1("RW -> RO makes IMA to produce wrong message\n");
+#endif
+}
+
+static int do_need_sigen_inc(int a, int b)
+{
+ return au_br_whable(a) && !au_br_whable(b);
+}
+
+static int need_sigen_inc(int old, int new)
+{
+ return do_need_sigen_inc(old, new)
+ || do_need_sigen_inc(new, old);
+}
+
+static int au_br_mod_files_ro(struct super_block *sb, aufs_bindex_t bindex)
+{
+ int err, do_warn;
+ unsigned int mnt_flags;
+ unsigned long long ull, max;
+ aufs_bindex_t br_id;
+ unsigned char verbose, writer;
+ struct file *file, *hf, **array;
+ struct au_hfile *hfile;
+
+ mnt_flags = au_mntflags(sb);
+ verbose = !!au_opt_test(mnt_flags, VERBOSE);
+
+ array = au_farray_alloc(sb, &max);
+ err = PTR_ERR(array);
+ if (IS_ERR(array))
+ goto out;
+
+ do_warn = 0;
+ br_id = au_sbr_id(sb, bindex);
+ for (ull = 0; ull < max; ull++) {
+ file = array[ull];
+ if (unlikely(!file))
+ break;
+
+ /* AuDbg("%pD\n", file); */
+ fi_read_lock(file);
+ if (unlikely(au_test_mmapped(file))) {
+ err = -EBUSY;
+ AuVerbose(verbose, "mmapped %pD\n", file);
+ AuDbgFile(file);
+ FiMustNoWaiters(file);
+ fi_read_unlock(file);
+ goto out_array;
+ }
+
+ hfile = &au_fi(file)->fi_htop;
+ hf = hfile->hf_file;
+ if (!d_is_reg(file->f_path.dentry)
+ || !(file->f_mode & FMODE_WRITE)
+ || hfile->hf_br->br_id != br_id
+ || !(hf->f_mode & FMODE_WRITE))
+ array[ull] = NULL;
+ else {
+ do_warn = 1;
+ get_file(file);
+ }
+
+ FiMustNoWaiters(file);
+ fi_read_unlock(file);
+ fput(file);
+ }
+
+ err = 0;
+ if (do_warn)
+ au_warn_ima();
+
+ for (ull = 0; ull < max; ull++) {
+ file = array[ull];
+ if (!file)
+ continue;
+
+ /* todo: already flushed? */
+ /*
+ * fs/super.c:mark_files_ro() is gone, but aufs keeps its
+ * approach which resets f_mode and calls mnt_drop_write() and
+ * file_release_write() for each file, because the branch
+ * attribute in aufs world is totally different from the native
+ * fs rw/ro mode.
+ */
+ /* fi_read_lock(file); */
+ hfile = &au_fi(file)->fi_htop;
+ hf = hfile->hf_file;
+ /* fi_read_unlock(file); */
+ spin_lock(&hf->f_lock);
+ writer = !!(hf->f_mode & FMODE_WRITER);
+ hf->f_mode &= ~(FMODE_WRITE | FMODE_WRITER);
+ spin_unlock(&hf->f_lock);
+ if (writer) {
+ put_write_access(file_inode(hf));
+ __mnt_drop_write(hf->f_path.mnt);
+ }
+ }
+
+out_array:
+ au_farray_free(array, max);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+int au_br_mod(struct super_block *sb, struct au_opt_mod *mod, int remount,
+ int *do_refresh)
+{
+ int err, rerr;
+ aufs_bindex_t bindex;
+ struct dentry *root;
+ struct au_branch *br;
+ struct au_br_fhsm *bf;
+
+ root = sb->s_root;
+ bindex = au_find_dbindex(root, mod->h_root);
+ if (bindex < 0) {
+ if (remount)
+ return 0; /* success */
+ err = -ENOENT;
+ pr_err("%s no such branch\n", mod->path);
+ goto out;
+ }
+ AuDbg("bindex b%d\n", bindex);
+
+ err = test_br(d_inode(mod->h_root), mod->perm, mod->path);
+ if (unlikely(err))
+ goto out;
+
+ br = au_sbr(sb, bindex);
+ AuDebugOn(mod->h_root != au_br_dentry(br));
+ if (br->br_perm == mod->perm)
+ return 0; /* success */
+
+ /* pre-allocate for non-fhsm --> fhsm */
+ bf = NULL;
+ if (!au_br_fhsm(br->br_perm) && au_br_fhsm(mod->perm)) {
+ err = au_fhsm_br_alloc(br);
+ if (unlikely(err))
+ goto out;
+ bf = br->br_fhsm;
+ br->br_fhsm = NULL;
+ }
+
+ if (au_br_writable(br->br_perm)) {
+ /* remove whiteout base */
+ err = au_br_init_wh(sb, br, mod->perm);
+ if (unlikely(err))
+ goto out_bf;
+
+ if (!au_br_writable(mod->perm)) {
+ /* rw --> ro, file might be mmapped */
+ DiMustNoWaiters(root);
+ IiMustNoWaiters(d_inode(root));
+ di_write_unlock(root);
+ err = au_br_mod_files_ro(sb, bindex);
+ /* aufs_write_lock() calls ..._child() */
+ di_write_lock_child(root);
+
+ if (unlikely(err)) {
+ rerr = -ENOMEM;
+ br->br_wbr = kzalloc(sizeof(*br->br_wbr),
+ GFP_NOFS);
+ if (br->br_wbr)
+ rerr = au_wbr_init(br, sb, br->br_perm);
+ if (unlikely(rerr)) {
+ AuIOErr("nested error %d (%d)\n",
+ rerr, err);
+ br->br_perm = mod->perm;
+ }
+ }
+ }
+ } else if (au_br_writable(mod->perm)) {
+ /* ro --> rw */
+ err = -ENOMEM;
+ br->br_wbr = kzalloc(sizeof(*br->br_wbr), GFP_NOFS);
+ if (br->br_wbr) {
+ err = au_wbr_init(br, sb, mod->perm);
+ if (unlikely(err)) {
+ kfree(br->br_wbr);
+ br->br_wbr = NULL;
+ }
+ }
+ }
+ if (unlikely(err))
+ goto out_bf;
+
+ if (au_br_fhsm(br->br_perm)) {
+ if (!au_br_fhsm(mod->perm)) {
+ /* fhsm --> non-fhsm */
+ au_br_fhsm_fin(br->br_fhsm);
+ kfree(br->br_fhsm);
+ br->br_fhsm = NULL;
+ }
+ } else if (au_br_fhsm(mod->perm))
+ /* non-fhsm --> fhsm */
+ br->br_fhsm = bf;
+
+ *do_refresh |= need_sigen_inc(br->br_perm, mod->perm);
+ br->br_perm = mod->perm;
+ goto out; /* success */
+
+out_bf:
+ kfree(bf);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_br_stfs(struct au_branch *br, struct aufs_stfs *stfs)
+{
+ int err;
+ struct kstatfs kstfs;
+
+ err = vfs_statfs(&br->br_path, &kstfs);
+ if (!err) {
+ stfs->f_blocks = kstfs.f_blocks;
+ stfs->f_bavail = kstfs.f_bavail;
+ stfs->f_files = kstfs.f_files;
+ stfs->f_ffree = kstfs.f_ffree;
+ }
+
+ return err;
+}
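
au_br_do_add_brp()/au_br_do_del_brp() above (and their dentry/inode counterparts) keep the per-superblock branch array dense by shifting entries around the target index with memmove(). A minimal, self-contained sketch of that insert/remove pattern on a pointer array, assuming the backing storage has already been resized by the caller as au_sbr_realloc()/krealloc() do in the real code:

    #include <stddef.h>
    #include <string.h>

    /* insert 'item' at 'idx'; 'n' entries are valid and the array can hold n + 1 */
    static void insert_at(void **arr, int n, int idx, void *item)
    {
            memmove(arr + idx + 1, arr + idx, sizeof(*arr) * (n - idx));
            arr[idx] = item;
    }

    /* remove the entry at 'idx', keeping the remaining n - 1 entries packed */
    static void remove_at(void **arr, int n, int idx)
    {
            memmove(arr + idx, arr + idx + 1, sizeof(*arr) * (n - idx - 1));
            arr[n - 1] = NULL;
    }
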
diff --git a/fs/aufs/branch.h b/fs/aufs/branch.h
new file mode 100644
index 000000000..4c52ae166
--- /dev/null
+++ b/fs/aufs/branch.h
@@ -0,0 +1,266 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * branch filesystems and xino for them
+ */
+
+#ifndef __AUFS_BRANCH_H__
+#define __AUFS_BRANCH_H__
+
+#ifdef __KERNEL__
+
+#include <linux/mount.h>
+#include "dynop.h"
+#include "rwsem.h"
+#include "super.h"
+
+/* ---------------------------------------------------------------------- */
+
+/* a xino file */
+struct au_xino_file {
+ struct file *xi_file;
+ struct mutex xi_nondir_mtx;
+
+ /* todo: make xino files an array to support huge inode number */
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *xi_dbgaufs;
+#endif
+};
+
+/* File-based Hierarchical Storage Management */
+struct au_br_fhsm {
+#ifdef CONFIG_AUFS_FHSM
+ struct mutex bf_lock;
+ unsigned long bf_jiffy;
+ struct aufs_stfs bf_stfs;
+ int bf_readable;
+#endif
+};
+
+/* members for writable branch only */
+enum {AuBrWh_BASE, AuBrWh_PLINK, AuBrWh_ORPH, AuBrWh_Last};
+struct au_wbr {
+ struct au_rwsem wbr_wh_rwsem;
+ struct dentry *wbr_wh[AuBrWh_Last];
+ atomic_t wbr_wh_running;
+#define wbr_whbase wbr_wh[AuBrWh_BASE] /* whiteout base */
+#define wbr_plink wbr_wh[AuBrWh_PLINK] /* pseudo-link dir */
+#define wbr_orph wbr_wh[AuBrWh_ORPH] /* dir for orphans */
+
+ /* mfs mode */
+ unsigned long long wbr_bytes;
+};
+
+/* ext2 has 3 types of operations at least, ext3 has 4 */
+#define AuBrDynOp (AuDyLast * 4)
+
+#ifdef CONFIG_AUFS_HFSNOTIFY
+/* support for asynchronous destruction */
+struct au_br_hfsnotify {
+ struct fsnotify_group *hfsn_group;
+};
+#endif
+
+/* sysfs entries */
+struct au_brsysfs {
+ char name[16];
+ struct attribute attr;
+};
+
+enum {
+ AuBrSysfs_BR,
+ AuBrSysfs_BRID,
+ AuBrSysfs_Last
+};
+
+/* protected by superblock rwsem */
+struct au_branch {
+ struct au_xino_file br_xino;
+
+ aufs_bindex_t br_id;
+
+ int br_perm;
+ struct path br_path;
+ spinlock_t br_dykey_lock;
+ struct au_dykey *br_dykey[AuBrDynOp];
+ atomic_t br_count;
+
+ struct au_wbr *br_wbr;
+ struct au_br_fhsm *br_fhsm;
+
+ /* xino truncation */
+ atomic_t br_xino_running;
+
+#ifdef CONFIG_AUFS_HFSNOTIFY
+ struct au_br_hfsnotify *br_hfsn;
+#endif
+
+#ifdef CONFIG_SYSFS
+ /* entries under sysfs per mount-point */
+ struct au_brsysfs br_sysfs[AuBrSysfs_Last];
+#endif
+};
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct vfsmount *au_br_mnt(struct au_branch *br)
+{
+ return br->br_path.mnt;
+}
+
+static inline struct dentry *au_br_dentry(struct au_branch *br)
+{
+ return br->br_path.dentry;
+}
+
+static inline struct super_block *au_br_sb(struct au_branch *br)
+{
+ return au_br_mnt(br)->mnt_sb;
+}
+
+static inline int au_br_rdonly(struct au_branch *br)
+{
+ return ((au_br_sb(br)->s_flags & MS_RDONLY)
+ || !au_br_writable(br->br_perm))
+ ? -EROFS : 0;
+}
+
+static inline int au_br_hnotifyable(int brperm __maybe_unused)
+{
+#ifdef CONFIG_AUFS_HNOTIFY
+ return !(brperm & AuBrPerm_RR);
+#else
+ return 0;
+#endif
+}
+
+static inline int au_br_test_oflag(int oflag, struct au_branch *br)
+{
+ int err, exec_flag;
+
+ err = 0;
+ exec_flag = oflag & __FMODE_EXEC;
+ if (unlikely(exec_flag && (au_br_mnt(br)->mnt_flags & MNT_NOEXEC)))
+ err = -EACCES;
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* branch.c */
+struct au_sbinfo;
+void au_br_free(struct au_sbinfo *sinfo);
+int au_br_index(struct super_block *sb, aufs_bindex_t br_id);
+struct au_opt_add;
+int au_br_add(struct super_block *sb, struct au_opt_add *add, int remount);
+struct au_opt_del;
+int au_br_del(struct super_block *sb, struct au_opt_del *del, int remount);
+long au_ibusy_ioctl(struct file *file, unsigned long arg);
+#ifdef CONFIG_COMPAT
+long au_ibusy_compat_ioctl(struct file *file, unsigned long arg);
+#endif
+struct au_opt_mod;
+int au_br_mod(struct super_block *sb, struct au_opt_mod *mod, int remount,
+ int *do_refresh);
+struct aufs_stfs;
+int au_br_stfs(struct au_branch *br, struct aufs_stfs *stfs);
+
+/* xino.c */
+static const loff_t au_loff_max = LLONG_MAX;
+
+int au_xib_trunc(struct super_block *sb);
+ssize_t xino_fread(vfs_readf_t func, struct file *file, void *buf, size_t size,
+ loff_t *pos);
+ssize_t xino_fwrite(vfs_writef_t func, struct file *file, void *buf,
+ size_t size, loff_t *pos);
+struct file *au_xino_create2(struct file *base_file, struct file *copy_src);
+struct file *au_xino_create(struct super_block *sb, char *fname, int silent);
+ino_t au_xino_new_ino(struct super_block *sb);
+void au_xino_delete_inode(struct inode *inode, const int unlinked);
+int au_xino_write(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+ ino_t ino);
+int au_xino_read(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+ ino_t *ino);
+int au_xino_br(struct super_block *sb, struct au_branch *br, ino_t hino,
+ struct file *base_file, int do_test);
+int au_xino_trunc(struct super_block *sb, aufs_bindex_t bindex);
+
+struct au_opt_xino;
+int au_xino_set(struct super_block *sb, struct au_opt_xino *xino, int remount);
+void au_xino_clr(struct super_block *sb);
+struct file *au_xino_def(struct super_block *sb);
+int au_xino_path(struct seq_file *seq, struct file *file);
+
+/* ---------------------------------------------------------------------- */
+
+/* Superblock to branch */
+static inline
+aufs_bindex_t au_sbr_id(struct super_block *sb, aufs_bindex_t bindex)
+{
+ return au_sbr(sb, bindex)->br_id;
+}
+
+static inline
+struct vfsmount *au_sbr_mnt(struct super_block *sb, aufs_bindex_t bindex)
+{
+ return au_br_mnt(au_sbr(sb, bindex));
+}
+
+static inline
+struct super_block *au_sbr_sb(struct super_block *sb, aufs_bindex_t bindex)
+{
+ return au_br_sb(au_sbr(sb, bindex));
+}
+
+static inline void au_sbr_put(struct super_block *sb, aufs_bindex_t bindex)
+{
+ atomic_dec(&au_sbr(sb, bindex)->br_count);
+}
+
+static inline int au_sbr_perm(struct super_block *sb, aufs_bindex_t bindex)
+{
+ return au_sbr(sb, bindex)->br_perm;
+}
+
+static inline int au_sbr_whable(struct super_block *sb, aufs_bindex_t bindex)
+{
+ return au_br_whable(au_sbr_perm(sb, bindex));
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * wbr_wh_read_lock, wbr_wh_write_lock
+ * wbr_wh_read_unlock, wbr_wh_write_unlock, wbr_wh_downgrade_lock
+ */
+AuSimpleRwsemFuncs(wbr_wh, struct au_wbr *wbr, &wbr->wbr_wh_rwsem);
+
+#define WbrWhMustNoWaiters(wbr) AuRwMustNoWaiters(&wbr->wbr_wh_rwsem)
+#define WbrWhMustAnyLock(wbr) AuRwMustAnyLock(&wbr->wbr_wh_rwsem)
+#define WbrWhMustWriteLock(wbr) AuRwMustWriteLock(&wbr->wbr_wh_rwsem)
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_FHSM
+static inline void au_br_fhsm_init(struct au_br_fhsm *brfhsm)
+{
+ mutex_init(&brfhsm->bf_lock);
+ brfhsm->bf_jiffy = 0;
+ brfhsm->bf_readable = 0;
+}
+
+static inline void au_br_fhsm_fin(struct au_br_fhsm *brfhsm)
+{
+ mutex_destroy(&brfhsm->bf_lock);
+}
+#else
+AuStubVoid(au_br_fhsm_init, struct au_br_fhsm *brfhsm)
+AuStubVoid(au_br_fhsm_fin, struct au_br_fhsm *brfhsm)
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_BRANCH_H__ */
diff --git a/fs/aufs/cpup.c b/fs/aufs/cpup.c
new file mode 100644
index 000000000..9ff0963ad
--- /dev/null
+++ b/fs/aufs/cpup.c
@@ -0,0 +1,1366 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * copy-up functions, see wbr_policy.c for copy-down
+ */
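+
+/*
+ * in aufs, copy-up duplicates an entry from a lower (typically read-only)
+ * branch onto an upper writable branch, preserving its attributes and
+ * timestamps, so that the entry can then be modified on the upper branch.
+ */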
+
+#include <linux/fs_stack.h>
+#include <linux/mm.h>
+#include <linux/task_work.h>
+#include "aufs.h"
+
+void au_cpup_attr_flags(struct inode *dst, unsigned int iflags)
+{
+ const unsigned int mask = S_DEAD | S_SWAPFILE | S_PRIVATE
+ | S_NOATIME | S_NOCMTIME | S_AUTOMOUNT;
+
+ BUILD_BUG_ON(sizeof(iflags) != sizeof(dst->i_flags));
+
+ dst->i_flags |= iflags & ~mask;
+ if (au_test_fs_notime(dst->i_sb))
+ dst->i_flags |= S_NOATIME | S_NOCMTIME;
+}
+
+void au_cpup_attr_timesizes(struct inode *inode)
+{
+ struct inode *h_inode;
+
+ h_inode = au_h_iptr(inode, au_ibstart(inode));
+ fsstack_copy_attr_times(inode, h_inode);
+ fsstack_copy_inode_size(inode, h_inode);
+}
+
+void au_cpup_attr_nlink(struct inode *inode, int force)
+{
+ struct inode *h_inode;
+ struct super_block *sb;
+ aufs_bindex_t bindex, bend;
+
+ sb = inode->i_sb;
+ bindex = au_ibstart(inode);
+ h_inode = au_h_iptr(inode, bindex);
+ if (!force
+ && !S_ISDIR(h_inode->i_mode)
+ && au_opt_test(au_mntflags(sb), PLINK)
+ && au_plink_test(inode))
+ return;
+
+ /*
+ * an i_nlink of 0 can happen during revalidation.
+ * h_inode->i_mutex may not be held here, but it is harmless since once
+ * i_nlink reaches 0, it will never become positive again except in the
+ * O_TMPFILE case.
+ * todo: O_TMPFILE+linkat(AT_SYMLINK_FOLLOW) bypassing aufs may cause
+ * an incorrect link count.
+ */
+ set_nlink(inode, h_inode->i_nlink);
+
+ /*
+ * a smaller nlink makes find(1) noisy, but a larger one does not.
+ * it may include the whplink directory.
+ */
+ if (S_ISDIR(h_inode->i_mode)) {
+ bend = au_ibend(inode);
+ for (bindex++; bindex <= bend; bindex++) {
+ h_inode = au_h_iptr(inode, bindex);
+ if (h_inode)
+ au_add_nlink(inode, h_inode);
+ }
+ }
+}
+
+void au_cpup_attr_changeable(struct inode *inode)
+{
+ struct inode *h_inode;
+
+ h_inode = au_h_iptr(inode, au_ibstart(inode));
+ inode->i_mode = h_inode->i_mode;
+ inode->i_uid = h_inode->i_uid;
+ inode->i_gid = h_inode->i_gid;
+ au_cpup_attr_timesizes(inode);
+ au_cpup_attr_flags(inode, h_inode->i_flags);
+}
+
+void au_cpup_igen(struct inode *inode, struct inode *h_inode)
+{
+ struct au_iinfo *iinfo = au_ii(inode);
+
+ IiMustWriteLock(inode);
+
+ iinfo->ii_higen = h_inode->i_generation;
+ iinfo->ii_hsb1 = h_inode->i_sb;
+}
+
+void au_cpup_attr_all(struct inode *inode, int force)
+{
+ struct inode *h_inode;
+
+ h_inode = au_h_iptr(inode, au_ibstart(inode));
+ au_cpup_attr_changeable(inode);
+ if (inode->i_nlink > 0)
+ au_cpup_attr_nlink(inode, force);
+ inode->i_rdev = h_inode->i_rdev;
+ inode->i_blkbits = h_inode->i_blkbits;
+ au_cpup_igen(inode, h_inode);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* Note: dt_dentry and dt_h_dentry are not dget/dput-ed */
+
+/* keep the timestamps of the parent dir during copy-up */
+void au_dtime_store(struct au_dtime *dt, struct dentry *dentry,
+ struct path *h_path)
+{
+ struct inode *h_inode;
+
+ dt->dt_dentry = dentry;
+ dt->dt_h_path = *h_path;
+ h_inode = d_inode(h_path->dentry);
+ dt->dt_atime = h_inode->i_atime;
+ dt->dt_mtime = h_inode->i_mtime;
+ /* smp_mb(); */
+}
+
+void au_dtime_revert(struct au_dtime *dt)
+{
+ struct iattr attr;
+ int err;
+
+ attr.ia_atime = dt->dt_atime;
+ attr.ia_mtime = dt->dt_mtime;
+ attr.ia_valid = ATTR_FORCE | ATTR_MTIME | ATTR_MTIME_SET
+ | ATTR_ATIME | ATTR_ATIME_SET;
+
+ /* no delegation since this is a directory */
+ err = vfsub_notify_change(&dt->dt_h_path, &attr, /*delegated*/NULL);
+ if (unlikely(err))
+ pr_warn("restoring timestamps failed(%d). ignored\n", err);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* internal use only */
+struct au_cpup_reg_attr {
+ int valid;
+ struct kstat st;
+ unsigned int iflags; /* inode->i_flags */
+};
+
+static noinline_for_stack
+int cpup_iattr(struct dentry *dst, aufs_bindex_t bindex, struct dentry *h_src,
+ struct au_cpup_reg_attr *h_src_attr)
+{
+ int err, sbits, icex;
+ unsigned int mnt_flags;
+ unsigned char verbose;
+ struct iattr ia;
+ struct path h_path;
+ struct inode *h_isrc, *h_idst;
+ struct kstat *h_st;
+ struct au_branch *br;
+
+ h_path.dentry = au_h_dptr(dst, bindex);
+ h_idst = d_inode(h_path.dentry);
+ br = au_sbr(dst->d_sb, bindex);
+ h_path.mnt = au_br_mnt(br);
+ h_isrc = d_inode(h_src);
+ ia.ia_valid = ATTR_FORCE | ATTR_UID | ATTR_GID
+ | ATTR_ATIME | ATTR_MTIME
+ | ATTR_ATIME_SET | ATTR_MTIME_SET;
+ if (h_src_attr && h_src_attr->valid) {
+ h_st = &h_src_attr->st;
+ ia.ia_uid = h_st->uid;
+ ia.ia_gid = h_st->gid;
+ ia.ia_atime = h_st->atime;
+ ia.ia_mtime = h_st->mtime;
+ if (h_idst->i_mode != h_st->mode
+ && !S_ISLNK(h_idst->i_mode)) {
+ ia.ia_valid |= ATTR_MODE;
+ ia.ia_mode = h_st->mode;
+ }
+ sbits = !!(h_st->mode & (S_ISUID | S_ISGID));
+ au_cpup_attr_flags(h_idst, h_src_attr->iflags);
+ } else {
+ ia.ia_uid = h_isrc->i_uid;
+ ia.ia_gid = h_isrc->i_gid;
+ ia.ia_atime = h_isrc->i_atime;
+ ia.ia_mtime = h_isrc->i_mtime;
+ if (h_idst->i_mode != h_isrc->i_mode
+ && !S_ISLNK(h_idst->i_mode)) {
+ ia.ia_valid |= ATTR_MODE;
+ ia.ia_mode = h_isrc->i_mode;
+ }
+ sbits = !!(h_isrc->i_mode & (S_ISUID | S_ISGID));
+ au_cpup_attr_flags(h_idst, h_isrc->i_flags);
+ }
+ /* no delegation since it is just created */
+ err = vfsub_notify_change(&h_path, &ia, /*delegated*/NULL);
+
+ /* is this nfs only? */
+ if (!err && sbits && au_test_nfs(h_path.dentry->d_sb)) {
+ ia.ia_valid = ATTR_FORCE | ATTR_MODE;
+ ia.ia_mode = h_isrc->i_mode;
+ err = vfsub_notify_change(&h_path, &ia, /*delegated*/NULL);
+ }
+
+ icex = br->br_perm & AuBrAttr_ICEX;
+ if (!err) {
+ mnt_flags = au_mntflags(dst->d_sb);
+ verbose = !!au_opt_test(mnt_flags, VERBOSE);
+ err = au_cpup_xattr(h_path.dentry, h_src, icex, verbose);
+ }
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_do_copy_file(struct file *dst, struct file *src, loff_t len,
+ char *buf, unsigned long blksize)
+{
+ int err;
+ size_t sz, rbytes, wbytes;
+ unsigned char all_zero;
+ char *p, *zp;
+ struct inode *h_inode;
+ /* reduce stack usage */
+ struct iattr *ia;
+
+ zp = page_address(ZERO_PAGE(0));
+ if (unlikely(!zp))
+ return -ENOMEM; /* possible? */
+
+ err = 0;
+ all_zero = 0;
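+ /*
+ * copy block by block; a block that reads back as all zeroes is not
+ * written but skipped with llseek, keeping the destination sparse.
+ */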
+ while (len) {
+ AuDbg("len %lld\n", len);
+ sz = blksize;
+ if (len < blksize)
+ sz = len;
+
+ rbytes = 0;
+ /* todo: signal_pending? */
+ while (!rbytes || err == -EAGAIN || err == -EINTR) {
+ rbytes = vfsub_read_k(src, buf, sz, &src->f_pos);
+ err = rbytes;
+ }
+ if (unlikely(err < 0))
+ break;
+
+ all_zero = 0;
+ if (len >= rbytes && rbytes == blksize)
+ all_zero = !memcmp(buf, zp, rbytes);
+ if (!all_zero) {
+ wbytes = rbytes;
+ p = buf;
+ while (wbytes) {
+ size_t b;
+
+ b = vfsub_write_k(dst, p, wbytes, &dst->f_pos);
+ err = b;
+ /* todo: signal_pending? */
+ if (unlikely(err == -EAGAIN || err == -EINTR))
+ continue;
+ if (unlikely(err < 0))
+ break;
+ wbytes -= b;
+ p += b;
+ }
+ if (unlikely(err < 0))
+ break;
+ } else {
+ loff_t res;
+
+ AuLabel(hole);
+ res = vfsub_llseek(dst, rbytes, SEEK_CUR);
+ err = res;
+ if (unlikely(res < 0))
+ break;
+ }
+ len -= rbytes;
+ err = 0;
+ }
+
+ /* the last block may be a hole */
+ if (!err && all_zero) {
+ AuLabel(last hole);
+
+ err = 1;
+ if (au_test_nfs(dst->f_path.dentry->d_sb)) {
+ /* nfs requires this step to make last hole */
+ /* is this only nfs? */
+ do {
+ /* todo: signal_pending? */
+ err = vfsub_write_k(dst, "\0", 1, &dst->f_pos);
+ } while (err == -EAGAIN || err == -EINTR);
+ if (err == 1)
+ dst->f_pos--;
+ }
+
+ if (err == 1) {
+ ia = (void *)buf;
+ ia->ia_size = dst->f_pos;
+ ia->ia_valid = ATTR_SIZE | ATTR_FILE;
+ ia->ia_file = dst;
+ h_inode = file_inode(dst);
+ inode_lock_nested(h_inode, AuLsc_I_CHILD2);
+ /* no delegation since it is just created */
+ err = vfsub_notify_change(&dst->f_path, ia,
+ /*delegated*/NULL);
+ inode_unlock(h_inode);
+ }
+ }
+
+ return err;
+}
+
+int au_copy_file(struct file *dst, struct file *src, loff_t len)
+{
+ int err;
+ unsigned long blksize;
+ unsigned char do_kfree;
+ char *buf;
+
+ err = -ENOMEM;
+ blksize = dst->f_path.dentry->d_sb->s_blocksize;
+ if (!blksize || PAGE_SIZE < blksize)
+ blksize = PAGE_SIZE;
+ AuDbg("blksize %lu\n", blksize);
+ do_kfree = (blksize != PAGE_SIZE && blksize >= sizeof(struct iattr *));
+ if (do_kfree)
+ buf = kmalloc(blksize, GFP_NOFS);
+ else
+ buf = (void *)__get_free_page(GFP_NOFS);
+ if (unlikely(!buf))
+ goto out;
+
+ if (len > (1 << 22))
+ AuDbg("copying a large file %lld\n", (long long)len);
+
+ src->f_pos = 0;
+ dst->f_pos = 0;
+ err = au_do_copy_file(dst, src, len, buf, blksize);
+ if (do_kfree)
+ kfree(buf);
+ else
+ free_page((unsigned long)buf);
+
+out:
+ return err;
+}
+
+/*
+ * to support a sparse file which is opened with O_APPEND,
+ * we need to close the file.
+ */
+static int au_cp_regular(struct au_cp_generic *cpg)
+{
+ int err, i;
+ enum { SRC, DST };
+ struct {
+ aufs_bindex_t bindex;
+ unsigned int flags;
+ struct dentry *dentry;
+ int force_wr;
+ struct file *file;
+ void *label;
+ } *f, file[] = {
+ {
+ .bindex = cpg->bsrc,
+ .flags = O_RDONLY | O_NOATIME | O_LARGEFILE,
+ .label = &&out
+ },
+ {
+ .bindex = cpg->bdst,
+ .flags = O_WRONLY | O_NOATIME | O_LARGEFILE,
+ .force_wr = !!au_ftest_cpup(cpg->flags, RWDST),
+ .label = &&out_src
+ }
+ };
+ struct super_block *sb;
+ struct task_struct *tsk = current;
+
+ /* bsrc branch can be ro/rw. */
+ sb = cpg->dentry->d_sb;
+ f = file;
+ for (i = 0; i < 2; i++, f++) {
+ f->dentry = au_h_dptr(cpg->dentry, f->bindex);
+ f->file = au_h_open(cpg->dentry, f->bindex, f->flags,
+ /*file*/NULL, f->force_wr);
+ err = PTR_ERR(f->file);
+ if (IS_ERR(f->file))
+ goto *f->label;
+ }
+
+ /* try to prevent updates while we copy up */
+ IMustLock(d_inode(file[SRC].dentry));
+ err = au_copy_file(file[DST].file, file[SRC].file, cpg->len);
+
+ /* I wonder if we had an O_NO_DELAY_FPUT flag */
+ if (tsk->flags & PF_KTHREAD)
+ __fput_sync(file[DST].file);
+ else {
+ WARN(1, "%pD\nPlease report this warning to aufs-users ML",
+ file[DST].file);
+ fput(file[DST].file);
+ /*
+ * too bad.
+ * we have to call both since we don't know which place the file
+ * was added to.
+ */
+ task_work_run();
+ flush_delayed_fput();
+ }
+ au_sbr_put(sb, file[DST].bindex);
+
+out_src:
+ fput(file[SRC].file);
+ au_sbr_put(sb, file[SRC].bindex);
+out:
+ return err;
+}
+
+static int au_do_cpup_regular(struct au_cp_generic *cpg,
+ struct au_cpup_reg_attr *h_src_attr)
+{
+ int err, rerr;
+ loff_t l;
+ struct path h_path;
+ struct inode *h_src_inode, *h_dst_inode;
+
+ err = 0;
+ h_src_inode = au_h_iptr(d_inode(cpg->dentry), cpg->bsrc);
+ l = i_size_read(h_src_inode);
+ if (cpg->len == -1 || l < cpg->len)
+ cpg->len = l;
+ if (cpg->len) {
+ /* try to prevent updates while we are referencing */
+ inode_lock_nested(h_src_inode, AuLsc_I_CHILD);
+ au_pin_hdir_unlock(cpg->pin);
+
+ h_path.dentry = au_h_dptr(cpg->dentry, cpg->bsrc);
+ h_path.mnt = au_sbr_mnt(cpg->dentry->d_sb, cpg->bsrc);
+ h_src_attr->iflags = h_src_inode->i_flags;
+ if (!au_test_nfs(h_src_inode->i_sb))
+ err = vfs_getattr(&h_path, &h_src_attr->st);
+ else {
+ inode_unlock(h_src_inode);
+ err = vfs_getattr(&h_path, &h_src_attr->st);
+ inode_lock_nested(h_src_inode, AuLsc_I_CHILD);
+ }
+ if (unlikely(err)) {
+ inode_unlock(h_src_inode);
+ goto out;
+ }
+ h_src_attr->valid = 1;
+ err = au_cp_regular(cpg);
+ inode_unlock(h_src_inode);
+ rerr = au_pin_hdir_relock(cpg->pin);
+ if (!err && rerr)
+ err = rerr;
+ }
+ if (!err && (h_src_inode->i_state & I_LINKABLE)) {
+ h_path.dentry = au_h_dptr(cpg->dentry, cpg->bdst);
+ h_dst_inode = d_inode(h_path.dentry);
+ spin_lock(&h_dst_inode->i_lock);
+ h_dst_inode->i_state |= I_LINKABLE;
+ spin_unlock(&h_dst_inode->i_lock);
+ }
+
+out:
+ return err;
+}
+
+static int au_do_cpup_symlink(struct path *h_path, struct dentry *h_src,
+ struct inode *h_dir)
+{
+ int err, symlen;
+ mm_segment_t old_fs;
+ union {
+ char *k;
+ char __user *u;
+ } sym;
+ struct inode *h_inode = d_inode(h_src);
+ const struct inode_operations *h_iop = h_inode->i_op;
+
+ err = -ENOSYS;
+ if (unlikely(!h_iop->readlink))
+ goto out;
+
+ err = -ENOMEM;
+ sym.k = (void *)__get_free_page(GFP_NOFS);
+ if (unlikely(!sym.k))
+ goto out;
+
+ /* unnecessary to support mmap_sem since symlink is not mmap-able */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ symlen = h_iop->readlink(h_src, sym.u, PATH_MAX);
+ err = symlen;
+ set_fs(old_fs);
+
+ if (symlen > 0) {
+ sym.k[symlen] = 0;
+ err = vfsub_symlink(h_dir, h_path, sym.k);
+ }
+ free_page((unsigned long)sym.k);
+
+out:
+ return err;
+}
+
+/*
+ * regardless of the 'acl' option, reset all ACLs.
+ * All ACLs will be copied up later from the original entry on the lower branch.
+ */
+static int au_reset_acl(struct inode *h_dir, struct path *h_path, umode_t mode)
+{
+ int err;
+ struct dentry *h_dentry;
+ struct inode *h_inode;
+
+ h_dentry = h_path->dentry;
+ h_inode = d_inode(h_dentry);
+ /* forget_all_cached_acls(h_inode)); */
+ err = vfsub_removexattr(h_dentry, XATTR_NAME_POSIX_ACL_ACCESS);
+ AuTraceErr(err);
+ if (err == -EOPNOTSUPP)
+ err = 0;
+ if (!err)
+ err = vfsub_acl_chmod(h_inode, mode);
+
+ AuTraceErr(err);
+ return err;
+}
+
+static int au_do_cpup_dir(struct au_cp_generic *cpg, struct dentry *dst_parent,
+ struct inode *h_dir, struct path *h_path)
+{
+ int err;
+ struct inode *dir, *inode;
+
+ err = vfsub_removexattr(h_path->dentry, XATTR_NAME_POSIX_ACL_DEFAULT);
+ AuTraceErr(err);
+ if (err == -EOPNOTSUPP)
+ err = 0;
+ if (unlikely(err))
+ goto out;
+
+ /*
+ * strange behaviour from the user's view,
+ * particularly in the setattr case
+ */
+ dir = d_inode(dst_parent);
+ if (au_ibstart(dir) == cpg->bdst)
+ au_cpup_attr_nlink(dir, /*force*/1);
+ inode = d_inode(cpg->dentry);
+ au_cpup_attr_nlink(inode, /*force*/1);
+
+out:
+ return err;
+}
+
+static noinline_for_stack
+int cpup_entry(struct au_cp_generic *cpg, struct dentry *dst_parent,
+ struct au_cpup_reg_attr *h_src_attr)
+{
+ int err;
+ umode_t mode;
+ unsigned int mnt_flags;
+ unsigned char isdir, isreg, force;
+ const unsigned char do_dt = !!au_ftest_cpup(cpg->flags, DTIME);
+ struct au_dtime dt;
+ struct path h_path;
+ struct dentry *h_src, *h_dst, *h_parent;
+ struct inode *h_inode, *h_dir;
+ struct super_block *sb;
+
+ /* bsrc branch can be ro/rw. */
+ h_src = au_h_dptr(cpg->dentry, cpg->bsrc);
+ h_inode = d_inode(h_src);
+ AuDebugOn(h_inode != au_h_iptr(d_inode(cpg->dentry), cpg->bsrc));
+
+ /* try to prevent new references while we are creating */
+ h_dst = au_h_dptr(cpg->dentry, cpg->bdst);
+ if (au_ftest_cpup(cpg->flags, RENAME))
+ AuDebugOn(strncmp(h_dst->d_name.name, AUFS_WH_PFX,
+ AUFS_WH_PFX_LEN));
+ h_parent = h_dst->d_parent; /* dir inode is locked */
+ h_dir = d_inode(h_parent);
+ IMustLock(h_dir);
+ AuDebugOn(h_parent != h_dst->d_parent);
+
+ sb = cpg->dentry->d_sb;
+ h_path.mnt = au_sbr_mnt(sb, cpg->bdst);
+ if (do_dt) {
+ h_path.dentry = h_parent;
+ au_dtime_store(&dt, dst_parent, &h_path);
+ }
+ h_path.dentry = h_dst;
+
+ isreg = 0;
+ isdir = 0;
+ mode = h_inode->i_mode;
+ switch (mode & S_IFMT) {
+ case S_IFREG:
+ isreg = 1;
+ err = vfsub_create(h_dir, &h_path, S_IRUSR | S_IWUSR,
+ /*want_excl*/true);
+ if (!err)
+ err = au_do_cpup_regular(cpg, h_src_attr);
+ break;
+ case S_IFDIR:
+ isdir = 1;
+ err = vfsub_mkdir(h_dir, &h_path, mode);
+ if (!err)
+ err = au_do_cpup_dir(cpg, dst_parent, h_dir, &h_path);
+ break;
+ case S_IFLNK:
+ err = au_do_cpup_symlink(&h_path, h_src, h_dir);
+ break;
+ case S_IFCHR:
+ case S_IFBLK:
+ AuDebugOn(!capable(CAP_MKNOD));
+ /*FALLTHROUGH*/
+ case S_IFIFO:
+ case S_IFSOCK:
+ err = vfsub_mknod(h_dir, &h_path, mode, h_inode->i_rdev);
+ break;
+ default:
+ AuIOErr("Unknown inode type 0%o\n", mode);
+ err = -EIO;
+ }
+ if (!err)
+ err = au_reset_acl(h_dir, &h_path, mode);
+
+ mnt_flags = au_mntflags(sb);
+ if (!au_opt_test(mnt_flags, UDBA_NONE)
+ && !isdir
+ && au_opt_test(mnt_flags, XINO)
+ && (h_inode->i_nlink == 1
+ || (h_inode->i_state & I_LINKABLE))
+ /* todo: unnecessary? */
+ /* && d_inode(cpg->dentry)->i_nlink == 1 */
+ && cpg->bdst < cpg->bsrc
+ && !au_ftest_cpup(cpg->flags, KEEPLINO))
+ au_xino_write(sb, cpg->bsrc, h_inode->i_ino, /*ino*/0);
+ /* ignore this error */
+
+ if (!err) {
+ force = 0;
+ if (isreg) {
+ force = !!cpg->len;
+ if (cpg->len == -1)
+ force = !!i_size_read(h_inode);
+ }
+ au_fhsm_wrote(sb, cpg->bdst, force);
+ }
+
+ if (do_dt)
+ au_dtime_revert(&dt);
+ return err;
+}
+
+static int au_do_ren_after_cpup(struct au_cp_generic *cpg, struct path *h_path)
+{
+ int err;
+ struct dentry *dentry, *h_dentry, *h_parent, *parent;
+ struct inode *h_dir;
+ aufs_bindex_t bdst;
+
+ dentry = cpg->dentry;
+ bdst = cpg->bdst;
+ h_dentry = au_h_dptr(dentry, bdst);
+ if (!au_ftest_cpup(cpg->flags, OVERWRITE)) {
+ dget(h_dentry);
+ au_set_h_dptr(dentry, bdst, NULL);
+ err = au_lkup_neg(dentry, bdst, /*wh*/0);
+ if (!err)
+ h_path->dentry = dget(au_h_dptr(dentry, bdst));
+ au_set_h_dptr(dentry, bdst, h_dentry);
+ } else {
+ err = 0;
+ parent = dget_parent(dentry);
+ h_parent = au_h_dptr(parent, bdst);
+ dput(parent);
+ h_path->dentry = vfsub_lkup_one(&dentry->d_name, h_parent);
+ if (IS_ERR(h_path->dentry))
+ err = PTR_ERR(h_path->dentry);
+ }
+ if (unlikely(err))
+ goto out;
+
+ h_parent = h_dentry->d_parent; /* dir inode is locked */
+ h_dir = d_inode(h_parent);
+ IMustLock(h_dir);
+ AuDbg("%pd %pd\n", h_dentry, h_path->dentry);
+ /* no delegation since it is just created */
+ err = vfsub_rename(h_dir, h_dentry, h_dir, h_path, /*delegated*/NULL);
+ dput(h_path->dentry);
+
+out:
+ return err;
+}
+
+/*
+ * copyup the @dentry from @bsrc to @bdst.
+ * the caller must set both of the lower dentries.
+ * @len is for truncating; when it is -1, copy up the entire file.
+ * in link/rename cases, @dst_parent may be different from the real one.
+ * cpg->bsrc can be larger than cpg->bdst.
+ */
+static int au_cpup_single(struct au_cp_generic *cpg, struct dentry *dst_parent)
+{
+ int err, rerr;
+ aufs_bindex_t old_ibstart;
+ unsigned char isdir, plink;
+ struct dentry *h_src, *h_dst, *h_parent;
+ struct inode *dst_inode, *h_dir, *inode, *delegated, *src_inode;
+ struct super_block *sb;
+ struct au_branch *br;
+ /* to reduce stack size */
+ struct {
+ struct au_dtime dt;
+ struct path h_path;
+ struct au_cpup_reg_attr h_src_attr;
+ } *a;
+
+ err = -ENOMEM;
+ a = kmalloc(sizeof(*a), GFP_NOFS);
+ if (unlikely(!a))
+ goto out;
+ a->h_src_attr.valid = 0;
+
+ sb = cpg->dentry->d_sb;
+ br = au_sbr(sb, cpg->bdst);
+ a->h_path.mnt = au_br_mnt(br);
+ h_dst = au_h_dptr(cpg->dentry, cpg->bdst);
+ h_parent = h_dst->d_parent; /* dir inode is locked */
+ h_dir = d_inode(h_parent);
+ IMustLock(h_dir);
+
+ h_src = au_h_dptr(cpg->dentry, cpg->bsrc);
+ inode = d_inode(cpg->dentry);
+
+ if (!dst_parent)
+ dst_parent = dget_parent(cpg->dentry);
+ else
+ dget(dst_parent);
+
+ plink = !!au_opt_test(au_mntflags(sb), PLINK);
+ dst_inode = au_h_iptr(inode, cpg->bdst);
+ if (dst_inode) {
+ if (unlikely(!plink)) {
+ err = -EIO;
+ AuIOErr("hi%lu(i%lu) exists on b%d "
+ "but plink is disabled\n",
+ dst_inode->i_ino, inode->i_ino, cpg->bdst);
+ goto out_parent;
+ }
+
+ if (dst_inode->i_nlink) {
+ const int do_dt = au_ftest_cpup(cpg->flags, DTIME);
+
+ h_src = au_plink_lkup(inode, cpg->bdst);
+ err = PTR_ERR(h_src);
+ if (IS_ERR(h_src))
+ goto out_parent;
+ if (unlikely(d_is_negative(h_src))) {
+ err = -EIO;
+ AuIOErr("i%lu exists on b%d "
+ "but not pseudo-linked\n",
+ inode->i_ino, cpg->bdst);
+ dput(h_src);
+ goto out_parent;
+ }
+
+ if (do_dt) {
+ a->h_path.dentry = h_parent;
+ au_dtime_store(&a->dt, dst_parent, &a->h_path);
+ }
+
+ a->h_path.dentry = h_dst;
+ delegated = NULL;
+ err = vfsub_link(h_src, h_dir, &a->h_path, &delegated);
+ if (!err && au_ftest_cpup(cpg->flags, RENAME))
+ err = au_do_ren_after_cpup(cpg, &a->h_path);
+ if (do_dt)
+ au_dtime_revert(&a->dt);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal link\n");
+ iput(delegated);
+ }
+ dput(h_src);
+ goto out_parent;
+ } else
+ /* todo: cpup_wh_file? */
+ /* udba work */
+ au_update_ibrange(inode, /*do_put_zero*/1);
+ }
+
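+ /*
+ * normal path: create the entry on the destination branch, copy its
+ * contents and attributes, and update the aufs inode; on failure the
+ * partially created entry is removed at the out_rev label below.
+ */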
+ isdir = S_ISDIR(inode->i_mode);
+ old_ibstart = au_ibstart(inode);
+ err = cpup_entry(cpg, dst_parent, &a->h_src_attr);
+ if (unlikely(err))
+ goto out_rev;
+ dst_inode = d_inode(h_dst);
+ inode_lock_nested(dst_inode, AuLsc_I_CHILD2);
+ /* todo: necessary? */
+ /* au_pin_hdir_unlock(cpg->pin); */
+
+ err = cpup_iattr(cpg->dentry, cpg->bdst, h_src, &a->h_src_attr);
+ if (unlikely(err)) {
+ /* todo: necessary? */
+ /* au_pin_hdir_relock(cpg->pin); */ /* ignore an error */
+ inode_unlock(dst_inode);
+ goto out_rev;
+ }
+
+ if (cpg->bdst < old_ibstart) {
+ if (S_ISREG(inode->i_mode)) {
+ err = au_dy_iaop(inode, cpg->bdst, dst_inode);
+ if (unlikely(err)) {
+ /* ignore an error */
+ /* au_pin_hdir_relock(cpg->pin); */
+ inode_unlock(dst_inode);
+ goto out_rev;
+ }
+ }
+ au_set_ibstart(inode, cpg->bdst);
+ } else
+ au_set_ibend(inode, cpg->bdst);
+ au_set_h_iptr(inode, cpg->bdst, au_igrab(dst_inode),
+ au_hi_flags(inode, isdir));
+
+ /* todo: necessary? */
+ /* err = au_pin_hdir_relock(cpg->pin); */
+ inode_unlock(dst_inode);
+ if (unlikely(err))
+ goto out_rev;
+
+ src_inode = d_inode(h_src);
+ if (!isdir
+ && (src_inode->i_nlink > 1
+ || src_inode->i_state & I_LINKABLE)
+ && plink)
+ au_plink_append(inode, cpg->bdst, h_dst);
+
+ if (au_ftest_cpup(cpg->flags, RENAME)) {
+ a->h_path.dentry = h_dst;
+ err = au_do_ren_after_cpup(cpg, &a->h_path);
+ }
+ if (!err)
+ goto out_parent; /* success */
+
+ /* revert */
+out_rev:
+ a->h_path.dentry = h_parent;
+ au_dtime_store(&a->dt, dst_parent, &a->h_path);
+ a->h_path.dentry = h_dst;
+ rerr = 0;
+ if (d_is_positive(h_dst)) {
+ if (!isdir) {
+ /* no delegation since it is just created */
+ rerr = vfsub_unlink(h_dir, &a->h_path,
+ /*delegated*/NULL, /*force*/0);
+ } else
+ rerr = vfsub_rmdir(h_dir, &a->h_path);
+ }
+ au_dtime_revert(&a->dt);
+ if (rerr) {
+ AuIOErr("failed removing broken entry(%d, %d)\n", err, rerr);
+ err = -EIO;
+ }
+out_parent:
+ dput(dst_parent);
+ kfree(a);
+out:
+ return err;
+}
+
+#if 0 /* reserved */
+struct au_cpup_single_args {
+ int *errp;
+ struct au_cp_generic *cpg;
+ struct dentry *dst_parent;
+};
+
+static void au_call_cpup_single(void *args)
+{
+ struct au_cpup_single_args *a = args;
+
+ au_pin_hdir_acquire_nest(a->cpg->pin);
+ *a->errp = au_cpup_single(a->cpg, a->dst_parent);
+ au_pin_hdir_release(a->cpg->pin);
+}
+#endif
+
+/*
+ * prevent SIGXFSZ in copy-up.
+ * testing CAP_MKNOD is for generic filesystems,
+ * but CAP_FSETID is currently for xfs only.
+ */
+static int au_cpup_sio_test(struct au_pin *pin, umode_t mode)
+{
+ int do_sio;
+ struct super_block *sb;
+ struct inode *h_dir;
+
+ do_sio = 0;
+ sb = au_pinned_parent(pin)->d_sb;
+ if (!au_wkq_test()
+ && (!au_sbi(sb)->si_plink_maint_pid
+ || au_plink_maint(sb, AuLock_NOPLM))) {
+ switch (mode & S_IFMT) {
+ case S_IFREG:
+ /* no condition about RLIMIT_FSIZE and the file size */
+ do_sio = 1;
+ break;
+ case S_IFCHR:
+ case S_IFBLK:
+ do_sio = !capable(CAP_MKNOD);
+ break;
+ }
+ if (!do_sio)
+ do_sio = ((mode & (S_ISUID | S_ISGID))
+ && !capable(CAP_FSETID));
+ /* this workaround may be removed in the future */
+ if (!do_sio) {
+ h_dir = au_pinned_h_dir(pin);
+ do_sio = h_dir->i_mode & S_ISVTX;
+ }
+ }
+
+ return do_sio;
+}
+
+#if 0 /* reserved */
+int au_sio_cpup_single(struct au_cp_generic *cpg, struct dentry *dst_parent)
+{
+ int err, wkq_err;
+ struct dentry *h_dentry;
+
+ h_dentry = au_h_dptr(cpg->dentry, cpg->bsrc);
+ if (!au_cpup_sio_test(pin, d_inode(h_dentry)->i_mode))
+ err = au_cpup_single(cpg, dst_parent);
+ else {
+ struct au_cpup_single_args args = {
+ .errp = &err,
+ .cpg = cpg,
+ .dst_parent = dst_parent
+ };
+ wkq_err = au_wkq_wait(au_call_cpup_single, &args);
+ if (unlikely(wkq_err))
+ err = wkq_err;
+ }
+
+ return err;
+}
+#endif
+
+/*
+ * copyup the @dentry from the first active lower branch to @bdst,
+ * using au_cpup_single().
+ */
+static int au_cpup_simple(struct au_cp_generic *cpg)
+{
+ int err;
+ unsigned int flags_orig;
+ struct dentry *dentry;
+
+ AuDebugOn(cpg->bsrc < 0);
+
+ dentry = cpg->dentry;
+ DiMustWriteLock(dentry);
+
+ err = au_lkup_neg(dentry, cpg->bdst, /*wh*/1);
+ if (!err) {
+ flags_orig = cpg->flags;
+ au_fset_cpup(cpg->flags, RENAME);
+ err = au_cpup_single(cpg, NULL);
+ cpg->flags = flags_orig;
+ if (!err)
+ return 0; /* success */
+
+ /* revert */
+ au_set_h_dptr(dentry, cpg->bdst, NULL);
+ au_set_dbstart(dentry, cpg->bsrc);
+ }
+
+ return err;
+}
+
+struct au_cpup_simple_args {
+ int *errp;
+ struct au_cp_generic *cpg;
+};
+
+static void au_call_cpup_simple(void *args)
+{
+ struct au_cpup_simple_args *a = args;
+
+ au_pin_hdir_acquire_nest(a->cpg->pin);
+ *a->errp = au_cpup_simple(a->cpg);
+ au_pin_hdir_release(a->cpg->pin);
+}
+
+static int au_do_sio_cpup_simple(struct au_cp_generic *cpg)
+{
+ int err, wkq_err;
+ struct dentry *dentry, *parent;
+ struct file *h_file;
+ struct inode *h_dir;
+
+ dentry = cpg->dentry;
+ h_file = NULL;
+ if (au_ftest_cpup(cpg->flags, HOPEN)) {
+ AuDebugOn(cpg->bsrc < 0);
+ h_file = au_h_open_pre(dentry, cpg->bsrc, /*force_wr*/0);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+ }
+
+ parent = dget_parent(dentry);
+ h_dir = au_h_iptr(d_inode(parent), cpg->bdst);
+ if (!au_test_h_perm_sio(h_dir, MAY_EXEC | MAY_WRITE)
+ && !au_cpup_sio_test(cpg->pin, d_inode(dentry)->i_mode))
+ err = au_cpup_simple(cpg);
+ else {
+ struct au_cpup_simple_args args = {
+ .errp = &err,
+ .cpg = cpg
+ };
+ wkq_err = au_wkq_wait(au_call_cpup_simple, &args);
+ if (unlikely(wkq_err))
+ err = wkq_err;
+ }
+
+ dput(parent);
+ if (h_file)
+ au_h_open_post(dentry, cpg->bsrc, h_file);
+
+out:
+ return err;
+}
+
+int au_sio_cpup_simple(struct au_cp_generic *cpg)
+{
+ aufs_bindex_t bsrc, bend;
+ struct dentry *dentry, *h_dentry;
+
+ if (cpg->bsrc < 0) {
+ dentry = cpg->dentry;
+ bend = au_dbend(dentry);
+ for (bsrc = cpg->bdst + 1; bsrc <= bend; bsrc++) {
+ h_dentry = au_h_dptr(dentry, bsrc);
+ if (h_dentry) {
+ AuDebugOn(d_is_negative(h_dentry));
+ break;
+ }
+ }
+ AuDebugOn(bsrc > bend);
+ cpg->bsrc = bsrc;
+ }
+ AuDebugOn(cpg->bsrc <= cpg->bdst);
+ return au_do_sio_cpup_simple(cpg);
+}
+
+int au_sio_cpdown_simple(struct au_cp_generic *cpg)
+{
+ AuDebugOn(cpg->bdst <= cpg->bsrc);
+ return au_do_sio_cpup_simple(cpg);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * copyup the deleted file for writing.
+ */
+static int au_do_cpup_wh(struct au_cp_generic *cpg, struct dentry *wh_dentry,
+ struct file *file)
+{
+ int err;
+ unsigned int flags_orig;
+ aufs_bindex_t bsrc_orig;
+ struct dentry *h_d_dst, *h_d_start;
+ struct au_dinfo *dinfo;
+ struct au_hdentry *hdp;
+
+ dinfo = au_di(cpg->dentry);
+ AuRwMustWriteLock(&dinfo->di_rwsem);
+
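+ /*
+ * temporarily swap @wh_dentry (and @file's lower dentry, if given) into
+ * the dinfo so that au_cpup_single() copies up under the whiteout-prefixed
+ * temporary name; the original values are restored below.
+ */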
+ bsrc_orig = cpg->bsrc;
+ cpg->bsrc = dinfo->di_bstart;
+ hdp = dinfo->di_hdentry;
+ h_d_dst = hdp[0 + cpg->bdst].hd_dentry;
+ dinfo->di_bstart = cpg->bdst;
+ hdp[0 + cpg->bdst].hd_dentry = wh_dentry;
+ h_d_start = NULL;
+ if (file) {
+ h_d_start = hdp[0 + cpg->bsrc].hd_dentry;
+ hdp[0 + cpg->bsrc].hd_dentry = au_hf_top(file)->f_path.dentry;
+ }
+ flags_orig = cpg->flags;
+ cpg->flags = !AuCpup_DTIME;
+ err = au_cpup_single(cpg, /*h_parent*/NULL);
+ cpg->flags = flags_orig;
+ if (file) {
+ if (!err)
+ err = au_reopen_nondir(file);
+ hdp[0 + cpg->bsrc].hd_dentry = h_d_start;
+ }
+ hdp[0 + cpg->bdst].hd_dentry = h_d_dst;
+ dinfo->di_bstart = cpg->bsrc;
+ cpg->bsrc = bsrc_orig;
+
+ return err;
+}
+
+static int au_cpup_wh(struct au_cp_generic *cpg, struct file *file)
+{
+ int err;
+ aufs_bindex_t bdst;
+ struct au_dtime dt;
+ struct dentry *dentry, *parent, *h_parent, *wh_dentry;
+ struct au_branch *br;
+ struct path h_path;
+
+ dentry = cpg->dentry;
+ bdst = cpg->bdst;
+ br = au_sbr(dentry->d_sb, bdst);
+ parent = dget_parent(dentry);
+ h_parent = au_h_dptr(parent, bdst);
+ wh_dentry = au_whtmp_lkup(h_parent, br, &dentry->d_name);
+ err = PTR_ERR(wh_dentry);
+ if (IS_ERR(wh_dentry))
+ goto out;
+
+ h_path.dentry = h_parent;
+ h_path.mnt = au_br_mnt(br);
+ au_dtime_store(&dt, parent, &h_path);
+ err = au_do_cpup_wh(cpg, wh_dentry, file);
+ if (unlikely(err))
+ goto out_wh;
+
+ dget(wh_dentry);
+ h_path.dentry = wh_dentry;
+ if (!d_is_dir(wh_dentry)) {
+ /* no delegation since it is just created */
+ err = vfsub_unlink(d_inode(h_parent), &h_path,
+ /*delegated*/NULL, /*force*/0);
+ } else
+ err = vfsub_rmdir(d_inode(h_parent), &h_path);
+ if (unlikely(err)) {
+ AuIOErr("failed remove copied-up tmp file %pd(%d)\n",
+ wh_dentry, err);
+ err = -EIO;
+ }
+ au_dtime_revert(&dt);
+ au_set_hi_wh(d_inode(dentry), bdst, wh_dentry);
+
+out_wh:
+ dput(wh_dentry);
+out:
+ dput(parent);
+ return err;
+}
+
+struct au_cpup_wh_args {
+ int *errp;
+ struct au_cp_generic *cpg;
+ struct file *file;
+};
+
+static void au_call_cpup_wh(void *args)
+{
+ struct au_cpup_wh_args *a = args;
+
+ au_pin_hdir_acquire_nest(a->cpg->pin);
+ *a->errp = au_cpup_wh(a->cpg, a->file);
+ au_pin_hdir_release(a->cpg->pin);
+}
+
+int au_sio_cpup_wh(struct au_cp_generic *cpg, struct file *file)
+{
+ int err, wkq_err;
+ aufs_bindex_t bdst;
+ struct dentry *dentry, *parent, *h_orph, *h_parent;
+ struct inode *dir, *h_dir, *h_tmpdir;
+ struct au_wbr *wbr;
+ struct au_pin wh_pin, *pin_orig;
+
+ dentry = cpg->dentry;
+ bdst = cpg->bdst;
+ parent = dget_parent(dentry);
+ dir = d_inode(parent);
+ h_orph = NULL;
+ h_parent = NULL;
+ h_dir = au_igrab(au_h_iptr(dir, bdst));
+ h_tmpdir = h_dir;
+ pin_orig = NULL;
+ if (!h_dir->i_nlink) {
+ wbr = au_sbr(dentry->d_sb, bdst)->br_wbr;
+ h_orph = wbr->wbr_orph;
+
+ h_parent = dget(au_h_dptr(parent, bdst));
+ au_set_h_dptr(parent, bdst, dget(h_orph));
+ h_tmpdir = d_inode(h_orph);
+ au_set_h_iptr(dir, bdst, au_igrab(h_tmpdir), /*flags*/0);
+
+ inode_lock_nested(h_tmpdir, AuLsc_I_PARENT3);
+ /* todo: au_h_open_pre()? */
+
+ pin_orig = cpg->pin;
+ au_pin_init(&wh_pin, dentry, bdst, AuLsc_DI_PARENT,
+ AuLsc_I_PARENT3, cpg->pin->udba, AuPin_DI_LOCKED);
+ cpg->pin = &wh_pin;
+ }
+
+ if (!au_test_h_perm_sio(h_tmpdir, MAY_EXEC | MAY_WRITE)
+ && !au_cpup_sio_test(cpg->pin, d_inode(dentry)->i_mode))
+ err = au_cpup_wh(cpg, file);
+ else {
+ struct au_cpup_wh_args args = {
+ .errp = &err,
+ .cpg = cpg,
+ .file = file
+ };
+ wkq_err = au_wkq_wait(au_call_cpup_wh, &args);
+ if (unlikely(wkq_err))
+ err = wkq_err;
+ }
+
+ if (h_orph) {
+ inode_unlock(h_tmpdir);
+ /* todo: au_h_open_post()? */
+ au_set_h_iptr(dir, bdst, au_igrab(h_dir), /*flags*/0);
+ au_set_h_dptr(parent, bdst, h_parent);
+ AuDebugOn(!pin_orig);
+ cpg->pin = pin_orig;
+ }
+ iput(h_dir);
+ dput(parent);
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * generic routine for both copy-up and copy-down.
+ */
+/* cf. revalidate function in file.c */
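+/*
+ * walk up from @dentry until an ancestor which already exists on @bdst is
+ * found, then copy up the topmost missing directory via @cp, repeating until
+ * the parent of @dentry exists on @bdst.
+ */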
+int au_cp_dirs(struct dentry *dentry, aufs_bindex_t bdst,
+ int (*cp)(struct dentry *dentry, aufs_bindex_t bdst,
+ struct au_pin *pin,
+ struct dentry *h_parent, void *arg),
+ void *arg)
+{
+ int err;
+ struct au_pin pin;
+ struct dentry *d, *parent, *h_parent, *real_parent, *h_dentry;
+
+ err = 0;
+ parent = dget_parent(dentry);
+ if (IS_ROOT(parent))
+ goto out;
+
+ au_pin_init(&pin, dentry, bdst, AuLsc_DI_PARENT2, AuLsc_I_PARENT2,
+ au_opt_udba(dentry->d_sb), AuPin_MNT_WRITE);
+
+ /* do not use au_dpage */
+ real_parent = parent;
+ while (1) {
+ dput(parent);
+ parent = dget_parent(dentry);
+ h_parent = au_h_dptr(parent, bdst);
+ if (h_parent)
+ goto out; /* success */
+
+ /* find the topmost dir which needs to be copied up */
+ do {
+ d = parent;
+ dput(parent);
+ parent = dget_parent(d);
+ di_read_lock_parent3(parent, !AuLock_IR);
+ h_parent = au_h_dptr(parent, bdst);
+ di_read_unlock(parent, !AuLock_IR);
+ } while (!h_parent);
+
+ if (d != real_parent)
+ di_write_lock_child3(d);
+
+ /* somebody else might create while we were sleeping */
+ h_dentry = au_h_dptr(d, bdst);
+ if (!h_dentry || d_is_negative(h_dentry)) {
+ if (h_dentry)
+ au_update_dbstart(d);
+
+ au_pin_set_dentry(&pin, d);
+ err = au_do_pin(&pin);
+ if (!err) {
+ err = cp(d, bdst, &pin, h_parent, arg);
+ au_unpin(&pin);
+ }
+ }
+
+ if (d != real_parent)
+ di_write_unlock(d);
+ if (unlikely(err))
+ break;
+ }
+
+out:
+ dput(parent);
+ return err;
+}
+
+static int au_cpup_dir(struct dentry *dentry, aufs_bindex_t bdst,
+ struct au_pin *pin,
+ struct dentry *h_parent __maybe_unused,
+ void *arg __maybe_unused)
+{
+ struct au_cp_generic cpg = {
+ .dentry = dentry,
+ .bdst = bdst,
+ .bsrc = -1,
+ .len = 0,
+ .pin = pin,
+ .flags = AuCpup_DTIME
+ };
+ return au_sio_cpup_simple(&cpg);
+}
+
+int au_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst)
+{
+ return au_cp_dirs(dentry, bdst, au_cpup_dir, NULL);
+}
+
+int au_test_and_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst)
+{
+ int err;
+ struct dentry *parent;
+ struct inode *dir;
+
+ parent = dget_parent(dentry);
+ dir = d_inode(parent);
+ err = 0;
+ if (au_h_iptr(dir, bdst))
+ goto out;
+
+ di_read_unlock(parent, AuLock_IR);
+ di_write_lock_parent(parent);
+ /* someone else might change our inode while we were sleeping */
+ if (!au_h_iptr(dir, bdst))
+ err = au_cpup_dirs(dentry, bdst);
+ di_downgrade_lock(parent, AuLock_IR);
+
+out:
+ dput(parent);
+ return err;
+}
diff --git a/fs/aufs/cpup.h b/fs/aufs/cpup.h
new file mode 100644
index 000000000..ccba2c427
--- /dev/null
+++ b/fs/aufs/cpup.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * copy-up/down functions
+ */
+
+#ifndef __AUFS_CPUP_H__
+#define __AUFS_CPUP_H__
+
+#ifdef __KERNEL__
+
+#include <linux/path.h>
+
+struct inode;
+struct file;
+struct au_pin;
+
+void au_cpup_attr_flags(struct inode *dst, unsigned int iflags);
+void au_cpup_attr_timesizes(struct inode *inode);
+void au_cpup_attr_nlink(struct inode *inode, int force);
+void au_cpup_attr_changeable(struct inode *inode);
+void au_cpup_igen(struct inode *inode, struct inode *h_inode);
+void au_cpup_attr_all(struct inode *inode, int force);
+
+/* ---------------------------------------------------------------------- */
+
+struct au_cp_generic {
+ struct dentry *dentry;
+ aufs_bindex_t bdst, bsrc;
+ loff_t len;
+ struct au_pin *pin;
+ unsigned int flags;
+};
+
+/* cpup flags */
+#define AuCpup_DTIME 1 /* do dtime_store/revert */
+#define AuCpup_KEEPLINO (1 << 1) /* do not clear the lower xino,
+ for link(2) */
+#define AuCpup_RENAME (1 << 2) /* rename after cpup */
+#define AuCpup_HOPEN (1 << 3) /* call h_open_pre/post() in
+ cpup */
+#define AuCpup_OVERWRITE (1 << 4) /* allow overwriting the
+ existing entry */
+#define AuCpup_RWDST (1 << 5) /* force write target even if
+ the branch is marked as RO */
+
+#define au_ftest_cpup(flags, name) ((flags) & AuCpup_##name)
+#define au_fset_cpup(flags, name) \
+ do { (flags) |= AuCpup_##name; } while (0)
+#define au_fclr_cpup(flags, name) \
+ do { (flags) &= ~AuCpup_##name; } while (0)
+
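+/*
+ * typical usage (a minimal sketch; au_cpup_dir() in cpup.c fills exactly
+ * these fields for a directory copy-up, .len = -1 would copy up a whole
+ * regular file, and .bsrc = -1 lets au_sio_cpup_simple() pick the first
+ * positive lower branch):
+ *	struct au_cp_generic cpg = {
+ *		.dentry	= dentry,
+ *		.bdst	= bdst,
+ *		.bsrc	= -1,
+ *		.len	= 0,
+ *		.pin	= pin,
+ *		.flags	= AuCpup_DTIME
+ *	};
+ *	err = au_sio_cpup_simple(&cpg);
+ */
+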
+int au_copy_file(struct file *dst, struct file *src, loff_t len);
+int au_sio_cpup_simple(struct au_cp_generic *cpg);
+int au_sio_cpdown_simple(struct au_cp_generic *cpg);
+int au_sio_cpup_wh(struct au_cp_generic *cpg, struct file *file);
+
+int au_cp_dirs(struct dentry *dentry, aufs_bindex_t bdst,
+ int (*cp)(struct dentry *dentry, aufs_bindex_t bdst,
+ struct au_pin *pin,
+ struct dentry *h_parent, void *arg),
+ void *arg);
+int au_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst);
+int au_test_and_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst);
+
+/* ---------------------------------------------------------------------- */
+
+/* keep timestamps when copyup */
+struct au_dtime {
+ struct dentry *dt_dentry;
+ struct path dt_h_path;
+ struct timespec dt_atime, dt_mtime;
+};
+void au_dtime_store(struct au_dtime *dt, struct dentry *dentry,
+ struct path *h_path);
+void au_dtime_revert(struct au_dtime *dt);
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_CPUP_H__ */
diff --git a/fs/aufs/dbgaufs.c b/fs/aufs/dbgaufs.c
new file mode 100644
index 000000000..0aefb5ed8
--- /dev/null
+++ b/fs/aufs/dbgaufs.c
@@ -0,0 +1,419 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * debugfs interface
+ */
+
+#include <linux/debugfs.h>
+#include "aufs.h"
+
+#ifndef CONFIG_SYSFS
+#error DEBUG_FS depends upon SYSFS
+#endif
+
+static struct dentry *dbgaufs;
+static const mode_t dbgaufs_mode = S_IRUSR | S_IRGRP | S_IROTH;
+
+/* 20 is the maximum number of decimal digits in a 64-bit unsigned long */
+struct dbgaufs_arg {
+ int n;
+ char a[20 * 4];
+};
+
+/*
+ * common function for all XINO files
+ */
+static int dbgaufs_xi_release(struct inode *inode __maybe_unused,
+ struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static int dbgaufs_xi_open(struct file *xf, struct file *file, int do_fcnt)
+{
+ int err;
+ struct kstat st;
+ struct dbgaufs_arg *p;
+
+ err = -ENOMEM;
+ p = kmalloc(sizeof(*p), GFP_NOFS);
+ if (unlikely(!p))
+ goto out;
+
+ err = 0;
+ p->n = 0;
+ file->private_data = p;
+ if (!xf)
+ goto out;
+
+ err = vfs_getattr(&xf->f_path, &st);
+ if (!err) {
+ if (do_fcnt)
+ p->n = snprintf
+ (p->a, sizeof(p->a), "%ld, %llux%lu %lld\n",
+ (long)file_count(xf), st.blocks, st.blksize,
+ (long long)st.size);
+ else
+ p->n = snprintf(p->a, sizeof(p->a), "%llux%lu %lld\n",
+ st.blocks, st.blksize,
+ (long long)st.size);
+ AuDebugOn(p->n >= sizeof(p->a));
+ } else {
+ p->n = snprintf(p->a, sizeof(p->a), "err %d\n", err);
+ err = 0;
+ }
+
+out:
+ return err;
+}
+
+static ssize_t dbgaufs_xi_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dbgaufs_arg *p;
+
+ p = file->private_data;
+ return simple_read_from_buffer(buf, count, ppos, p->a, p->n);
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct dbgaufs_plink_arg {
+ int n;
+ char a[];
+};
+
+static int dbgaufs_plink_release(struct inode *inode __maybe_unused,
+ struct file *file)
+{
+ free_page((unsigned long)file->private_data);
+ return 0;
+}
+
+static int dbgaufs_plink_open(struct inode *inode, struct file *file)
+{
+ int err, i, limit;
+ unsigned long n, sum;
+ struct dbgaufs_plink_arg *p;
+ struct au_sbinfo *sbinfo;
+ struct super_block *sb;
+ struct au_sphlhead *sphl;
+
+ err = -ENOMEM;
+ p = (void *)get_zeroed_page(GFP_NOFS);
+ if (unlikely(!p))
+ goto out;
+
+ err = -EFBIG;
+ sbinfo = inode->i_private;
+ sb = sbinfo->si_sb;
+ si_noflush_read_lock(sb);
+ if (au_opt_test(au_mntflags(sb), PLINK)) {
+ limit = PAGE_SIZE - sizeof(p->n);
+
+ /* the number of buckets */
+ n = snprintf(p->a + p->n, limit, "%d\n", AuPlink_NHASH);
+ p->n += n;
+ limit -= n;
+
+ sum = 0;
+ for (i = 0, sphl = sbinfo->si_plink;
+ i < AuPlink_NHASH;
+ i++, sphl++) {
+ n = au_sphl_count(sphl);
+ sum += n;
+
+ n = snprintf(p->a + p->n, limit, "%lu ", n);
+ p->n += n;
+ limit -= n;
+ if (unlikely(limit <= 0))
+ goto out_free;
+ }
+ p->a[p->n - 1] = '\n';
+
+ /* the sum of plinks */
+ n = snprintf(p->a + p->n, limit, "%lu\n", sum);
+ p->n += n;
+ limit -= n;
+ if (unlikely(limit <= 0))
+ goto out_free;
+ } else {
+#define str "1\n0\n0\n"
+ p->n = sizeof(str) - 1;
+ strcpy(p->a, str);
+#undef str
+ }
+ si_read_unlock(sb);
+
+ err = 0;
+ file->private_data = p;
+ goto out; /* success */
+
+out_free:
+ free_page((unsigned long)p);
+out:
+ return err;
+}
+
+static ssize_t dbgaufs_plink_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dbgaufs_plink_arg *p;
+
+ p = file->private_data;
+ return simple_read_from_buffer(buf, count, ppos, p->a, p->n);
+}
+
+static const struct file_operations dbgaufs_plink_fop = {
+ .owner = THIS_MODULE,
+ .open = dbgaufs_plink_open,
+ .release = dbgaufs_plink_release,
+ .read = dbgaufs_plink_read
+};
+
+/* ---------------------------------------------------------------------- */
+
+static int dbgaufs_xib_open(struct inode *inode, struct file *file)
+{
+ int err;
+ struct au_sbinfo *sbinfo;
+ struct super_block *sb;
+
+ sbinfo = inode->i_private;
+ sb = sbinfo->si_sb;
+ si_noflush_read_lock(sb);
+ err = dbgaufs_xi_open(sbinfo->si_xib, file, /*do_fcnt*/0);
+ si_read_unlock(sb);
+ return err;
+}
+
+static const struct file_operations dbgaufs_xib_fop = {
+ .owner = THIS_MODULE,
+ .open = dbgaufs_xib_open,
+ .release = dbgaufs_xi_release,
+ .read = dbgaufs_xi_read
+};
+
+/* ---------------------------------------------------------------------- */
+
+#define DbgaufsXi_PREFIX "xi"
+
+static int dbgaufs_xino_open(struct inode *inode, struct file *file)
+{
+ int err;
+ long l;
+ struct au_sbinfo *sbinfo;
+ struct super_block *sb;
+ struct file *xf;
+ struct qstr *name;
+
+ err = -ENOENT;
+ xf = NULL;
+ name = &file->f_path.dentry->d_name;
+ if (unlikely(name->len < sizeof(DbgaufsXi_PREFIX)
+ || memcmp(name->name, DbgaufsXi_PREFIX,
+ sizeof(DbgaufsXi_PREFIX) - 1)))
+ goto out;
+ err = kstrtol(name->name + sizeof(DbgaufsXi_PREFIX) - 1, 10, &l);
+ if (unlikely(err))
+ goto out;
+
+ sbinfo = inode->i_private;
+ sb = sbinfo->si_sb;
+ si_noflush_read_lock(sb);
+ if (l <= au_sbend(sb)) {
+ xf = au_sbr(sb, (aufs_bindex_t)l)->br_xino.xi_file;
+ err = dbgaufs_xi_open(xf, file, /*do_fcnt*/1);
+ } else
+ err = -ENOENT;
+ si_read_unlock(sb);
+
+out:
+ return err;
+}
+
+static const struct file_operations dbgaufs_xino_fop = {
+ .owner = THIS_MODULE,
+ .open = dbgaufs_xino_open,
+ .release = dbgaufs_xi_release,
+ .read = dbgaufs_xi_read
+};
+
+void dbgaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex)
+{
+ aufs_bindex_t bend;
+ struct au_branch *br;
+ struct au_xino_file *xi;
+
+ if (!au_sbi(sb)->si_dbgaufs)
+ return;
+
+ bend = au_sbend(sb);
+ for (; bindex <= bend; bindex++) {
+ br = au_sbr(sb, bindex);
+ xi = &br->br_xino;
+ debugfs_remove(xi->xi_dbgaufs);
+ xi->xi_dbgaufs = NULL;
+ }
+}
+
+void dbgaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex)
+{
+ struct au_sbinfo *sbinfo;
+ struct dentry *parent;
+ struct au_branch *br;
+ struct au_xino_file *xi;
+ aufs_bindex_t bend;
+ char name[sizeof(DbgaufsXi_PREFIX) + 5]; /* "xi" bindex NULL */
+
+ sbinfo = au_sbi(sb);
+ parent = sbinfo->si_dbgaufs;
+ if (!parent)
+ return;
+
+ bend = au_sbend(sb);
+ for (; bindex <= bend; bindex++) {
+ snprintf(name, sizeof(name), DbgaufsXi_PREFIX "%d", bindex);
+ br = au_sbr(sb, bindex);
+ xi = &br->br_xino;
+ AuDebugOn(xi->xi_dbgaufs);
+ xi->xi_dbgaufs = debugfs_create_file(name, dbgaufs_mode, parent,
+ sbinfo, &dbgaufs_xino_fop);
+ /* ignore an error */
+ if (unlikely(!xi->xi_dbgaufs))
+ AuWarn1("failed %s under debugfs\n", name);
+ }
+}
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_EXPORT
+static int dbgaufs_xigen_open(struct inode *inode, struct file *file)
+{
+ int err;
+ struct au_sbinfo *sbinfo;
+ struct super_block *sb;
+
+ sbinfo = inode->i_private;
+ sb = sbinfo->si_sb;
+ si_noflush_read_lock(sb);
+ err = dbgaufs_xi_open(sbinfo->si_xigen, file, /*do_fcnt*/0);
+ si_read_unlock(sb);
+ return err;
+}
+
+static const struct file_operations dbgaufs_xigen_fop = {
+ .owner = THIS_MODULE,
+ .open = dbgaufs_xigen_open,
+ .release = dbgaufs_xi_release,
+ .read = dbgaufs_xi_read
+};
+
+static int dbgaufs_xigen_init(struct au_sbinfo *sbinfo)
+{
+ int err;
+
+ /*
+ * This function is effectively a dynamic '__init' function,
+ * so the tiny check for si_rwsem is unnecessary.
+ */
+ /* AuRwMustWriteLock(&sbinfo->si_rwsem); */
+
+ err = -EIO;
+ sbinfo->si_dbgaufs_xigen = debugfs_create_file
+ ("xigen", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo,
+ &dbgaufs_xigen_fop);
+ if (sbinfo->si_dbgaufs_xigen)
+ err = 0;
+
+ return err;
+}
+#else
+static int dbgaufs_xigen_init(struct au_sbinfo *sbinfo)
+{
+ return 0;
+}
+#endif /* CONFIG_AUFS_EXPORT */
+
+/* ---------------------------------------------------------------------- */
+
+void dbgaufs_si_fin(struct au_sbinfo *sbinfo)
+{
+ /*
+ * This function is effectively a dynamic '__fin' function,
+ * so the tiny check for si_rwsem is unnecessary.
+ */
+ /* AuRwMustWriteLock(&sbinfo->si_rwsem); */
+
+ debugfs_remove_recursive(sbinfo->si_dbgaufs);
+ sbinfo->si_dbgaufs = NULL;
+ kobject_put(&sbinfo->si_kobj);
+}
+
+int dbgaufs_si_init(struct au_sbinfo *sbinfo)
+{
+ int err;
+ char name[SysaufsSiNameLen];
+
+ /*
+ * This function is effectively a dynamic '__init' function,
+ * so the tiny check for si_rwsem is unnecessary.
+ */
+ /* AuRwMustWriteLock(&sbinfo->si_rwsem); */
+
+ err = -ENOENT;
+ if (!dbgaufs) {
+ AuErr1("/debug/aufs is uninitialized\n");
+ goto out;
+ }
+
+ err = -EIO;
+ sysaufs_name(sbinfo, name);
+ sbinfo->si_dbgaufs = debugfs_create_dir(name, dbgaufs);
+ if (unlikely(!sbinfo->si_dbgaufs))
+ goto out;
+ kobject_get(&sbinfo->si_kobj);
+
+ sbinfo->si_dbgaufs_xib = debugfs_create_file
+ ("xib", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo,
+ &dbgaufs_xib_fop);
+ if (unlikely(!sbinfo->si_dbgaufs_xib))
+ goto out_dir;
+
+ sbinfo->si_dbgaufs_plink = debugfs_create_file
+ ("plink", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo,
+ &dbgaufs_plink_fop);
+ if (unlikely(!sbinfo->si_dbgaufs_plink))
+ goto out_dir;
+
+ err = dbgaufs_xigen_init(sbinfo);
+ if (!err)
+ goto out; /* success */
+
+out_dir:
+ dbgaufs_si_fin(sbinfo);
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void dbgaufs_fin(void)
+{
+ debugfs_remove(dbgaufs);
+}
+
+int __init dbgaufs_init(void)
+{
+ int err;
+
+ err = -EIO;
+ dbgaufs = debugfs_create_dir(AUFS_NAME, NULL);
+ if (dbgaufs)
+ err = 0;
+ return err;
+}
diff --git a/fs/aufs/dbgaufs.h b/fs/aufs/dbgaufs.h
new file mode 100644
index 000000000..81f272e42
--- /dev/null
+++ b/fs/aufs/dbgaufs.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * debugfs interface
+ */
+
+#ifndef __DBGAUFS_H__
+#define __DBGAUFS_H__
+
+#ifdef __KERNEL__
+
+struct super_block;
+struct au_sbinfo;
+
+#ifdef CONFIG_DEBUG_FS
+/* dbgaufs.c */
+void dbgaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex);
+void dbgaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex);
+void dbgaufs_si_fin(struct au_sbinfo *sbinfo);
+int dbgaufs_si_init(struct au_sbinfo *sbinfo);
+void dbgaufs_fin(void);
+int __init dbgaufs_init(void);
+#else
+AuStubVoid(dbgaufs_brs_del, struct super_block *sb, aufs_bindex_t bindex)
+AuStubVoid(dbgaufs_brs_add, struct super_block *sb, aufs_bindex_t bindex)
+AuStubVoid(dbgaufs_si_fin, struct au_sbinfo *sbinfo)
+AuStubInt0(dbgaufs_si_init, struct au_sbinfo *sbinfo)
+AuStubVoid(dbgaufs_fin, void)
+AuStubInt0(__init dbgaufs_init, void)
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* __KERNEL__ */
+#endif /* __DBGAUFS_H__ */
diff --git a/fs/aufs/dcsub.c b/fs/aufs/dcsub.c
new file mode 100644
index 000000000..e72accebb
--- /dev/null
+++ b/fs/aufs/dcsub.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * sub-routines for dentry cache
+ */
+
+#include "aufs.h"
+
+static void au_dpage_free(struct au_dpage *dpage)
+{
+ int i;
+ struct dentry **p;
+
+ p = dpage->dentries;
+ for (i = 0; i < dpage->ndentry; i++)
+ dput(*p++);
+ free_page((unsigned long)dpage->dentries);
+}
+
+int au_dpages_init(struct au_dcsub_pages *dpages, gfp_t gfp)
+{
+ int err;
+ void *p;
+
+ err = -ENOMEM;
+ dpages->dpages = kmalloc(sizeof(*dpages->dpages), gfp);
+ if (unlikely(!dpages->dpages))
+ goto out;
+
+ p = (void *)__get_free_page(gfp);
+ if (unlikely(!p))
+ goto out_dpages;
+
+ dpages->dpages[0].ndentry = 0;
+ dpages->dpages[0].dentries = p;
+ dpages->ndpage = 1;
+ return 0; /* success */
+
+out_dpages:
+ kfree(dpages->dpages);
+out:
+ return err;
+}
+
+void au_dpages_free(struct au_dcsub_pages *dpages)
+{
+ int i;
+ struct au_dpage *p;
+
+ p = dpages->dpages;
+ for (i = 0; i < dpages->ndpage; i++)
+ au_dpage_free(p++);
+ kfree(dpages->dpages);
+}
+
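+/*
+ * append @dentry to the current dpage; when it is full, extend the dpage
+ * array by one entry and allocate a fresh page for the new dpage.
+ */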
+static int au_dpages_append(struct au_dcsub_pages *dpages,
+ struct dentry *dentry, gfp_t gfp)
+{
+ int err, sz;
+ struct au_dpage *dpage;
+ void *p;
+
+ dpage = dpages->dpages + dpages->ndpage - 1;
+ sz = PAGE_SIZE / sizeof(dentry);
+ if (unlikely(dpage->ndentry >= sz)) {
+ AuLabel(new dpage);
+ err = -ENOMEM;
+ sz = dpages->ndpage * sizeof(*dpages->dpages);
+ p = au_kzrealloc(dpages->dpages, sz,
+ sz + sizeof(*dpages->dpages), gfp);
+ if (unlikely(!p))
+ goto out;
+
+ dpages->dpages = p;
+ dpage = dpages->dpages + dpages->ndpage;
+ p = (void *)__get_free_page(gfp);
+ if (unlikely(!p))
+ goto out;
+
+ dpage->ndentry = 0;
+ dpage->dentries = p;
+ dpages->ndpage++;
+ }
+
+ AuDebugOn(au_dcount(dentry) <= 0);
+ dpage->dentries[dpage->ndentry++] = dget_dlock(dentry);
+ return 0; /* success */
+
+out:
+ return err;
+}
+
+/* todo: BAD approach */
+/* copied from linux/fs/dcache.c */
+enum d_walk_ret {
+ D_WALK_CONTINUE,
+ D_WALK_QUIT,
+ D_WALK_NORETRY,
+ D_WALK_SKIP,
+};
+
+extern void d_walk(struct dentry *parent, void *data,
+ enum d_walk_ret (*enter)(void *, struct dentry *),
+ void (*finish)(void *));
+
+struct ac_dpages_arg {
+ int err;
+ struct au_dcsub_pages *dpages;
+ struct super_block *sb;
+ au_dpages_test test;
+ void *arg;
+};
+
+static enum d_walk_ret au_call_dpages_append(void *_arg, struct dentry *dentry)
+{
+ enum d_walk_ret ret;
+ struct ac_dpages_arg *arg = _arg;
+
+ ret = D_WALK_CONTINUE;
+ if (dentry->d_sb == arg->sb
+ && !IS_ROOT(dentry)
+ && au_dcount(dentry) > 0
+ && au_di(dentry)
+ && (!arg->test || arg->test(dentry, arg->arg))) {
+ arg->err = au_dpages_append(arg->dpages, dentry, GFP_ATOMIC);
+ if (unlikely(arg->err))
+ ret = D_WALK_QUIT;
+ }
+
+ return ret;
+}
+
+int au_dcsub_pages(struct au_dcsub_pages *dpages, struct dentry *root,
+ au_dpages_test test, void *arg)
+{
+ struct ac_dpages_arg args = {
+ .err = 0,
+ .dpages = dpages,
+ .sb = root->d_sb,
+ .test = test,
+ .arg = arg
+ };
+
+ d_walk(root, &args, au_call_dpages_append, NULL);
+
+ return args.err;
+}
+
+int au_dcsub_pages_rev(struct au_dcsub_pages *dpages, struct dentry *dentry,
+ int do_include, au_dpages_test test, void *arg)
+{
+ int err;
+
+ err = 0;
+ write_seqlock(&rename_lock);
+ spin_lock(&dentry->d_lock);
+ if (do_include
+ && au_dcount(dentry) > 0
+ && (!test || test(dentry, arg)))
+ err = au_dpages_append(dpages, dentry, GFP_ATOMIC);
+ spin_unlock(&dentry->d_lock);
+ if (unlikely(err))
+ goto out;
+
+ /*
+ * RCU for vfsmount is unnecessary since this is a traversal within a
+ * single mount
+ */
+ while (!IS_ROOT(dentry)) {
+ dentry = dentry->d_parent; /* rename_lock is locked */
+ spin_lock(&dentry->d_lock);
+ if (au_dcount(dentry) > 0
+ && (!test || test(dentry, arg)))
+ err = au_dpages_append(dpages, dentry, GFP_ATOMIC);
+ spin_unlock(&dentry->d_lock);
+ if (unlikely(err))
+ break;
+ }
+
+out:
+ write_sequnlock(&rename_lock);
+ return err;
+}
+
+static inline int au_dcsub_dpages_aufs(struct dentry *dentry, void *arg)
+{
+ return au_di(dentry) && dentry->d_sb == arg;
+}
+
+int au_dcsub_pages_rev_aufs(struct au_dcsub_pages *dpages,
+ struct dentry *dentry, int do_include)
+{
+ return au_dcsub_pages_rev(dpages, dentry, do_include,
+ au_dcsub_dpages_aufs, dentry->d_sb);
+}
+
+int au_test_subdir(struct dentry *d1, struct dentry *d2)
+{
+ struct path path[2] = {
+ {
+ .dentry = d1
+ },
+ {
+ .dentry = d2
+ }
+ };
+
+ return path_is_under(path + 0, path + 1);
+}
diff --git a/fs/aufs/dcsub.h b/fs/aufs/dcsub.h
new file mode 100644
index 000000000..5d2cf661d
--- /dev/null
+++ b/fs/aufs/dcsub.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * sub-routines for dentry cache
+ */
+
+#ifndef __AUFS_DCSUB_H__
+#define __AUFS_DCSUB_H__
+
+#ifdef __KERNEL__
+
+#include <linux/dcache.h>
+#include <linux/fs.h>
+
+struct au_dpage {
+ int ndentry;
+ struct dentry **dentries;
+};
+
+struct au_dcsub_pages {
+ int ndpage;
+ struct au_dpage *dpages;
+};
+
+/* ---------------------------------------------------------------------- */
+
+/* dcsub.c */
+int au_dpages_init(struct au_dcsub_pages *dpages, gfp_t gfp);
+void au_dpages_free(struct au_dcsub_pages *dpages);
+typedef int (*au_dpages_test)(struct dentry *dentry, void *arg);
+int au_dcsub_pages(struct au_dcsub_pages *dpages, struct dentry *root,
+ au_dpages_test test, void *arg);
+int au_dcsub_pages_rev(struct au_dcsub_pages *dpages, struct dentry *dentry,
+ int do_include, au_dpages_test test, void *arg);
+int au_dcsub_pages_rev_aufs(struct au_dcsub_pages *dpages,
+ struct dentry *dentry, int do_include);
+int au_test_subdir(struct dentry *d1, struct dentry *d2);
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * todo: in linux-3.13, several similar (but faster) helpers were added to
+ * include/linux/dcache.h. Try them (in the future).
+ */
+
+static inline int au_d_hashed_positive(struct dentry *d)
+{
+ int err;
+ struct inode *inode = d_inode(d);
+
+ err = 0;
+ if (unlikely(d_unhashed(d)
+ || d_is_negative(d)
+ || !inode->i_nlink))
+ err = -ENOENT;
+ return err;
+}
+
+static inline int au_d_linkable(struct dentry *d)
+{
+ int err;
+ struct inode *inode = d_inode(d);
+
+ err = au_d_hashed_positive(d);
+ if (err
+ && d_is_positive(d)
+ && (inode->i_state & I_LINKABLE))
+ err = 0;
+ return err;
+}
+
+static inline int au_d_alive(struct dentry *d)
+{
+ int err;
+ struct inode *inode;
+
+ err = 0;
+ if (!IS_ROOT(d))
+ err = au_d_hashed_positive(d);
+ else {
+ inode = d_inode(d);
+ if (unlikely(d_unlinked(d)
+ || d_is_negative(d)
+ || !inode->i_nlink))
+ err = -ENOENT;
+ }
+ return err;
+}
+
+static inline int au_alive_dir(struct dentry *d)
+{
+ int err;
+
+ err = au_d_alive(d);
+ if (unlikely(err || IS_DEADDIR(d_inode(d))))
+ err = -ENOENT;
+ return err;
+}
+
+static inline int au_qstreq(struct qstr *a, struct qstr *b)
+{
+ return a->len == b->len
+ && !memcmp(a->name, b->name, a->len);
+}
+
+/*
+ * by the commit
+ * 360f547 2015-01-25 dcache: let the dentry count go down to zero without
+ * taking d_lock
+ * the type of d_lockref.count became int, but the inlined function d_count()
+ * still returns unsigned int.
+ * I don't know why. Maybe it is for all d_count() users?
+ * Anyway au_dcount() lives on.
+ */
+static inline int au_dcount(struct dentry *d)
+{
+ return (int)d_count(d);
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DCSUB_H__ */
diff --git a/fs/aufs/debug.c b/fs/aufs/debug.c
new file mode 100644
index 000000000..4529831a9
--- /dev/null
+++ b/fs/aufs/debug.c
@@ -0,0 +1,425 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * debug print functions
+ */
+
+#include "aufs.h"
+
+/* Returns 0, or -errno. arg is in kp->arg. */
+static int param_atomic_t_set(const char *val, const struct kernel_param *kp)
+{
+ int err, n;
+
+ err = kstrtoint(val, 0, &n);
+ if (!err) {
+ if (n > 0)
+ au_debug_on();
+ else
+ au_debug_off();
+ }
+ return err;
+}
+
+/* Returns length written or -errno. Buffer is 4k (i.e. be short!) */
+static int param_atomic_t_get(char *buffer, const struct kernel_param *kp)
+{
+ atomic_t *a;
+
+ a = kp->arg;
+ return sprintf(buffer, "%d", atomic_read(a));
+}
+
+static struct kernel_param_ops param_ops_atomic_t = {
+ .set = param_atomic_t_set,
+ .get = param_atomic_t_get
+ /* void (*free)(void *arg) */
+};
+
+atomic_t aufs_debug = ATOMIC_INIT(0);
+MODULE_PARM_DESC(debug, "debug print");
+module_param_named(debug, aufs_debug, atomic_t, S_IRUGO | S_IWUSR | S_IWGRP);
+
+DEFINE_MUTEX(au_dbg_mtx); /* just to serialize the dbg msgs */
+char *au_plevel = KERN_DEBUG;
+#define dpri(fmt, ...) do { \
+ if ((au_plevel \
+ && strcmp(au_plevel, KERN_DEBUG)) \
+ || au_debug_test()) \
+ printk("%s" fmt, au_plevel, ##__VA_ARGS__); \
+} while (0)
+
+/* ---------------------------------------------------------------------- */
+
+void au_dpri_whlist(struct au_nhash *whlist)
+{
+ unsigned long ul, n;
+ struct hlist_head *head;
+ struct au_vdir_wh *pos;
+
+ n = whlist->nh_num;
+ head = whlist->nh_head;
+ for (ul = 0; ul < n; ul++) {
+ hlist_for_each_entry(pos, head, wh_hash)
+ dpri("b%d, %.*s, %d\n",
+ pos->wh_bindex,
+ pos->wh_str.len, pos->wh_str.name,
+ pos->wh_str.len);
+ head++;
+ }
+}
+
+void au_dpri_vdir(struct au_vdir *vdir)
+{
+ unsigned long ul;
+ union au_vdir_deblk_p p;
+ unsigned char *o;
+
+ if (!vdir || IS_ERR(vdir)) {
+ dpri("err %ld\n", PTR_ERR(vdir));
+ return;
+ }
+
+ dpri("deblk %u, nblk %lu, deblk %p, last{%lu, %p}, ver %lu\n",
+ vdir->vd_deblk_sz, vdir->vd_nblk, vdir->vd_deblk,
+ vdir->vd_last.ul, vdir->vd_last.p.deblk, vdir->vd_version);
+ for (ul = 0; ul < vdir->vd_nblk; ul++) {
+ p.deblk = vdir->vd_deblk[ul];
+ o = p.deblk;
+ dpri("[%lu]: %p\n", ul, o);
+ }
+}
+
+static int do_pri_inode(aufs_bindex_t bindex, struct inode *inode, int hn,
+ struct dentry *wh)
+{
+ char *n = NULL;
+ int l = 0;
+
+ if (!inode || IS_ERR(inode)) {
+ dpri("i%d: err %ld\n", bindex, PTR_ERR(inode));
+ return -1;
+ }
+
+ /* the type of i_blocks depends upon CONFIG_LBDAF */
+ BUILD_BUG_ON(sizeof(inode->i_blocks) != sizeof(unsigned long)
+ && sizeof(inode->i_blocks) != sizeof(u64));
+ if (wh) {
+ n = (void *)wh->d_name.name;
+ l = wh->d_name.len;
+ }
+
+ dpri("i%d: %p, i%lu, %s, cnt %d, nl %u, 0%o, sz %llu, blk %llu,"
+ " hn %d, ct %lld, np %lu, st 0x%lx, f 0x%x, v %llu, g %x%s%.*s\n",
+ bindex, inode,
+ inode->i_ino, inode->i_sb ? au_sbtype(inode->i_sb) : "??",
+ atomic_read(&inode->i_count), inode->i_nlink, inode->i_mode,
+ i_size_read(inode), (unsigned long long)inode->i_blocks,
+ hn, (long long)timespec_to_ns(&inode->i_ctime) & 0x0ffff,
+ inode->i_mapping ? inode->i_mapping->nrpages : 0,
+ inode->i_state, inode->i_flags, inode->i_version,
+ inode->i_generation,
+ l ? ", wh " : "", l, n);
+ return 0;
+}
+
+void au_dpri_inode(struct inode *inode)
+{
+ struct au_iinfo *iinfo;
+ aufs_bindex_t bindex;
+ int err, hn;
+
+ err = do_pri_inode(-1, inode, -1, NULL);
+ if (err || !au_test_aufs(inode->i_sb))
+ return;
+
+ iinfo = au_ii(inode);
+ if (!iinfo)
+ return;
+ dpri("i-1: bstart %d, bend %d, gen %d\n",
+ iinfo->ii_bstart, iinfo->ii_bend, au_iigen(inode, NULL));
+ if (iinfo->ii_bstart < 0)
+ return;
+ hn = 0;
+ for (bindex = iinfo->ii_bstart; bindex <= iinfo->ii_bend; bindex++) {
+ hn = !!au_hn(iinfo->ii_hinode + bindex);
+ do_pri_inode(bindex, iinfo->ii_hinode[0 + bindex].hi_inode, hn,
+ iinfo->ii_hinode[0 + bindex].hi_whdentry);
+ }
+}
+
+void au_dpri_dalias(struct inode *inode)
+{
+ struct dentry *d;
+
+ spin_lock(&inode->i_lock);
+ hlist_for_each_entry(d, &inode->i_dentry, d_u.d_alias)
+ au_dpri_dentry(d);
+ spin_unlock(&inode->i_lock);
+}
+
+static int do_pri_dentry(aufs_bindex_t bindex, struct dentry *dentry)
+{
+ struct dentry *wh = NULL;
+ int hn;
+ struct au_iinfo *iinfo;
+
+ if (!dentry || IS_ERR(dentry)) {
+ dpri("d%d: err %ld\n", bindex, PTR_ERR(dentry));
+ return -1;
+ }
+ /* do not call dget_parent() here */
+ /* note: access d_xxx without d_lock */
+ dpri("d%d: %p, %pd2?, %s, cnt %d, flags 0x%x, %shashed\n",
+ bindex, dentry, dentry,
+ dentry->d_sb ? au_sbtype(dentry->d_sb) : "??",
+ au_dcount(dentry), dentry->d_flags,
+ d_unhashed(dentry) ? "un" : "");
+ hn = -1;
+ if (bindex >= 0
+ && d_is_positive(dentry)
+ && au_test_aufs(dentry->d_sb)) {
+ iinfo = au_ii(d_inode(dentry));
+ if (iinfo) {
+ hn = !!au_hn(iinfo->ii_hinode + bindex);
+ wh = iinfo->ii_hinode[0 + bindex].hi_whdentry;
+ }
+ }
+ do_pri_inode(bindex, d_inode(dentry), hn, wh);
+ return 0;
+}
+
+void au_dpri_dentry(struct dentry *dentry)
+{
+ struct au_dinfo *dinfo;
+ aufs_bindex_t bindex;
+ int err;
+ struct au_hdentry *hdp;
+
+ err = do_pri_dentry(-1, dentry);
+ if (err || !au_test_aufs(dentry->d_sb))
+ return;
+
+ dinfo = au_di(dentry);
+ if (!dinfo)
+ return;
+ dpri("d-1: bstart %d, bend %d, bwh %d, bdiropq %d, gen %d, tmp %d\n",
+ dinfo->di_bstart, dinfo->di_bend,
+ dinfo->di_bwh, dinfo->di_bdiropq, au_digen(dentry),
+ dinfo->di_tmpfile);
+ if (dinfo->di_bstart < 0)
+ return;
+ hdp = dinfo->di_hdentry;
+ for (bindex = dinfo->di_bstart; bindex <= dinfo->di_bend; bindex++)
+ do_pri_dentry(bindex, hdp[0 + bindex].hd_dentry);
+}
+
+static int do_pri_file(aufs_bindex_t bindex, struct file *file)
+{
+ char a[32];
+
+ if (!file || IS_ERR(file)) {
+ dpri("f%d: err %ld\n", bindex, PTR_ERR(file));
+ return -1;
+ }
+ a[0] = 0;
+ if (bindex < 0
+ && !IS_ERR_OR_NULL(file->f_path.dentry)
+ && au_test_aufs(file->f_path.dentry->d_sb)
+ && au_fi(file))
+ snprintf(a, sizeof(a), ", gen %d, mmapped %d",
+ au_figen(file), atomic_read(&au_fi(file)->fi_mmapped));
+ dpri("f%d: mode 0x%x, flags 0%o, cnt %ld, v %llu, pos %llu%s\n",
+ bindex, file->f_mode, file->f_flags, (long)file_count(file),
+ file->f_version, file->f_pos, a);
+ if (!IS_ERR_OR_NULL(file->f_path.dentry))
+ do_pri_dentry(bindex, file->f_path.dentry);
+ return 0;
+}
+
+void au_dpri_file(struct file *file)
+{
+ struct au_finfo *finfo;
+ struct au_fidir *fidir;
+ struct au_hfile *hfile;
+ aufs_bindex_t bindex;
+ int err;
+
+ err = do_pri_file(-1, file);
+ if (err
+ || IS_ERR_OR_NULL(file->f_path.dentry)
+ || !au_test_aufs(file->f_path.dentry->d_sb))
+ return;
+
+ finfo = au_fi(file);
+ if (!finfo)
+ return;
+ if (finfo->fi_btop < 0)
+ return;
+ fidir = finfo->fi_hdir;
+ if (!fidir)
+ do_pri_file(finfo->fi_btop, finfo->fi_htop.hf_file);
+ else
+ for (bindex = finfo->fi_btop;
+ bindex >= 0 && bindex <= fidir->fd_bbot;
+ bindex++) {
+ hfile = fidir->fd_hfile + bindex;
+ do_pri_file(bindex, hfile ? hfile->hf_file : NULL);
+ }
+}
+
+static int do_pri_br(aufs_bindex_t bindex, struct au_branch *br)
+{
+ struct vfsmount *mnt;
+ struct super_block *sb;
+
+ if (!br || IS_ERR(br))
+ goto out;
+ mnt = au_br_mnt(br);
+ if (!mnt || IS_ERR(mnt))
+ goto out;
+ sb = mnt->mnt_sb;
+ if (!sb || IS_ERR(sb))
+ goto out;
+
+ dpri("s%d: {perm 0x%x, id %d, cnt %d, wbr %p}, "
+ "%s, dev 0x%02x%02x, flags 0x%lx, cnt %d, active %d, "
+ "xino %d\n",
+ bindex, br->br_perm, br->br_id, atomic_read(&br->br_count),
+ br->br_wbr, au_sbtype(sb), MAJOR(sb->s_dev), MINOR(sb->s_dev),
+ sb->s_flags, sb->s_count,
+ atomic_read(&sb->s_active), !!br->br_xino.xi_file);
+ return 0;
+
+out:
+ dpri("s%d: err %ld\n", bindex, PTR_ERR(br));
+ return -1;
+}
+
+void au_dpri_sb(struct super_block *sb)
+{
+ struct au_sbinfo *sbinfo;
+ aufs_bindex_t bindex;
+ int err;
+ /* to reduce stack size */
+ struct {
+ struct vfsmount mnt;
+ struct au_branch fake;
+ } *a;
+
+ /* this function can be called from magic sysrq */
+ a = kzalloc(sizeof(*a), GFP_ATOMIC);
+ if (unlikely(!a)) {
+ dpri("no memory\n");
+ return;
+ }
+
+ a->mnt.mnt_sb = sb;
+ a->fake.br_path.mnt = &a->mnt;
+ atomic_set(&a->fake.br_count, 0);
+ smp_mb(); /* atomic_set */
+ err = do_pri_br(-1, &a->fake);
+ kfree(a);
+ dpri("dev 0x%x\n", sb->s_dev);
+ if (err || !au_test_aufs(sb))
+ return;
+
+ sbinfo = au_sbi(sb);
+ if (!sbinfo)
+ return;
+ dpri("nw %d, gen %u, kobj %d\n",
+ atomic_read(&sbinfo->si_nowait.nw_len), sbinfo->si_generation,
+ atomic_read(&sbinfo->si_kobj.kref.refcount));
+ for (bindex = 0; bindex <= sbinfo->si_bend; bindex++)
+ do_pri_br(bindex, sbinfo->si_branch[0 + bindex]);
+}
+
+/* ---------------------------------------------------------------------- */
+
+void __au_dbg_verify_dinode(struct dentry *dentry, const char *func, int line)
+{
+ struct inode *h_inode, *inode = d_inode(dentry);
+ struct dentry *h_dentry;
+ aufs_bindex_t bindex, bend, bi;
+
+ if (!inode /* || au_di(dentry)->di_lsc == AuLsc_DI_TMP */)
+ return;
+
+ bend = au_dbend(dentry);
+ bi = au_ibend(inode);
+ if (bi < bend)
+ bend = bi;
+ bindex = au_dbstart(dentry);
+ bi = au_ibstart(inode);
+ if (bi > bindex)
+ bindex = bi;
+
+ for (; bindex <= bend; bindex++) {
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (!h_dentry)
+ continue;
+ h_inode = au_h_iptr(inode, bindex);
+ if (unlikely(h_inode != d_inode(h_dentry))) {
+ au_debug_on();
+ AuDbg("b%d, %s:%d\n", bindex, func, line);
+ AuDbgDentry(dentry);
+ AuDbgInode(inode);
+ au_debug_off();
+ BUG();
+ }
+ }
+}
+
+void au_dbg_verify_gen(struct dentry *parent, unsigned int sigen)
+{
+ int err, i, j;
+ struct au_dcsub_pages dpages;
+ struct au_dpage *dpage;
+ struct dentry **dentries;
+
+ err = au_dpages_init(&dpages, GFP_NOFS);
+ AuDebugOn(err);
+ err = au_dcsub_pages_rev_aufs(&dpages, parent, /*do_include*/1);
+ AuDebugOn(err);
+ for (i = dpages.ndpage - 1; !err && i >= 0; i--) {
+ dpage = dpages.dpages + i;
+ dentries = dpage->dentries;
+ for (j = dpage->ndentry - 1; !err && j >= 0; j--)
+ AuDebugOn(au_digen_test(dentries[j], sigen));
+ }
+ au_dpages_free(&dpages);
+}
+
+void au_dbg_verify_kthread(void)
+{
+ if (au_wkq_test()) {
+ au_dbg_blocked();
+ /*
+ * It may be recursive, but udba=notify between two aufs mounts,
+ * where a single ro branch is shared, is not a problem.
+ */
+ /* WARN_ON(1); */
+ }
+}
+
+/* ---------------------------------------------------------------------- */
+
+int __init au_debug_init(void)
+{
+ aufs_bindex_t bindex;
+ struct au_vdir_destr destr;
+
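+ /*
+ * sanity checks: aufs_bindex_t must be a signed type, and the whiteout
+ * name length field must be unsigned so that -1 wraps to a value >= NAME_MAX
+ */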
+ bindex = -1;
+ AuDebugOn(bindex >= 0);
+
+ destr.len = -1;
+ AuDebugOn(destr.len < NAME_MAX);
+
+#ifdef CONFIG_4KSTACKS
+ pr_warn("CONFIG_4KSTACKS is defined.\n");
+#endif
+
+ return 0;
+}
diff --git a/fs/aufs/debug.h b/fs/aufs/debug.h
new file mode 100644
index 000000000..0567f31d0
--- /dev/null
+++ b/fs/aufs/debug.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * debug print functions
+ */
+
+#ifndef __AUFS_DEBUG_H__
+#define __AUFS_DEBUG_H__
+
+#ifdef __KERNEL__
+
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/sysrq.h>
+
+#ifdef CONFIG_AUFS_DEBUG
+#define AuDebugOn(a) BUG_ON(a)
+
+/* module parameter */
+extern atomic_t aufs_debug;
+static inline void au_debug_on(void)
+{
+ atomic_inc(&aufs_debug);
+}
+static inline void au_debug_off(void)
+{
+ atomic_dec_if_positive(&aufs_debug);
+}
+
+static inline int au_debug_test(void)
+{
+ return atomic_read(&aufs_debug) > 0;
+}
+#else
+#define AuDebugOn(a) do {} while (0)
+AuStubVoid(au_debug_on, void)
+AuStubVoid(au_debug_off, void)
+AuStubInt0(au_debug_test, void)
+#endif /* CONFIG_AUFS_DEBUG */
+
+#define param_check_atomic_t(name, p) __param_check(name, p, atomic_t)
+
+/* ---------------------------------------------------------------------- */
+
+/* debug print */
+
+#define AuDbg(fmt, ...) do { \
+ if (au_debug_test()) \
+ pr_debug("DEBUG: " fmt, ##__VA_ARGS__); \
+} while (0)
+#define AuLabel(l) AuDbg(#l "\n")
+#define AuIOErr(fmt, ...) pr_err("I/O Error, " fmt, ##__VA_ARGS__)
+#define AuWarn1(fmt, ...) do { \
+ static unsigned char _c; \
+ if (!_c++) \
+ pr_warn(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define AuErr1(fmt, ...) do { \
+ static unsigned char _c; \
+ if (!_c++) \
+ pr_err(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define AuIOErr1(fmt, ...) do { \
+ static unsigned char _c; \
+ if (!_c++) \
+ AuIOErr(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define AuUnsupportMsg "This operation is not supported." \
+ " Please report this application to aufs-users ML."
+#define AuUnsupport(fmt, ...) do { \
+ pr_err(AuUnsupportMsg "\n" fmt, ##__VA_ARGS__); \
+ dump_stack(); \
+} while (0)
+
+#define AuTraceErr(e) do { \
+ if (unlikely((e) < 0)) \
+ AuDbg("err %d\n", (int)(e)); \
+} while (0)
+
+#define AuTraceErrPtr(p) do { \
+ if (IS_ERR(p)) \
+ AuDbg("err %ld\n", PTR_ERR(p)); \
+} while (0)
+
+/* dirty macros for debug print, use with "%.*s" and caution */
+#define AuLNPair(qstr) (qstr)->len, (qstr)->name
+
+/* ---------------------------------------------------------------------- */
+
+struct dentry;
+#ifdef CONFIG_AUFS_DEBUG
+extern struct mutex au_dbg_mtx;
+extern char *au_plevel;
+struct au_nhash;
+void au_dpri_whlist(struct au_nhash *whlist);
+struct au_vdir;
+void au_dpri_vdir(struct au_vdir *vdir);
+struct inode;
+void au_dpri_inode(struct inode *inode);
+void au_dpri_dalias(struct inode *inode);
+void au_dpri_dentry(struct dentry *dentry);
+struct file;
+void au_dpri_file(struct file *filp);
+struct super_block;
+void au_dpri_sb(struct super_block *sb);
+
+#define au_dbg_verify_dinode(d) __au_dbg_verify_dinode(d, __func__, __LINE__)
+void __au_dbg_verify_dinode(struct dentry *dentry, const char *func, int line);
+void au_dbg_verify_gen(struct dentry *parent, unsigned int sigen);
+void au_dbg_verify_kthread(void);
+
+int __init au_debug_init(void);
+
+#define AuDbgWhlist(w) do { \
+ mutex_lock(&au_dbg_mtx); \
+ AuDbg(#w "\n"); \
+ au_dpri_whlist(w); \
+ mutex_unlock(&au_dbg_mtx); \
+} while (0)
+
+#define AuDbgVdir(v) do { \
+ mutex_lock(&au_dbg_mtx); \
+ AuDbg(#v "\n"); \
+ au_dpri_vdir(v); \
+ mutex_unlock(&au_dbg_mtx); \
+} while (0)
+
+#define AuDbgInode(i) do { \
+ mutex_lock(&au_dbg_mtx); \
+ AuDbg(#i "\n"); \
+ au_dpri_inode(i); \
+ mutex_unlock(&au_dbg_mtx); \
+} while (0)
+
+#define AuDbgDAlias(i) do { \
+ mutex_lock(&au_dbg_mtx); \
+ AuDbg(#i "\n"); \
+ au_dpri_dalias(i); \
+ mutex_unlock(&au_dbg_mtx); \
+} while (0)
+
+#define AuDbgDentry(d) do { \
+ mutex_lock(&au_dbg_mtx); \
+ AuDbg(#d "\n"); \
+ au_dpri_dentry(d); \
+ mutex_unlock(&au_dbg_mtx); \
+} while (0)
+
+#define AuDbgFile(f) do { \
+ mutex_lock(&au_dbg_mtx); \
+ AuDbg(#f "\n"); \
+ au_dpri_file(f); \
+ mutex_unlock(&au_dbg_mtx); \
+} while (0)
+
+#define AuDbgSb(sb) do { \
+ mutex_lock(&au_dbg_mtx); \
+ AuDbg(#sb "\n"); \
+ au_dpri_sb(sb); \
+ mutex_unlock(&au_dbg_mtx); \
+} while (0)
+
+#define AuDbgSym(addr) do { \
+ char sym[KSYM_SYMBOL_LEN]; \
+ sprint_symbol(sym, (unsigned long)addr); \
+ AuDbg("%s\n", sym); \
+} while (0)
+#else
+AuStubVoid(au_dbg_verify_dinode, struct dentry *dentry)
+AuStubVoid(au_dbg_verify_gen, struct dentry *parent, unsigned int sigen)
+AuStubVoid(au_dbg_verify_kthread, void)
+AuStubInt0(__init au_debug_init, void)
+
+#define AuDbgWhlist(w) do {} while (0)
+#define AuDbgVdir(v) do {} while (0)
+#define AuDbgInode(i) do {} while (0)
+#define AuDbgDAlias(i) do {} while (0)
+#define AuDbgDentry(d) do {} while (0)
+#define AuDbgFile(f) do {} while (0)
+#define AuDbgSb(sb) do {} while (0)
+#define AuDbgSym(addr) do {} while (0)
+#endif /* CONFIG_AUFS_DEBUG */
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_MAGIC_SYSRQ
+int __init au_sysrq_init(void);
+void au_sysrq_fin(void);
+
+#ifdef CONFIG_HW_CONSOLE
+#define au_dbg_blocked() do { \
+ WARN_ON(1); \
+ handle_sysrq('w'); \
+} while (0)
+#else
+AuStubVoid(au_dbg_blocked, void)
+#endif
+
+#else
+AuStubInt0(__init au_sysrq_init, void)
+AuStubVoid(au_sysrq_fin, void)
+AuStubVoid(au_dbg_blocked, void)
+#endif /* CONFIG_AUFS_MAGIC_SYSRQ */
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DEBUG_H__ */
diff --git a/fs/aufs/dentry.c b/fs/aufs/dentry.c
new file mode 100644
index 000000000..e65fadd05
--- /dev/null
+++ b/fs/aufs/dentry.c
@@ -0,0 +1,1123 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * lookup and dentry operations
+ */
+
+#include <linux/namei.h>
+#include "aufs.h"
+
+#define AuLkup_ALLOW_NEG 1
+#define AuLkup_IGNORE_PERM (1 << 1)
+#define au_ftest_lkup(flags, name) ((flags) & AuLkup_##name)
+#define au_fset_lkup(flags, name) \
+ do { (flags) |= AuLkup_##name; } while (0)
+#define au_fclr_lkup(flags, name) \
+ do { (flags) &= ~AuLkup_##name; } while (0)
+
+struct au_do_lookup_args {
+ unsigned int flags;
+ mode_t type;
+};
+
+/*
+ * returns positive/negative dentry, NULL or an error.
+ * NULL means whiteout-ed or not-found.
+ */
+static struct dentry*
+au_do_lookup(struct dentry *h_parent, struct dentry *dentry,
+ aufs_bindex_t bindex, struct qstr *wh_name,
+ struct au_do_lookup_args *args)
+{
+ struct dentry *h_dentry;
+ struct inode *h_inode;
+ struct au_branch *br;
+ int wh_found, opq;
+ unsigned char wh_able;
+ const unsigned char allow_neg = !!au_ftest_lkup(args->flags, ALLOW_NEG);
+ const unsigned char ignore_perm = !!au_ftest_lkup(args->flags,
+ IGNORE_PERM);
+
+ wh_found = 0;
+ br = au_sbr(dentry->d_sb, bindex);
+ wh_able = !!au_br_whable(br->br_perm);
+ if (wh_able)
+ wh_found = au_wh_test(h_parent, wh_name, /*try_sio*/0);
+ h_dentry = ERR_PTR(wh_found);
+ if (!wh_found)
+ goto real_lookup;
+ if (unlikely(wh_found < 0))
+ goto out;
+
+ /* We found a whiteout */
+ /* au_set_dbend(dentry, bindex); */
+ au_set_dbwh(dentry, bindex);
+ if (!allow_neg)
+ return NULL; /* success */
+
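+ /*
+ * lookup the real name; even when a whiteout was found, a negative
+ * dentry may still be wanted for a succeeding create
+ */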
+real_lookup:
+ if (!ignore_perm)
+ h_dentry = vfsub_lkup_one(&dentry->d_name, h_parent);
+ else
+ h_dentry = au_sio_lkup_one(&dentry->d_name, h_parent);
+ if (IS_ERR(h_dentry)) {
+ if (PTR_ERR(h_dentry) == -ENAMETOOLONG
+ && !allow_neg)
+ h_dentry = NULL;
+ goto out;
+ }
+
+ h_inode = d_inode(h_dentry);
+ if (d_is_negative(h_dentry)) {
+ if (!allow_neg)
+ goto out_neg;
+ } else if (wh_found
+ || (args->type && args->type != (h_inode->i_mode & S_IFMT)))
+ goto out_neg;
+
+ if (au_dbend(dentry) <= bindex)
+ au_set_dbend(dentry, bindex);
+ if (au_dbstart(dentry) < 0 || bindex < au_dbstart(dentry))
+ au_set_dbstart(dentry, bindex);
+ au_set_h_dptr(dentry, bindex, h_dentry);
+
+ if (!d_is_dir(h_dentry)
+ || !wh_able
+ || (d_really_is_positive(dentry) && !d_is_dir(dentry)))
+ goto out; /* success */
+
+ inode_lock_nested(h_inode, AuLsc_I_CHILD);
+ opq = au_diropq_test(h_dentry);
+ inode_unlock(h_inode);
+ if (opq > 0)
+ au_set_dbdiropq(dentry, bindex);
+ else if (unlikely(opq < 0)) {
+ au_set_h_dptr(dentry, bindex, NULL);
+ h_dentry = ERR_PTR(opq);
+ }
+ goto out;
+
+out_neg:
+ dput(h_dentry);
+ h_dentry = NULL;
+out:
+ return h_dentry;
+}
+
+static int au_test_shwh(struct super_block *sb, const struct qstr *name)
+{
+ if (unlikely(!au_opt_test(au_mntflags(sb), SHWH)
+ && !strncmp(name->name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)))
+ return -EPERM;
+ return 0;
+}
+
+/*
+ * returns the number of lower positive dentries,
+ * otherwise an error.
+ * can be called at unlinking, with @type being zero.
+ */
+int au_lkup_dentry(struct dentry *dentry, aufs_bindex_t bstart, mode_t type)
+{
+ int npositive, err;
+ aufs_bindex_t bindex, btail, bdiropq;
+ unsigned char isdir, dirperm1;
+ struct qstr whname;
+ struct au_do_lookup_args args = {
+ .flags = 0,
+ .type = type
+ };
+ const struct qstr *name = &dentry->d_name;
+ struct dentry *parent;
+ struct super_block *sb;
+
+ sb = dentry->d_sb;
+ err = au_test_shwh(sb, name);
+ if (unlikely(err))
+ goto out;
+
+ err = au_wh_name_alloc(&whname, name);
+ if (unlikely(err))
+ goto out;
+
+ isdir = !!d_is_dir(dentry);
+ if (!type)
+ au_fset_lkup(args.flags, ALLOW_NEG);
+ dirperm1 = !!au_opt_test(au_mntflags(sb), DIRPERM1);
+
+ npositive = 0;
+ parent = dget_parent(dentry);
+ btail = au_dbtaildir(parent);
+ for (bindex = bstart; bindex <= btail; bindex++) {
+ struct dentry *h_parent, *h_dentry;
+ struct inode *h_inode, *h_dir;
+
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (h_dentry) {
+ if (d_is_positive(h_dentry))
+ npositive++;
+ if (type != S_IFDIR)
+ break;
+ continue;
+ }
+ h_parent = au_h_dptr(parent, bindex);
+ if (!h_parent || !d_is_dir(h_parent))
+ continue;
+
+ h_dir = d_inode(h_parent);
+ inode_lock_nested(h_dir, AuLsc_I_PARENT);
+ h_dentry = au_do_lookup(h_parent, dentry, bindex, &whname,
+ &args);
+ inode_unlock(h_dir);
+ err = PTR_ERR(h_dentry);
+ if (IS_ERR(h_dentry))
+ goto out_parent;
+ if (h_dentry)
+ au_fclr_lkup(args.flags, ALLOW_NEG);
+ if (dirperm1)
+ au_fset_lkup(args.flags, IGNORE_PERM);
+
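+ /* a whiteout on this branch hides all the lower branches */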
+ if (au_dbwh(dentry) == bindex)
+ break;
+ if (!h_dentry)
+ continue;
+ if (d_is_negative(h_dentry))
+ continue;
+ h_inode = d_inode(h_dentry);
+ npositive++;
+ if (!args.type)
+ args.type = h_inode->i_mode & S_IFMT;
+ if (args.type != S_IFDIR)
+ break;
+ else if (isdir) {
+ /* the type of lower may be different */
+ bdiropq = au_dbdiropq(dentry);
+ if (bdiropq >= 0 && bdiropq <= bindex)
+ break;
+ }
+ }
+
+ if (npositive) {
+ AuLabel(positive);
+ au_update_dbstart(dentry);
+ }
+ err = npositive;
+ if (unlikely(!au_opt_test(au_mntflags(sb), UDBA_NONE)
+ && au_dbstart(dentry) < 0)) {
+ err = -EIO;
+ AuIOErr("both of real entry and whiteout found, %pd, err %d\n",
+ dentry, err);
+ }
+
+out_parent:
+ dput(parent);
+ kfree(whname.name);
+out:
+ return err;
+}
+
+struct dentry *au_sio_lkup_one(struct qstr *name, struct dentry *parent)
+{
+ struct dentry *dentry;
+ int wkq_err;
+
+ if (!au_test_h_perm_sio(d_inode(parent), MAY_EXEC))
+ dentry = vfsub_lkup_one(name, parent);
+ else {
+ struct vfsub_lkup_one_args args = {
+ .errp = &dentry,
+ .name = name,
+ .parent = parent
+ };
+
+ wkq_err = au_wkq_wait(vfsub_call_lkup_one, &args);
+ if (unlikely(wkq_err))
+ dentry = ERR_PTR(wkq_err);
+ }
+
+ return dentry;
+}
+
+/*
+ * lookup @dentry on @bindex which should be negative.
+ */
+int au_lkup_neg(struct dentry *dentry, aufs_bindex_t bindex, int wh)
+{
+ int err;
+ struct dentry *parent, *h_parent, *h_dentry;
+ struct au_branch *br;
+
+ parent = dget_parent(dentry);
+ h_parent = au_h_dptr(parent, bindex);
+ br = au_sbr(dentry->d_sb, bindex);
+ if (wh)
+ h_dentry = au_whtmp_lkup(h_parent, br, &dentry->d_name);
+ else
+ h_dentry = au_sio_lkup_one(&dentry->d_name, h_parent);
+ err = PTR_ERR(h_dentry);
+ if (IS_ERR(h_dentry))
+ goto out;
+ if (unlikely(d_is_positive(h_dentry))) {
+ err = -EIO;
+ AuIOErr("%pd should be negative on b%d.\n", h_dentry, bindex);
+ dput(h_dentry);
+ goto out;
+ }
+
+ err = 0;
+ if (bindex < au_dbstart(dentry))
+ au_set_dbstart(dentry, bindex);
+ if (au_dbend(dentry) < bindex)
+ au_set_dbend(dentry, bindex);
+ au_set_h_dptr(dentry, bindex, h_dentry);
+
+out:
+ dput(parent);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* subset of struct inode */
+struct au_iattr {
+ unsigned long i_ino;
+ /* unsigned int i_nlink; */
+ kuid_t i_uid;
+ kgid_t i_gid;
+ u64 i_version;
+/*
+ loff_t i_size;
+ blkcnt_t i_blocks;
+*/
+ umode_t i_mode;
+};
+
+static void au_iattr_save(struct au_iattr *ia, struct inode *h_inode)
+{
+ ia->i_ino = h_inode->i_ino;
+ /* ia->i_nlink = h_inode->i_nlink; */
+ ia->i_uid = h_inode->i_uid;
+ ia->i_gid = h_inode->i_gid;
+ ia->i_version = h_inode->i_version;
+/*
+ ia->i_size = h_inode->i_size;
+ ia->i_blocks = h_inode->i_blocks;
+*/
+ ia->i_mode = (h_inode->i_mode & S_IFMT);
+}
+
+static int au_iattr_test(struct au_iattr *ia, struct inode *h_inode)
+{
+ return ia->i_ino != h_inode->i_ino
+ /* || ia->i_nlink != h_inode->i_nlink */
+ || !uid_eq(ia->i_uid, h_inode->i_uid)
+ || !gid_eq(ia->i_gid, h_inode->i_gid)
+ || ia->i_version != h_inode->i_version
+/*
+ || ia->i_size != h_inode->i_size
+ || ia->i_blocks != h_inode->i_blocks
+*/
+ || ia->i_mode != (h_inode->i_mode & S_IFMT);
+}
+
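+/*
+ * re-lookup the name on the branch and compare the result with the cached
+ * dentry/inode, in order to detect changes made directly on the branch
+ * (ie. bypassing aufs)
+ */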
+static int au_h_verify_dentry(struct dentry *h_dentry, struct dentry *h_parent,
+ struct au_branch *br)
+{
+ int err;
+ struct au_iattr ia;
+ struct inode *h_inode;
+ struct dentry *h_d;
+ struct super_block *h_sb;
+
+ err = 0;
+ memset(&ia, -1, sizeof(ia));
+ h_sb = h_dentry->d_sb;
+ h_inode = NULL;
+ if (d_is_positive(h_dentry)) {
+ h_inode = d_inode(h_dentry);
+ au_iattr_save(&ia, h_inode);
+ } else if (au_test_nfs(h_sb) || au_test_fuse(h_sb))
+ /* nfs d_revalidate may return 0 for a negative dentry */
+ /* fuse d_revalidate always returns 0 for a negative dentry */
+ goto out;
+
+ /* main purpose is namei.c:cached_lookup() and d_revalidate */
+ h_d = vfsub_lkup_one(&h_dentry->d_name, h_parent);
+ err = PTR_ERR(h_d);
+ if (IS_ERR(h_d))
+ goto out;
+
+ err = 0;
+ if (unlikely(h_d != h_dentry
+ || d_inode(h_d) != h_inode
+ || (h_inode && au_iattr_test(&ia, h_inode))))
+ err = au_busy_or_stale();
+ dput(h_d);
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+int au_h_verify(struct dentry *h_dentry, unsigned int udba, struct inode *h_dir,
+ struct dentry *h_parent, struct au_branch *br)
+{
+ int err;
+
+ err = 0;
+ if (udba == AuOpt_UDBA_REVAL
+ && !au_test_fs_remote(h_dentry->d_sb)) {
+ IMustLock(h_dir);
+ err = (d_inode(h_dentry->d_parent) != h_dir);
+ } else if (udba != AuOpt_UDBA_NONE)
+ err = au_h_verify_dentry(h_dentry, h_parent, br);
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_do_refresh_hdentry(struct dentry *dentry, struct dentry *parent)
+{
+ int err;
+ aufs_bindex_t new_bindex, bindex, bend, bwh, bdiropq;
+ struct au_hdentry tmp, *p, *q;
+ struct au_dinfo *dinfo;
+ struct super_block *sb;
+
+ DiMustWriteLock(dentry);
+
+ sb = dentry->d_sb;
+ dinfo = au_di(dentry);
+ bend = dinfo->di_bend;
+ bwh = dinfo->di_bwh;
+ bdiropq = dinfo->di_bdiropq;
+ p = dinfo->di_hdentry + dinfo->di_bstart;
+ for (bindex = dinfo->di_bstart; bindex <= bend; bindex++, p++) {
+ if (!p->hd_dentry)
+ continue;
+
+ new_bindex = au_br_index(sb, p->hd_id);
+ if (new_bindex == bindex)
+ continue;
+
+ if (dinfo->di_bwh == bindex)
+ bwh = new_bindex;
+ if (dinfo->di_bdiropq == bindex)
+ bdiropq = new_bindex;
+ if (new_bindex < 0) {
+ au_hdput(p);
+ p->hd_dentry = NULL;
+ continue;
+ }
+
+ /* swap two lower dentries, and loop again */
+ q = dinfo->di_hdentry + new_bindex;
+ tmp = *q;
+ *q = *p;
+ *p = tmp;
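+ /* something was swapped into this slot, so examine it again */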
+ if (tmp.hd_dentry) {
+ bindex--;
+ p--;
+ }
+ }
+
+ dinfo->di_bwh = -1;
+ if (bwh >= 0 && bwh <= au_sbend(sb) && au_sbr_whable(sb, bwh))
+ dinfo->di_bwh = bwh;
+
+ dinfo->di_bdiropq = -1;
+ if (bdiropq >= 0
+ && bdiropq <= au_sbend(sb)
+ && au_sbr_whable(sb, bdiropq))
+ dinfo->di_bdiropq = bdiropq;
+
+ err = -EIO;
+ dinfo->di_bstart = -1;
+ dinfo->di_bend = -1;
+ bend = au_dbend(parent);
+ p = dinfo->di_hdentry;
+ for (bindex = 0; bindex <= bend; bindex++, p++)
+ if (p->hd_dentry) {
+ dinfo->di_bstart = bindex;
+ break;
+ }
+
+ if (dinfo->di_bstart >= 0) {
+ p = dinfo->di_hdentry + bend;
+ for (bindex = bend; bindex >= 0; bindex--, p--)
+ if (p->hd_dentry) {
+ dinfo->di_bend = bindex;
+ err = 0;
+ break;
+ }
+ }
+
+ return err;
+}
+
+static void au_do_hide(struct dentry *dentry)
+{
+ struct inode *inode;
+
+ if (d_really_is_positive(dentry)) {
+ inode = d_inode(dentry);
+ if (!d_is_dir(dentry)) {
+ if (inode->i_nlink && !d_unhashed(dentry))
+ drop_nlink(inode);
+ } else {
+ clear_nlink(inode);
+ /* stop next lookup */
+ inode->i_flags |= S_DEAD;
+ }
+ smp_mb(); /* necessary? */
+ }
+ d_drop(dentry);
+}
+
+static int au_hide_children(struct dentry *parent)
+{
+ int err, i, j, ndentry;
+ struct au_dcsub_pages dpages;
+ struct au_dpage *dpage;
+ struct dentry *dentry;
+
+ err = au_dpages_init(&dpages, GFP_NOFS);
+ if (unlikely(err))
+ goto out;
+ err = au_dcsub_pages(&dpages, parent, NULL, NULL);
+ if (unlikely(err))
+ goto out_dpages;
+
+ /* in reverse order */
+ for (i = dpages.ndpage - 1; i >= 0; i--) {
+ dpage = dpages.dpages + i;
+ ndentry = dpage->ndentry;
+ for (j = ndentry - 1; j >= 0; j--) {
+ dentry = dpage->dentries[j];
+ if (dentry != parent)
+ au_do_hide(dentry);
+ }
+ }
+
+out_dpages:
+ au_dpages_free(&dpages);
+out:
+ return err;
+}
+
+static void au_hide(struct dentry *dentry)
+{
+ int err;
+
+ AuDbgDentry(dentry);
+ if (d_is_dir(dentry)) {
+ /* shrink_dcache_parent(dentry); */
+ err = au_hide_children(dentry);
+ if (unlikely(err))
+ AuIOErr("%pd, failed hiding children, ignored %d\n",
+ dentry, err);
+ }
+ au_do_hide(dentry);
+}
+
+/*
+ * By adding a dirty branch, a cached dentry may be affected in various ways.
+ *
+ * a dirty branch is added
+ * - on the top of layers
+ * - in the middle of layers
+ * - to the bottom of layers
+ *
+ * on the added branch there exists
+ * - a whiteout
+ * - a diropq
+ * - a same named entry
+ * + exist
+ * * negative --> positive
+ * * positive --> positive
+ * - type is unchanged
+ * - type is changed
+ * + doesn't exist
+ * * negative --> negative
+ * * positive --> negative (rejected by au_br_del() for non-dir case)
+ * - none
+ */
+static int au_refresh_by_dinfo(struct dentry *dentry, struct au_dinfo *dinfo,
+ struct au_dinfo *tmp)
+{
+ int err;
+ aufs_bindex_t bindex, bend;
+ struct {
+ struct dentry *dentry;
+ struct inode *inode;
+ mode_t mode;
+ } orig_h, tmp_h = {
+ .dentry = NULL
+ };
+ struct au_hdentry *hd;
+ struct inode *inode, *h_inode;
+ struct dentry *h_dentry;
+
+ err = 0;
+ AuDebugOn(dinfo->di_bstart < 0);
+ orig_h.mode = 0;
+ orig_h.dentry = dinfo->di_hdentry[dinfo->di_bstart].hd_dentry;
+ orig_h.inode = NULL;
+ if (d_is_positive(orig_h.dentry)) {
+ orig_h.inode = d_inode(orig_h.dentry);
+ orig_h.mode = orig_h.inode->i_mode & S_IFMT;
+ }
+ if (tmp->di_bstart >= 0) {
+ tmp_h.dentry = tmp->di_hdentry[tmp->di_bstart].hd_dentry;
+ if (d_is_positive(tmp_h.dentry)) {
+ tmp_h.inode = d_inode(tmp_h.dentry);
+ tmp_h.mode = tmp_h.inode->i_mode & S_IFMT;
+ }
+ }
+
+ inode = NULL;
+ if (d_really_is_positive(dentry))
+ inode = d_inode(dentry);
+ if (!orig_h.inode) {
+ AuDbg("nagative originally\n");
+ if (inode) {
+ au_hide(dentry);
+ goto out;
+ }
+ AuDebugOn(inode);
+ AuDebugOn(dinfo->di_bstart != dinfo->di_bend);
+ AuDebugOn(dinfo->di_bdiropq != -1);
+
+ if (!tmp_h.inode) {
+ AuDbg("negative --> negative\n");
+ /* should have only one negative lower */
+ if (tmp->di_bstart >= 0
+ && tmp->di_bstart < dinfo->di_bstart) {
+ AuDebugOn(tmp->di_bstart != tmp->di_bend);
+ AuDebugOn(dinfo->di_bstart != dinfo->di_bend);
+ au_set_h_dptr(dentry, dinfo->di_bstart, NULL);
+ au_di_cp(dinfo, tmp);
+ hd = tmp->di_hdentry + tmp->di_bstart;
+ au_set_h_dptr(dentry, tmp->di_bstart,
+ dget(hd->hd_dentry));
+ }
+ au_dbg_verify_dinode(dentry);
+ } else {
+ AuDbg("negative --> positive\n");
+ /*
+ * similar to the behaviour of creating while bypassing
+ * aufs.
+ * unhash it in order to force an error in the
+ * succeeding create operation.
+ * we should not set S_DEAD here.
+ */
+ d_drop(dentry);
+ /* au_di_swap(tmp, dinfo); */
+ au_dbg_verify_dinode(dentry);
+ }
+ } else {
+ AuDbg("positive originally\n");
+ /* inode may be NULL */
+ AuDebugOn(inode && (inode->i_mode & S_IFMT) != orig_h.mode);
+ if (!tmp_h.inode) {
+ AuDbg("positive --> negative\n");
+ /* or bypassing aufs */
+ au_hide(dentry);
+ if (tmp->di_bwh >= 0 && tmp->di_bwh <= dinfo->di_bstart)
+ dinfo->di_bwh = tmp->di_bwh;
+ if (inode)
+ err = au_refresh_hinode_self(inode);
+ au_dbg_verify_dinode(dentry);
+ } else if (orig_h.mode == tmp_h.mode) {
+ AuDbg("positive --> positive, same type\n");
+ if (!S_ISDIR(orig_h.mode)
+ && dinfo->di_bstart > tmp->di_bstart) {
+ /*
+ * similar to the behaviour of removing and
+ * creating.
+ */
+ au_hide(dentry);
+ if (inode)
+ err = au_refresh_hinode_self(inode);
+ au_dbg_verify_dinode(dentry);
+ } else {
+ /* fill empty slots */
+ if (dinfo->di_bstart > tmp->di_bstart)
+ dinfo->di_bstart = tmp->di_bstart;
+ if (dinfo->di_bend < tmp->di_bend)
+ dinfo->di_bend = tmp->di_bend;
+ dinfo->di_bwh = tmp->di_bwh;
+ dinfo->di_bdiropq = tmp->di_bdiropq;
+ hd = tmp->di_hdentry;
+ bend = dinfo->di_bend;
+ for (bindex = tmp->di_bstart; bindex <= bend;
+ bindex++) {
+ if (au_h_dptr(dentry, bindex))
+ continue;
+ h_dentry = hd[bindex].hd_dentry;
+ if (!h_dentry)
+ continue;
+ AuDebugOn(d_is_negative(h_dentry));
+ h_inode = d_inode(h_dentry);
+ AuDebugOn(orig_h.mode
+ != (h_inode->i_mode
+ & S_IFMT));
+ au_set_h_dptr(dentry, bindex,
+ dget(h_dentry));
+ }
+ err = au_refresh_hinode(inode, dentry);
+ au_dbg_verify_dinode(dentry);
+ }
+ } else {
+ AuDbg("positive --> positive, different type\n");
+ /* similar to the behaviour of removing and creating */
+ au_hide(dentry);
+ if (inode)
+ err = au_refresh_hinode_self(inode);
+ au_dbg_verify_dinode(dentry);
+ }
+ }
+
+out:
+ return err;
+}
+
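+/* switch between the revalidating and the non-revalidating dentry operations */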
+void au_refresh_dop(struct dentry *dentry, int force_reval)
+{
+ const struct dentry_operations *dop
+ = force_reval ? &aufs_dop : dentry->d_sb->s_d_op;
+ static const unsigned int mask
+ = DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE;
+
+ BUILD_BUG_ON(sizeof(mask) != sizeof(dentry->d_flags));
+
+ if (dentry->d_op == dop)
+ return;
+
+ AuDbg("%pd\n", dentry);
+ spin_lock(&dentry->d_lock);
+ if (dop == &aufs_dop)
+ dentry->d_flags |= mask;
+ else
+ dentry->d_flags &= ~mask;
+ dentry->d_op = dop;
+ spin_unlock(&dentry->d_lock);
+}
+
+int au_refresh_dentry(struct dentry *dentry, struct dentry *parent)
+{
+ int err, ebrange;
+ unsigned int sigen;
+ struct au_dinfo *dinfo, *tmp;
+ struct super_block *sb;
+ struct inode *inode;
+
+ DiMustWriteLock(dentry);
+ AuDebugOn(IS_ROOT(dentry));
+ AuDebugOn(d_really_is_negative(parent));
+
+ sb = dentry->d_sb;
+ sigen = au_sigen(sb);
+ err = au_digen_test(parent, sigen);
+ if (unlikely(err))
+ goto out;
+
+ dinfo = au_di(dentry);
+ err = au_di_realloc(dinfo, au_sbend(sb) + 1);
+ if (unlikely(err))
+ goto out;
+ ebrange = au_dbrange_test(dentry);
+ if (!ebrange)
+ ebrange = au_do_refresh_hdentry(dentry, parent);
+
+ if (d_unhashed(dentry) || ebrange /* || dinfo->di_tmpfile */) {
+ AuDebugOn(au_dbstart(dentry) < 0 && au_dbend(dentry) >= 0);
+ if (d_really_is_positive(dentry)) {
+ inode = d_inode(dentry);
+ err = au_refresh_hinode_self(inode);
+ }
+ au_dbg_verify_dinode(dentry);
+ if (!err)
+ goto out_dgen; /* success */
+ goto out;
+ }
+
+ /* temporary dinfo */
+ AuDbgDentry(dentry);
+ err = -ENOMEM;
+ tmp = au_di_alloc(sb, AuLsc_DI_TMP);
+ if (unlikely(!tmp))
+ goto out;
+ au_di_swap(tmp, dinfo);
+ /* returns the number of positive dentries */
+ /*
+ * if the current working dir has been removed, it returns an error,
+ * but the dentry itself is still valid.
+ */
+ err = au_lkup_dentry(dentry, /*bstart*/0, /*type*/0);
+ AuDbgDentry(dentry);
+ au_di_swap(tmp, dinfo);
+ if (err == -ENOENT)
+ err = 0;
+ if (err >= 0) {
+ /* compare/refresh by dinfo */
+ AuDbgDentry(dentry);
+ err = au_refresh_by_dinfo(dentry, dinfo, tmp);
+ au_dbg_verify_dinode(dentry);
+ AuTraceErr(err);
+ }
+ au_rw_write_unlock(&tmp->di_rwsem);
+ au_di_free(tmp);
+ if (unlikely(err))
+ goto out;
+
+out_dgen:
+ au_update_digen(dentry);
+out:
+ if (unlikely(err && !(dentry->d_flags & DCACHE_NFSFS_RENAMED))) {
+ AuIOErr("failed refreshing %pd, %d\n", dentry, err);
+ AuDbgDentry(dentry);
+ }
+ AuTraceErr(err);
+ return err;
+}
+
+static int au_do_h_d_reval(struct dentry *h_dentry, unsigned int flags,
+ struct dentry *dentry, aufs_bindex_t bindex)
+{
+ int err, valid;
+
+ err = 0;
+ if (!(h_dentry->d_flags & DCACHE_OP_REVALIDATE))
+ goto out;
+
+ AuDbg("b%d\n", bindex);
+ /*
+ * gave up supporting LOOKUP_CREATE/OPEN for lower fs,
+ * due to whiteout and branch permission.
+ */
+ flags &= ~(/*LOOKUP_PARENT |*/ LOOKUP_OPEN | LOOKUP_CREATE
+ | LOOKUP_FOLLOW | LOOKUP_EXCL);
+ /* it may return tri-state */
+ valid = h_dentry->d_op->d_revalidate(h_dentry, flags);
+
+ if (unlikely(valid < 0))
+ err = valid;
+ else if (!valid)
+ err = -EINVAL;
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/* todo: remove this */
+static int h_d_revalidate(struct dentry *dentry, struct inode *inode,
+ unsigned int flags, int do_udba)
+{
+ int err;
+ umode_t mode, h_mode;
+ aufs_bindex_t bindex, btail, bstart, ibs, ibe;
+ unsigned char plus, unhashed, is_root, h_plus, h_nfs, tmpfile;
+ struct inode *h_inode, *h_cached_inode;
+ struct dentry *h_dentry;
+ struct qstr *name, *h_name;
+
+ err = 0;
+ plus = 0;
+ mode = 0;
+ ibs = -1;
+ ibe = -1;
+ unhashed = !!d_unhashed(dentry);
+ is_root = !!IS_ROOT(dentry);
+ name = &dentry->d_name;
+ tmpfile = au_di(dentry)->di_tmpfile;
+
+ /*
+ * Theoretically, REVAL test should be unnecessary in case of
+ * {FS,I}NOTIFY.
+ * But {fs,i}notify doesn't fire all the necessary events,
+ * e.g. IN_ATTRIB for atime/nlink/pageio.
+ * Let's do REVAL test too.
+ */
+ if (do_udba && inode) {
+ mode = (inode->i_mode & S_IFMT);
+ plus = (inode->i_nlink > 0);
+ ibs = au_ibstart(inode);
+ ibe = au_ibend(inode);
+ }
+
+ bstart = au_dbstart(dentry);
+ btail = bstart;
+ if (inode && S_ISDIR(inode->i_mode))
+ btail = au_dbtaildir(dentry);
+ for (bindex = bstart; bindex <= btail; bindex++) {
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (!h_dentry)
+ continue;
+
+ AuDbg("b%d, %pd\n", bindex, h_dentry);
+ h_nfs = !!au_test_nfs(h_dentry->d_sb);
+ spin_lock(&h_dentry->d_lock);
+ h_name = &h_dentry->d_name;
+ if (unlikely(do_udba
+ && !is_root
+ && ((!h_nfs
+ && (unhashed != !!d_unhashed(h_dentry)
+ || (!tmpfile
+ && !au_qstreq(name, h_name))
+ ))
+ || (h_nfs
+ && !(flags & LOOKUP_OPEN)
+ && (h_dentry->d_flags
+ & DCACHE_NFSFS_RENAMED)))
+ )) {
+ int h_unhashed;
+
+ h_unhashed = d_unhashed(h_dentry);
+ spin_unlock(&h_dentry->d_lock);
+ AuDbg("unhash 0x%x 0x%x, %pd %pd\n",
+ unhashed, h_unhashed, dentry, h_dentry);
+ goto err;
+ }
+ spin_unlock(&h_dentry->d_lock);
+
+ err = au_do_h_d_reval(h_dentry, flags, dentry, bindex);
+ if (unlikely(err))
+ /* do not goto err, to keep the errno */
+ break;
+
+ /* todo: plink too? */
+ if (!do_udba)
+ continue;
+
+ /* UDBA tests */
+ if (unlikely(!!inode != d_is_positive(h_dentry)))
+ goto err;
+
+ h_inode = NULL;
+ if (d_is_positive(h_dentry))
+ h_inode = d_inode(h_dentry);
+ h_plus = plus;
+ h_mode = mode;
+ h_cached_inode = h_inode;
+ if (h_inode) {
+ h_mode = (h_inode->i_mode & S_IFMT);
+ h_plus = (h_inode->i_nlink > 0);
+ }
+ if (inode && ibs <= bindex && bindex <= ibe)
+ h_cached_inode = au_h_iptr(inode, bindex);
+
+ if (!h_nfs) {
+ if (unlikely(plus != h_plus && !tmpfile))
+ goto err;
+ } else {
+ if (unlikely(!(h_dentry->d_flags & DCACHE_NFSFS_RENAMED)
+ && !is_root
+ && !IS_ROOT(h_dentry)
+ && unhashed != d_unhashed(h_dentry)))
+ goto err;
+ }
+ if (unlikely(mode != h_mode
+ || h_cached_inode != h_inode))
+ goto err;
+ continue;
+
+err:
+ err = -EINVAL;
+ break;
+ }
+
+ AuTraceErr(err);
+ return err;
+}
+
+/* todo: consolidate with do_refresh() and au_reval_for_attr() */
+static int simple_reval_dpath(struct dentry *dentry, unsigned int sigen)
+{
+ int err;
+ struct dentry *parent;
+
+ if (!au_digen_test(dentry, sigen))
+ return 0;
+
+ parent = dget_parent(dentry);
+ di_read_lock_parent(parent, AuLock_IR);
+ AuDebugOn(au_digen_test(parent, sigen));
+ au_dbg_verify_gen(parent, sigen);
+ err = au_refresh_dentry(dentry, parent);
+ di_read_unlock(parent, AuLock_IR);
+ dput(parent);
+ AuTraceErr(err);
+ return err;
+}
+
+int au_reval_dpath(struct dentry *dentry, unsigned int sigen)
+{
+ int err;
+ struct dentry *d, *parent;
+
+ if (!au_ftest_si(au_sbi(dentry->d_sb), FAILED_REFRESH_DIR))
+ return simple_reval_dpath(dentry, sigen);
+
+ /* slow loop, keep it simple and stupid */
+ /* cf: au_cpup_dirs() */
+ err = 0;
+ parent = NULL;
+ while (au_digen_test(dentry, sigen)) {
+ d = dentry;
+ while (1) {
+ dput(parent);
+ parent = dget_parent(d);
+ if (!au_digen_test(parent, sigen))
+ break;
+ d = parent;
+ }
+
+ if (d != dentry)
+ di_write_lock_child2(d);
+
+ /* someone might update our dentry while we were sleeping */
+ if (au_digen_test(d, sigen)) {
+ /*
+ * todo: consolidate with simple_reval_dpath(),
+ * do_refresh() and au_reval_for_attr().
+ */
+ di_read_lock_parent(parent, AuLock_IR);
+ err = au_refresh_dentry(d, parent);
+ di_read_unlock(parent, AuLock_IR);
+ }
+
+ if (d != dentry)
+ di_write_unlock(d);
+ dput(parent);
+ if (unlikely(err))
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * returns 1 if valid, otherwise 0.
+ */
+static int aufs_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+ int valid, err;
+ unsigned int sigen;
+ unsigned char do_udba;
+ struct super_block *sb;
+ struct inode *inode;
+
+ /* todo: support rcu-walk? */
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ valid = 0;
+ if (unlikely(!au_di(dentry)))
+ goto out;
+
+ valid = 1;
+ sb = dentry->d_sb;
+ /*
+ * todo: very ugly
+ * i_mutex of parent dir may be held,
+ * but we should not return 'invalid' due to busy.
+ */
+ err = aufs_read_lock(dentry, AuLock_FLUSH | AuLock_DW | AuLock_NOPLM);
+ if (unlikely(err)) {
+ valid = err;
+ AuTraceErr(err);
+ goto out;
+ }
+ inode = NULL;
+ if (d_really_is_positive(dentry))
+ inode = d_inode(dentry);
+ if (unlikely(inode && is_bad_inode(inode))) {
+ err = -EINVAL;
+ AuTraceErr(err);
+ goto out_dgrade;
+ }
+ if (unlikely(au_dbrange_test(dentry))) {
+ err = -EINVAL;
+ AuTraceErr(err);
+ goto out_dgrade;
+ }
+
+ sigen = au_sigen(sb);
+ if (au_digen_test(dentry, sigen)) {
+ AuDebugOn(IS_ROOT(dentry));
+ err = au_reval_dpath(dentry, sigen);
+ if (unlikely(err)) {
+ AuTraceErr(err);
+ goto out_dgrade;
+ }
+ }
+ di_downgrade_lock(dentry, AuLock_IR);
+
+ err = -EINVAL;
+ if (!(flags & (LOOKUP_OPEN | LOOKUP_EMPTY))
+ && inode
+ && !(inode->i_state & I_LINKABLE)
+ && (IS_DEADDIR(inode) || !inode->i_nlink)) {
+ AuTraceErr(err);
+ goto out_inval;
+ }
+
+ do_udba = !au_opt_test(au_mntflags(sb), UDBA_NONE);
+ if (do_udba && inode) {
+ aufs_bindex_t bstart = au_ibstart(inode);
+ struct inode *h_inode;
+
+ if (bstart >= 0) {
+ h_inode = au_h_iptr(inode, bstart);
+ if (h_inode && au_test_higen(inode, h_inode)) {
+ AuTraceErr(err);
+ goto out_inval;
+ }
+ }
+ }
+
+ err = h_d_revalidate(dentry, inode, flags, do_udba);
+ if (unlikely(!err && do_udba && au_dbstart(dentry) < 0)) {
+ err = -EIO;
+ AuDbg("both of real entry and whiteout found, %p, err %d\n",
+ dentry, err);
+ }
+ goto out_inval;
+
+out_dgrade:
+ di_downgrade_lock(dentry, AuLock_IR);
+out_inval:
+ aufs_read_unlock(dentry, AuLock_IR);
+ AuTraceErr(err);
+ valid = !err;
+out:
+ if (!valid) {
+ AuDbg("%pd invalid, %d\n", dentry, valid);
+ d_drop(dentry);
+ }
+ return valid;
+}
+
+static void aufs_d_release(struct dentry *dentry)
+{
+ if (au_di(dentry)) {
+ au_di_fin(dentry);
+ au_hn_di_reinit(dentry);
+ }
+}
+
+const struct dentry_operations aufs_dop = {
+ .d_revalidate = aufs_d_revalidate,
+ .d_weak_revalidate = aufs_d_revalidate,
+ .d_release = aufs_d_release
+};
+
+/* aufs_dop without d_revalidate */
+const struct dentry_operations aufs_dop_noreval = {
+ .d_release = aufs_d_release
+};
diff --git a/fs/aufs/dentry.h b/fs/aufs/dentry.h
new file mode 100644
index 000000000..c794adf59
--- /dev/null
+++ b/fs/aufs/dentry.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * lookup and dentry operations
+ */
+
+#ifndef __AUFS_DENTRY_H__
+#define __AUFS_DENTRY_H__
+
+#ifdef __KERNEL__
+
+#include <linux/dcache.h>
+#include "rwsem.h"
+
+struct au_hdentry {
+ struct dentry *hd_dentry;
+ aufs_bindex_t hd_id;
+};
+
+struct au_dinfo {
+ atomic_t di_generation;
+
+ struct au_rwsem di_rwsem;
+ aufs_bindex_t di_bstart, di_bend, di_bwh, di_bdiropq;
+ unsigned char di_tmpfile; /* to allow the different name */
+ struct au_hdentry *di_hdentry;
+} ____cacheline_aligned_in_smp;
+
+/* ---------------------------------------------------------------------- */
+
+/* dentry.c */
+extern const struct dentry_operations aufs_dop, aufs_dop_noreval;
+struct au_branch;
+struct dentry *au_sio_lkup_one(struct qstr *name, struct dentry *parent);
+int au_h_verify(struct dentry *h_dentry, unsigned int udba, struct inode *h_dir,
+ struct dentry *h_parent, struct au_branch *br);
+
+int au_lkup_dentry(struct dentry *dentry, aufs_bindex_t bstart, mode_t type);
+int au_lkup_neg(struct dentry *dentry, aufs_bindex_t bindex, int wh);
+int au_refresh_dentry(struct dentry *dentry, struct dentry *parent);
+int au_reval_dpath(struct dentry *dentry, unsigned int sigen);
+void au_refresh_dop(struct dentry *dentry, int force_reval);
+
+/* dinfo.c */
+void au_di_init_once(void *_di);
+struct au_dinfo *au_di_alloc(struct super_block *sb, unsigned int lsc);
+void au_di_free(struct au_dinfo *dinfo);
+void au_di_swap(struct au_dinfo *a, struct au_dinfo *b);
+void au_di_cp(struct au_dinfo *dst, struct au_dinfo *src);
+int au_di_init(struct dentry *dentry);
+void au_di_fin(struct dentry *dentry);
+int au_di_realloc(struct au_dinfo *dinfo, int nbr);
+
+void di_read_lock(struct dentry *d, int flags, unsigned int lsc);
+void di_read_unlock(struct dentry *d, int flags);
+void di_downgrade_lock(struct dentry *d, int flags);
+void di_write_lock(struct dentry *d, unsigned int lsc);
+void di_write_unlock(struct dentry *d);
+void di_write_lock2_child(struct dentry *d1, struct dentry *d2, int isdir);
+void di_write_lock2_parent(struct dentry *d1, struct dentry *d2, int isdir);
+void di_write_unlock2(struct dentry *d1, struct dentry *d2);
+
+struct dentry *au_h_dptr(struct dentry *dentry, aufs_bindex_t bindex);
+struct dentry *au_h_d_alias(struct dentry *dentry, aufs_bindex_t bindex);
+aufs_bindex_t au_dbtail(struct dentry *dentry);
+aufs_bindex_t au_dbtaildir(struct dentry *dentry);
+
+void au_set_h_dptr(struct dentry *dentry, aufs_bindex_t bindex,
+ struct dentry *h_dentry);
+int au_digen_test(struct dentry *dentry, unsigned int sigen);
+int au_dbrange_test(struct dentry *dentry);
+void au_update_digen(struct dentry *dentry);
+void au_update_dbrange(struct dentry *dentry, int do_put_zero);
+void au_update_dbstart(struct dentry *dentry);
+void au_update_dbend(struct dentry *dentry);
+int au_find_dbindex(struct dentry *dentry, struct dentry *h_dentry);
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct au_dinfo *au_di(struct dentry *dentry)
+{
+ return dentry->d_fsdata;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* lock subclass for dinfo */
+enum {
+ AuLsc_DI_CHILD, /* child first */
+ AuLsc_DI_CHILD2, /* rename(2), link(2), and cpup at hnotify */
+ AuLsc_DI_CHILD3, /* copyup dirs */
+ AuLsc_DI_PARENT,
+ AuLsc_DI_PARENT2,
+ AuLsc_DI_PARENT3,
+ AuLsc_DI_TMP /* temp for replacing dinfo */
+};
+
+/*
+ * di_read_lock_child, di_write_lock_child,
+ * di_read_lock_child2, di_write_lock_child2,
+ * di_read_lock_child3, di_write_lock_child3,
+ * di_read_lock_parent, di_write_lock_parent,
+ * di_read_lock_parent2, di_write_lock_parent2,
+ * di_read_lock_parent3, di_write_lock_parent3,
+ */
+#define AuReadLockFunc(name, lsc) \
+static inline void di_read_lock_##name(struct dentry *d, int flags) \
+{ di_read_lock(d, flags, AuLsc_DI_##lsc); }
+
+#define AuWriteLockFunc(name, lsc) \
+static inline void di_write_lock_##name(struct dentry *d) \
+{ di_write_lock(d, AuLsc_DI_##lsc); }
+
+#define AuRWLockFuncs(name, lsc) \
+ AuReadLockFunc(name, lsc) \
+ AuWriteLockFunc(name, lsc)
+
+AuRWLockFuncs(child, CHILD);
+AuRWLockFuncs(child2, CHILD2);
+AuRWLockFuncs(child3, CHILD3);
+AuRWLockFuncs(parent, PARENT);
+AuRWLockFuncs(parent2, PARENT2);
+AuRWLockFuncs(parent3, PARENT3);
+
+#undef AuReadLockFunc
+#undef AuWriteLockFunc
+#undef AuRWLockFuncs
+
+#define DiMustNoWaiters(d) AuRwMustNoWaiters(&au_di(d)->di_rwsem)
+#define DiMustAnyLock(d) AuRwMustAnyLock(&au_di(d)->di_rwsem)
+#define DiMustWriteLock(d) AuRwMustWriteLock(&au_di(d)->di_rwsem)
+
+/* ---------------------------------------------------------------------- */
+
+/* todo: memory barrier? */
+static inline unsigned int au_digen(struct dentry *d)
+{
+ return atomic_read(&au_di(d)->di_generation);
+}
+
+static inline void au_h_dentry_init(struct au_hdentry *hdentry)
+{
+ hdentry->hd_dentry = NULL;
+}
+
+static inline void au_hdput(struct au_hdentry *hd)
+{
+ if (hd)
+ dput(hd->hd_dentry);
+}
+
+static inline aufs_bindex_t au_dbstart(struct dentry *dentry)
+{
+ DiMustAnyLock(dentry);
+ return au_di(dentry)->di_bstart;
+}
+
+static inline aufs_bindex_t au_dbend(struct dentry *dentry)
+{
+ DiMustAnyLock(dentry);
+ return au_di(dentry)->di_bend;
+}
+
+static inline aufs_bindex_t au_dbwh(struct dentry *dentry)
+{
+ DiMustAnyLock(dentry);
+ return au_di(dentry)->di_bwh;
+}
+
+static inline aufs_bindex_t au_dbdiropq(struct dentry *dentry)
+{
+ DiMustAnyLock(dentry);
+ return au_di(dentry)->di_bdiropq;
+}
+
+/* todo: hard/soft set? */
+static inline void au_set_dbstart(struct dentry *dentry, aufs_bindex_t bindex)
+{
+ DiMustWriteLock(dentry);
+ au_di(dentry)->di_bstart = bindex;
+}
+
+static inline void au_set_dbend(struct dentry *dentry, aufs_bindex_t bindex)
+{
+ DiMustWriteLock(dentry);
+ au_di(dentry)->di_bend = bindex;
+}
+
+static inline void au_set_dbwh(struct dentry *dentry, aufs_bindex_t bindex)
+{
+ DiMustWriteLock(dentry);
+ /* dbwh can be outside of bstart - bend range */
+ au_di(dentry)->di_bwh = bindex;
+}
+
+static inline void au_set_dbdiropq(struct dentry *dentry, aufs_bindex_t bindex)
+{
+ DiMustWriteLock(dentry);
+ au_di(dentry)->di_bdiropq = bindex;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_HNOTIFY
+static inline void au_digen_dec(struct dentry *d)
+{
+ atomic_dec(&au_di(d)->di_generation);
+}
+
+static inline void au_hn_di_reinit(struct dentry *dentry)
+{
+ dentry->d_fsdata = NULL;
+}
+#else
+AuStubVoid(au_hn_di_reinit, struct dentry *dentry __maybe_unused)
+#endif /* CONFIG_AUFS_HNOTIFY */
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DENTRY_H__ */
diff --git a/fs/aufs/dinfo.c b/fs/aufs/dinfo.c
new file mode 100644
index 000000000..ad6d045c4
--- /dev/null
+++ b/fs/aufs/dinfo.c
@@ -0,0 +1,537 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * dentry private data
+ */
+
+#include "aufs.h"
+
+void au_di_init_once(void *_dinfo)
+{
+ struct au_dinfo *dinfo = _dinfo;
+ static struct lock_class_key aufs_di;
+
+ au_rw_init(&dinfo->di_rwsem);
+ au_rw_class(&dinfo->di_rwsem, &aufs_di);
+}
+
+struct au_dinfo *au_di_alloc(struct super_block *sb, unsigned int lsc)
+{
+ struct au_dinfo *dinfo;
+ int nbr, i;
+
+ dinfo = au_cache_alloc_dinfo();
+ if (unlikely(!dinfo))
+ goto out;
+
+ nbr = au_sbend(sb) + 1;
+ if (nbr <= 0)
+ nbr = 1;
+ dinfo->di_hdentry = kcalloc(nbr, sizeof(*dinfo->di_hdentry), GFP_NOFS);
+ if (dinfo->di_hdentry) {
+ au_rw_write_lock_nested(&dinfo->di_rwsem, lsc);
+ dinfo->di_bstart = -1;
+ dinfo->di_bend = -1;
+ dinfo->di_bwh = -1;
+ dinfo->di_bdiropq = -1;
+ dinfo->di_tmpfile = 0;
+ for (i = 0; i < nbr; i++)
+ dinfo->di_hdentry[i].hd_id = -1;
+ goto out;
+ }
+
+ au_cache_free_dinfo(dinfo);
+ dinfo = NULL;
+
+out:
+ return dinfo;
+}
+
+void au_di_free(struct au_dinfo *dinfo)
+{
+ struct au_hdentry *p;
+ aufs_bindex_t bend, bindex;
+
+ /* dentry may not be revalidated */
+ bindex = dinfo->di_bstart;
+ if (bindex >= 0) {
+ bend = dinfo->di_bend;
+ p = dinfo->di_hdentry + bindex;
+ while (bindex++ <= bend)
+ au_hdput(p++);
+ }
+ kfree(dinfo->di_hdentry);
+ au_cache_free_dinfo(dinfo);
+}
+
+void au_di_swap(struct au_dinfo *a, struct au_dinfo *b)
+{
+ struct au_hdentry *p;
+ aufs_bindex_t bi;
+
+ AuRwMustWriteLock(&a->di_rwsem);
+ AuRwMustWriteLock(&b->di_rwsem);
+
+#define DiSwap(v, name) \
+ do { \
+ v = a->di_##name; \
+ a->di_##name = b->di_##name; \
+ b->di_##name = v; \
+ } while (0)
+
+ DiSwap(p, hdentry);
+ DiSwap(bi, bstart);
+ DiSwap(bi, bend);
+ DiSwap(bi, bwh);
+ DiSwap(bi, bdiropq);
+ /* smp_mb(); */
+
+#undef DiSwap
+}
+
+void au_di_cp(struct au_dinfo *dst, struct au_dinfo *src)
+{
+ AuRwMustWriteLock(&dst->di_rwsem);
+ AuRwMustWriteLock(&src->di_rwsem);
+
+ dst->di_bstart = src->di_bstart;
+ dst->di_bend = src->di_bend;
+ dst->di_bwh = src->di_bwh;
+ dst->di_bdiropq = src->di_bdiropq;
+ /* smp_mb(); */
+}
+
+int au_di_init(struct dentry *dentry)
+{
+ int err;
+ struct super_block *sb;
+ struct au_dinfo *dinfo;
+
+ err = 0;
+ sb = dentry->d_sb;
+ dinfo = au_di_alloc(sb, AuLsc_DI_CHILD);
+ if (dinfo) {
+ atomic_set(&dinfo->di_generation, au_sigen(sb));
+ /* smp_mb(); */ /* atomic_set */
+ dentry->d_fsdata = dinfo;
+ } else
+ err = -ENOMEM;
+
+ return err;
+}
+
+void au_di_fin(struct dentry *dentry)
+{
+ struct au_dinfo *dinfo;
+
+ dinfo = au_di(dentry);
+ AuRwDestroy(&dinfo->di_rwsem);
+ au_di_free(dinfo);
+}
+
+int au_di_realloc(struct au_dinfo *dinfo, int nbr)
+{
+ int err, sz;
+ struct au_hdentry *hdp;
+
+ AuRwMustWriteLock(&dinfo->di_rwsem);
+
+ err = -ENOMEM;
+ sz = sizeof(*hdp) * (dinfo->di_bend + 1);
+ if (!sz)
+ sz = sizeof(*hdp);
+ hdp = au_kzrealloc(dinfo->di_hdentry, sz, sizeof(*hdp) * nbr, GFP_NOFS);
+ if (hdp) {
+ dinfo->di_hdentry = hdp;
+ err = 0;
+ }
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void do_ii_write_lock(struct inode *inode, unsigned int lsc)
+{
+ switch (lsc) {
+ case AuLsc_DI_CHILD:
+ ii_write_lock_child(inode);
+ break;
+ case AuLsc_DI_CHILD2:
+ ii_write_lock_child2(inode);
+ break;
+ case AuLsc_DI_CHILD3:
+ ii_write_lock_child3(inode);
+ break;
+ case AuLsc_DI_PARENT:
+ ii_write_lock_parent(inode);
+ break;
+ case AuLsc_DI_PARENT2:
+ ii_write_lock_parent2(inode);
+ break;
+ case AuLsc_DI_PARENT3:
+ ii_write_lock_parent3(inode);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void do_ii_read_lock(struct inode *inode, unsigned int lsc)
+{
+ switch (lsc) {
+ case AuLsc_DI_CHILD:
+ ii_read_lock_child(inode);
+ break;
+ case AuLsc_DI_CHILD2:
+ ii_read_lock_child2(inode);
+ break;
+ case AuLsc_DI_CHILD3:
+ ii_read_lock_child3(inode);
+ break;
+ case AuLsc_DI_PARENT:
+ ii_read_lock_parent(inode);
+ break;
+ case AuLsc_DI_PARENT2:
+ ii_read_lock_parent2(inode);
+ break;
+ case AuLsc_DI_PARENT3:
+ ii_read_lock_parent3(inode);
+ break;
+ default:
+ BUG();
+ }
+}
+
+void di_read_lock(struct dentry *d, int flags, unsigned int lsc)
+{
+ struct inode *inode;
+
+ au_rw_read_lock_nested(&au_di(d)->di_rwsem, lsc);
+ if (d_really_is_positive(d)) {
+ inode = d_inode(d);
+ if (au_ftest_lock(flags, IW))
+ do_ii_write_lock(inode, lsc);
+ else if (au_ftest_lock(flags, IR))
+ do_ii_read_lock(inode, lsc);
+ }
+}
+
+void di_read_unlock(struct dentry *d, int flags)
+{
+ struct inode *inode;
+
+ if (d_really_is_positive(d)) {
+ inode = d_inode(d);
+ if (au_ftest_lock(flags, IW)) {
+ au_dbg_verify_dinode(d);
+ ii_write_unlock(inode);
+ } else if (au_ftest_lock(flags, IR)) {
+ au_dbg_verify_dinode(d);
+ ii_read_unlock(inode);
+ }
+ }
+ au_rw_read_unlock(&au_di(d)->di_rwsem);
+}
+
+void di_downgrade_lock(struct dentry *d, int flags)
+{
+ if (d_really_is_positive(d) && au_ftest_lock(flags, IR))
+ ii_downgrade_lock(d_inode(d));
+ au_rw_dgrade_lock(&au_di(d)->di_rwsem);
+}
+
+void di_write_lock(struct dentry *d, unsigned int lsc)
+{
+ au_rw_write_lock_nested(&au_di(d)->di_rwsem, lsc);
+ if (d_really_is_positive(d))
+ do_ii_write_lock(d_inode(d), lsc);
+}
+
+void di_write_unlock(struct dentry *d)
+{
+ au_dbg_verify_dinode(d);
+ if (d_really_is_positive(d))
+ ii_write_unlock(d_inode(d));
+ au_rw_write_unlock(&au_di(d)->di_rwsem);
+}
+
+void di_write_lock2_child(struct dentry *d1, struct dentry *d2, int isdir)
+{
+ AuDebugOn(d1 == d2
+ || d_inode(d1) == d_inode(d2)
+ || d1->d_sb != d2->d_sb);
+
+ if (isdir && au_test_subdir(d1, d2)) {
+ di_write_lock_child(d1);
+ di_write_lock_child2(d2);
+ } else {
+ /* there should be no races */
+ di_write_lock_child(d2);
+ di_write_lock_child2(d1);
+ }
+}
+
+void di_write_lock2_parent(struct dentry *d1, struct dentry *d2, int isdir)
+{
+ AuDebugOn(d1 == d2
+ || d_inode(d1) == d_inode(d2)
+ || d1->d_sb != d2->d_sb);
+
+ if (isdir && au_test_subdir(d1, d2)) {
+ di_write_lock_parent(d1);
+ di_write_lock_parent2(d2);
+ } else {
+ /* there should be no races */
+ di_write_lock_parent(d2);
+ di_write_lock_parent2(d1);
+ }
+}
+
+void di_write_unlock2(struct dentry *d1, struct dentry *d2)
+{
+ di_write_unlock(d1);
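+ /* when both dentries share the inode, its iinfo was unlocked just above */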
+ if (d_inode(d1) == d_inode(d2))
+ au_rw_write_unlock(&au_di(d2)->di_rwsem);
+ else
+ di_write_unlock(d2);
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct dentry *au_h_dptr(struct dentry *dentry, aufs_bindex_t bindex)
+{
+ struct dentry *d;
+
+ DiMustAnyLock(dentry);
+
+ if (au_dbstart(dentry) < 0 || bindex < au_dbstart(dentry))
+ return NULL;
+ AuDebugOn(bindex < 0);
+ d = au_di(dentry)->di_hdentry[0 + bindex].hd_dentry;
+ AuDebugOn(d && au_dcount(d) <= 0);
+ return d;
+}
+
+/*
+ * extended version of au_h_dptr().
+ * returns a hashed and positive (or linkable) h_dentry in bindex, NULL, or
+ * error.
+ */
+struct dentry *au_h_d_alias(struct dentry *dentry, aufs_bindex_t bindex)
+{
+ struct dentry *h_dentry;
+ struct inode *inode, *h_inode;
+
+ AuDebugOn(d_really_is_negative(dentry));
+
+ h_dentry = NULL;
+ if (au_dbstart(dentry) <= bindex
+ && bindex <= au_dbend(dentry))
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (h_dentry && !au_d_linkable(h_dentry)) {
+ dget(h_dentry);
+ goto out; /* success */
+ }
+
+ inode = d_inode(dentry);
+ AuDebugOn(bindex < au_ibstart(inode));
+ AuDebugOn(au_ibend(inode) < bindex);
+ h_inode = au_h_iptr(inode, bindex);
+ h_dentry = d_find_alias(h_inode);
+ if (h_dentry) {
+ if (!IS_ERR(h_dentry)) {
+ if (!au_d_linkable(h_dentry))
+ goto out; /* success */
+ dput(h_dentry);
+ } else
+ goto out;
+ }
+
+ if (au_opt_test(au_mntflags(dentry->d_sb), PLINK)) {
+ h_dentry = au_plink_lkup(inode, bindex);
+ AuDebugOn(!h_dentry);
+ if (!IS_ERR(h_dentry)) {
+ if (!au_d_hashed_positive(h_dentry))
+ goto out; /* success */
+ dput(h_dentry);
+ h_dentry = NULL;
+ }
+ }
+
+out:
+ AuDbgDentry(h_dentry);
+ return h_dentry;
+}
+
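+/* the bottom branch to search; entries under a whiteout are hidden */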
+aufs_bindex_t au_dbtail(struct dentry *dentry)
+{
+ aufs_bindex_t bend, bwh;
+
+ bend = au_dbend(dentry);
+ if (0 <= bend) {
+ bwh = au_dbwh(dentry);
+ if (!bwh)
+ return bwh;
+ if (0 < bwh && bwh < bend)
+ return bwh - 1;
+ }
+ return bend;
+}
+
+aufs_bindex_t au_dbtaildir(struct dentry *dentry)
+{
+ aufs_bindex_t bend, bopq;
+
+ bend = au_dbtail(dentry);
+ if (0 <= bend) {
+ bopq = au_dbdiropq(dentry);
+ if (0 <= bopq && bopq < bend)
+ bend = bopq;
+ }
+ return bend;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_set_h_dptr(struct dentry *dentry, aufs_bindex_t bindex,
+ struct dentry *h_dentry)
+{
+ struct au_hdentry *hd = au_di(dentry)->di_hdentry + bindex;
+ struct au_branch *br;
+
+ DiMustWriteLock(dentry);
+
+ au_hdput(hd);
+ hd->hd_dentry = h_dentry;
+ if (h_dentry) {
+ br = au_sbr(dentry->d_sb, bindex);
+ hd->hd_id = br->br_id;
+ }
+}
+
+int au_dbrange_test(struct dentry *dentry)
+{
+ int err;
+ aufs_bindex_t bstart, bend;
+
+ err = 0;
+ bstart = au_dbstart(dentry);
+ bend = au_dbend(dentry);
+ if (bstart >= 0)
+ AuDebugOn(bend < 0 && bstart > bend);
+ else {
+ err = -EIO;
+ AuDebugOn(bend >= 0);
+ }
+
+ return err;
+}
+
+int au_digen_test(struct dentry *dentry, unsigned int sigen)
+{
+ int err;
+
+ err = 0;
+ if (unlikely(au_digen(dentry) != sigen
+ || au_iigen_test(d_inode(dentry), sigen)))
+ err = -EIO;
+
+ return err;
+}
+
+void au_update_digen(struct dentry *dentry)
+{
+ atomic_set(&au_di(dentry)->di_generation, au_sigen(dentry->d_sb));
+ /* smp_mb(); */ /* atomic_set */
+}
+
+void au_update_dbrange(struct dentry *dentry, int do_put_zero)
+{
+ struct au_dinfo *dinfo;
+ struct dentry *h_d;
+ struct au_hdentry *hdp;
+
+ DiMustWriteLock(dentry);
+
+ dinfo = au_di(dentry);
+ if (!dinfo || dinfo->di_bstart < 0)
+ return;
+
+ hdp = dinfo->di_hdentry;
+ if (do_put_zero) {
+ aufs_bindex_t bindex, bend;
+
+ bend = dinfo->di_bend;
+ for (bindex = dinfo->di_bstart; bindex <= bend; bindex++) {
+ h_d = hdp[0 + bindex].hd_dentry;
+ if (h_d && d_is_negative(h_d))
+ au_set_h_dptr(dentry, bindex, NULL);
+ }
+ }
+
+ dinfo->di_bstart = -1;
+ while (++dinfo->di_bstart <= dinfo->di_bend)
+ if (hdp[0 + dinfo->di_bstart].hd_dentry)
+ break;
+ if (dinfo->di_bstart > dinfo->di_bend) {
+ dinfo->di_bstart = -1;
+ dinfo->di_bend = -1;
+ return;
+ }
+
+ dinfo->di_bend++;
+ while (0 <= --dinfo->di_bend)
+ if (hdp[0 + dinfo->di_bend].hd_dentry)
+ break;
+ AuDebugOn(dinfo->di_bstart > dinfo->di_bend || dinfo->di_bend < 0);
+}
+
+void au_update_dbstart(struct dentry *dentry)
+{
+ aufs_bindex_t bindex, bend;
+ struct dentry *h_dentry;
+
+ bend = au_dbend(dentry);
+ for (bindex = au_dbstart(dentry); bindex <= bend; bindex++) {
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (!h_dentry)
+ continue;
+ if (d_is_positive(h_dentry)) {
+ au_set_dbstart(dentry, bindex);
+ return;
+ }
+ au_set_h_dptr(dentry, bindex, NULL);
+ }
+}
+
+void au_update_dbend(struct dentry *dentry)
+{
+ aufs_bindex_t bindex, bstart;
+ struct dentry *h_dentry;
+
+ bstart = au_dbstart(dentry);
+ for (bindex = au_dbend(dentry); bindex >= bstart; bindex--) {
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (!h_dentry)
+ continue;
+ if (d_is_positive(h_dentry)) {
+ au_set_dbend(dentry, bindex);
+ return;
+ }
+ au_set_h_dptr(dentry, bindex, NULL);
+ }
+}
+
+int au_find_dbindex(struct dentry *dentry, struct dentry *h_dentry)
+{
+ aufs_bindex_t bindex, bend;
+
+ bend = au_dbend(dentry);
+ for (bindex = au_dbstart(dentry); bindex <= bend; bindex++)
+ if (au_h_dptr(dentry, bindex) == h_dentry)
+ return bindex;
+ return -1;
+}
diff --git a/fs/aufs/dir.c b/fs/aufs/dir.c
new file mode 100644
index 000000000..4644de4a9
--- /dev/null
+++ b/fs/aufs/dir.c
@@ -0,0 +1,743 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * directory operations
+ */
+
+#include <linux/fs_stack.h>
+#include "aufs.h"
+
+void au_add_nlink(struct inode *dir, struct inode *h_dir)
+{
+ unsigned int nlink;
+
+ AuDebugOn(!S_ISDIR(dir->i_mode) || !S_ISDIR(h_dir->i_mode));
+
+ nlink = dir->i_nlink;
+ nlink += h_dir->i_nlink - 2;
+ if (h_dir->i_nlink < 2)
+ nlink += 2;
+ smp_mb(); /* for i_nlink */
+ /* 0 can happen while revalidating */
+ set_nlink(dir, nlink);
+}
+
+void au_sub_nlink(struct inode *dir, struct inode *h_dir)
+{
+ unsigned int nlink;
+
+ AuDebugOn(!S_ISDIR(dir->i_mode) || !S_ISDIR(h_dir->i_mode));
+
+ nlink = dir->i_nlink;
+ nlink -= h_dir->i_nlink - 2;
+ if (h_dir->i_nlink < 2)
+ nlink -= 2;
+ smp_mb(); /* for i_nlink */
+ /* nlink == 0 means the branch-fs is broken */
+ set_nlink(dir, nlink);
+}
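A minimal worked example of the arithmetic above, with made-up numbers: a union
directory whose i_nlink is 5 that gains a lower directory with i_nlink 4 ends up
at 5 + (4 - 2) = 7, since the lower directory's own "." and ".." entries must not
be counted again; the "h_dir->i_nlink < 2" branch apparently compensates for
lower filesystems that keep directory link counts at 1. The helper below is only
an illustrative sketch of that computation, not part of aufs:

/* illustrative sketch only, mirroring au_add_nlink() above */
static unsigned int union_nlink_after_add(unsigned int nlink,
					  unsigned int h_nlink)
{
	nlink += h_nlink - 2;	/* drop the lower dir's "." and ".." */
	if (h_nlink < 2)	/* lower fs does not track directory nlink */
		nlink += 2;
	return nlink;		/* e.g. nlink 5 and h_nlink 4 give 7 */
}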
+
+loff_t au_dir_size(struct file *file, struct dentry *dentry)
+{
+ loff_t sz;
+ aufs_bindex_t bindex, bend;
+ struct file *h_file;
+ struct dentry *h_dentry;
+
+ sz = 0;
+ if (file) {
+ AuDebugOn(!d_is_dir(file->f_path.dentry));
+
+ bend = au_fbend_dir(file);
+ for (bindex = au_fbstart(file);
+ bindex <= bend && sz < KMALLOC_MAX_SIZE;
+ bindex++) {
+ h_file = au_hf_dir(file, bindex);
+ if (h_file && file_inode(h_file))
+ sz += vfsub_f_size_read(h_file);
+ }
+ } else {
+ AuDebugOn(!dentry);
+ AuDebugOn(!d_is_dir(dentry));
+
+ bend = au_dbtaildir(dentry);
+ for (bindex = au_dbstart(dentry);
+ bindex <= bend && sz < KMALLOC_MAX_SIZE;
+ bindex++) {
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (h_dentry && d_is_positive(h_dentry))
+ sz += i_size_read(d_inode(h_dentry));
+ }
+ }
+ if (sz < KMALLOC_MAX_SIZE)
+ sz = roundup_pow_of_two(sz);
+ if (sz > KMALLOC_MAX_SIZE)
+ sz = KMALLOC_MAX_SIZE;
+ else if (sz < NAME_MAX) {
+ BUILD_BUG_ON(AUFS_RDBLK_DEF < NAME_MAX);
+ sz = AUFS_RDBLK_DEF;
+ }
+ return sz;
+}
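A worked example of the estimate above, with made-up sizes: two lower directories
of 3000 and 5000 bytes sum to 8000, which roundup_pow_of_two() turns into 8192; a
larger sum is clamped to KMALLOC_MAX_SIZE, and a sum below NAME_MAX falls back to
AUFS_RDBLK_DEF, presumably so that one readdir block can always hold at least a
maximal-length name (the BUILD_BUG_ON above guarantees AUFS_RDBLK_DEF >= NAME_MAX).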
+
+struct au_dir_ts_arg {
+ struct dentry *dentry;
+ aufs_bindex_t brid;
+};
+
+static void au_do_dir_ts(void *arg)
+{
+ struct au_dir_ts_arg *a = arg;
+ struct au_dtime dt;
+ struct path h_path;
+ struct inode *dir, *h_dir;
+ struct super_block *sb;
+ struct au_branch *br;
+ struct au_hinode *hdir;
+ int err;
+ aufs_bindex_t bstart, bindex;
+
+ sb = a->dentry->d_sb;
+ if (d_really_is_negative(a->dentry))
+ goto out;
+ /* no dir->i_mutex lock */
+ aufs_read_lock(a->dentry, AuLock_DW); /* noflush */
+
+ dir = d_inode(a->dentry);
+ bstart = au_ibstart(dir);
+ bindex = au_br_index(sb, a->brid);
+ if (bindex < bstart)
+ goto out_unlock;
+
+ br = au_sbr(sb, bindex);
+ h_path.dentry = au_h_dptr(a->dentry, bindex);
+ if (!h_path.dentry)
+ goto out_unlock;
+ h_path.mnt = au_br_mnt(br);
+ au_dtime_store(&dt, a->dentry, &h_path);
+
+ br = au_sbr(sb, bstart);
+ if (!au_br_writable(br->br_perm))
+ goto out_unlock;
+ h_path.dentry = au_h_dptr(a->dentry, bstart);
+ h_path.mnt = au_br_mnt(br);
+ err = vfsub_mnt_want_write(h_path.mnt);
+ if (err)
+ goto out_unlock;
+ hdir = au_hi(dir, bstart);
+ au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT);
+ h_dir = au_h_iptr(dir, bstart);
+ if (h_dir->i_nlink
+ && timespec_compare(&h_dir->i_mtime, &dt.dt_mtime) < 0) {
+ dt.dt_h_path = h_path;
+ au_dtime_revert(&dt);
+ }
+ au_hn_imtx_unlock(hdir);
+ vfsub_mnt_drop_write(h_path.mnt);
+ au_cpup_attr_timesizes(dir);
+
+out_unlock:
+ aufs_read_unlock(a->dentry, AuLock_DW);
+out:
+ dput(a->dentry);
+ au_nwt_done(&au_sbi(sb)->si_nowait);
+ kfree(arg);
+}
+
+void au_dir_ts(struct inode *dir, aufs_bindex_t bindex)
+{
+ int perm, wkq_err;
+ aufs_bindex_t bstart;
+ struct au_dir_ts_arg *arg;
+ struct dentry *dentry;
+ struct super_block *sb;
+
+ IMustLock(dir);
+
+ dentry = d_find_any_alias(dir);
+ AuDebugOn(!dentry);
+ sb = dentry->d_sb;
+ bstart = au_ibstart(dir);
+ if (bstart == bindex) {
+ au_cpup_attr_timesizes(dir);
+ goto out;
+ }
+
+ perm = au_sbr_perm(sb, bstart);
+ if (!au_br_writable(perm))
+ goto out;
+
+ arg = kmalloc(sizeof(*arg), GFP_NOFS);
+ if (!arg)
+ goto out;
+
+ arg->dentry = dget(dentry); /* will be dput-ted by au_do_dir_ts() */
+ arg->brid = au_sbr_id(sb, bindex);
+ wkq_err = au_wkq_nowait(au_do_dir_ts, arg, sb, /*flags*/0);
+ if (unlikely(wkq_err)) {
+ pr_err("wkq %d\n", wkq_err);
+ dput(dentry);
+ kfree(arg);
+ }
+
+out:
+ dput(dentry);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int reopen_dir(struct file *file)
+{
+ int err;
+ unsigned int flags;
+ aufs_bindex_t bindex, btail, bstart;
+ struct dentry *dentry, *h_dentry;
+ struct file *h_file;
+
+ /* open all lower dirs */
+ dentry = file->f_path.dentry;
+ bstart = au_dbstart(dentry);
+ for (bindex = au_fbstart(file); bindex < bstart; bindex++)
+ au_set_h_fptr(file, bindex, NULL);
+ au_set_fbstart(file, bstart);
+
+ btail = au_dbtaildir(dentry);
+ for (bindex = au_fbend_dir(file); btail < bindex; bindex--)
+ au_set_h_fptr(file, bindex, NULL);
+ au_set_fbend_dir(file, btail);
+
+ flags = vfsub_file_flags(file);
+ for (bindex = bstart; bindex <= btail; bindex++) {
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (!h_dentry)
+ continue;
+ h_file = au_hf_dir(file, bindex);
+ if (h_file)
+ continue;
+
+ h_file = au_h_open(dentry, bindex, flags, file, /*force_wr*/0);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out; /* close all? */
+ au_set_h_fptr(file, bindex, h_file);
+ }
+ au_update_figen(file);
+ /* todo: necessary? */
+ /* file->f_ra = h_file->f_ra; */
+ err = 0;
+
+out:
+ return err;
+}
+
+static int do_open_dir(struct file *file, int flags, struct file *h_file)
+{
+ int err;
+ aufs_bindex_t bindex, btail;
+ struct dentry *dentry, *h_dentry;
+ struct vfsmount *mnt;
+
+ FiMustWriteLock(file);
+ AuDebugOn(h_file);
+
+ err = 0;
+ mnt = file->f_path.mnt;
+ dentry = file->f_path.dentry;
+ file->f_version = d_inode(dentry)->i_version;
+ bindex = au_dbstart(dentry);
+ au_set_fbstart(file, bindex);
+ btail = au_dbtaildir(dentry);
+ au_set_fbend_dir(file, btail);
+ for (; !err && bindex <= btail; bindex++) {
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (!h_dentry)
+ continue;
+
+ err = vfsub_test_mntns(mnt, h_dentry->d_sb);
+ if (unlikely(err))
+ break;
+ h_file = au_h_open(dentry, bindex, flags, file, /*force_wr*/0);
+ if (IS_ERR(h_file)) {
+ err = PTR_ERR(h_file);
+ break;
+ }
+ au_set_h_fptr(file, bindex, h_file);
+ }
+ au_update_figen(file);
+ /* todo: necessary? */
+ /* file->f_ra = h_file->f_ra; */
+ if (!err)
+ return 0; /* success */
+
+ /* close all */
+ for (bindex = au_fbstart(file); bindex <= btail; bindex++)
+ au_set_h_fptr(file, bindex, NULL);
+ au_set_fbstart(file, -1);
+ au_set_fbend_dir(file, -1);
+
+ return err;
+}
+
+static int aufs_open_dir(struct inode *inode __maybe_unused,
+ struct file *file)
+{
+ int err;
+ struct super_block *sb;
+ struct au_fidir *fidir;
+
+ err = -ENOMEM;
+ sb = file->f_path.dentry->d_sb;
+ si_read_lock(sb, AuLock_FLUSH);
+ fidir = au_fidir_alloc(sb);
+ if (fidir) {
+ struct au_do_open_args args = {
+ .open = do_open_dir,
+ .fidir = fidir
+ };
+ err = au_do_open(file, &args);
+ if (unlikely(err))
+ kfree(fidir);
+ }
+ si_read_unlock(sb);
+ return err;
+}
+
+static int aufs_release_dir(struct inode *inode __maybe_unused,
+ struct file *file)
+{
+ struct au_vdir *vdir_cache;
+ struct au_finfo *finfo;
+ struct au_fidir *fidir;
+ aufs_bindex_t bindex, bend;
+
+ finfo = au_fi(file);
+ fidir = finfo->fi_hdir;
+ if (fidir) {
+ au_sphl_del(&finfo->fi_hlist,
+ &au_sbi(file->f_path.dentry->d_sb)->si_files);
+ vdir_cache = fidir->fd_vdir_cache; /* lock-free */
+ if (vdir_cache)
+ au_vdir_free(vdir_cache);
+
+ bindex = finfo->fi_btop;
+ if (bindex >= 0) {
+ /*
+ * calls fput() instead of filp_close(),
+ * since there is no dnotify or lock for the lower file.
+ */
+ bend = fidir->fd_bbot;
+ for (; bindex <= bend; bindex++)
+ au_set_h_fptr(file, bindex, NULL);
+ }
+ kfree(fidir);
+ finfo->fi_hdir = NULL;
+ }
+ au_finfo_fin(file);
+ return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_do_flush_dir(struct file *file, fl_owner_t id)
+{
+ int err;
+ aufs_bindex_t bindex, bend;
+ struct file *h_file;
+
+ err = 0;
+ bend = au_fbend_dir(file);
+ for (bindex = au_fbstart(file); !err && bindex <= bend; bindex++) {
+ h_file = au_hf_dir(file, bindex);
+ if (h_file)
+ err = vfsub_flush(h_file, id);
+ }
+ return err;
+}
+
+static int aufs_flush_dir(struct file *file, fl_owner_t id)
+{
+ return au_do_flush(file, id, au_do_flush_dir);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_do_fsync_dir_no_file(struct dentry *dentry, int datasync)
+{
+ int err;
+ aufs_bindex_t bend, bindex;
+ struct inode *inode;
+ struct super_block *sb;
+
+ err = 0;
+ sb = dentry->d_sb;
+ inode = d_inode(dentry);
+ IMustLock(inode);
+ bend = au_dbend(dentry);
+ for (bindex = au_dbstart(dentry); !err && bindex <= bend; bindex++) {
+ struct path h_path;
+
+ if (au_test_ro(sb, bindex, inode))
+ continue;
+ h_path.dentry = au_h_dptr(dentry, bindex);
+ if (!h_path.dentry)
+ continue;
+
+ h_path.mnt = au_sbr_mnt(sb, bindex);
+ err = vfsub_fsync(NULL, &h_path, datasync);
+ }
+
+ return err;
+}
+
+static int au_do_fsync_dir(struct file *file, int datasync)
+{
+ int err;
+ aufs_bindex_t bend, bindex;
+ struct file *h_file;
+ struct super_block *sb;
+ struct inode *inode;
+
+ err = au_reval_and_lock_fdi(file, reopen_dir, /*wlock*/1);
+ if (unlikely(err))
+ goto out;
+
+ inode = file_inode(file);
+ sb = inode->i_sb;
+ bend = au_fbend_dir(file);
+ for (bindex = au_fbstart(file); !err && bindex <= bend; bindex++) {
+ h_file = au_hf_dir(file, bindex);
+ if (!h_file || au_test_ro(sb, bindex, inode))
+ continue;
+
+ err = vfsub_fsync(h_file, &h_file->f_path, datasync);
+ }
+
+out:
+ return err;
+}
+
+/*
+ * @file may be NULL
+ */
+static int aufs_fsync_dir(struct file *file, loff_t start, loff_t end,
+ int datasync)
+{
+ int err;
+ struct dentry *dentry;
+ struct inode *inode;
+ struct super_block *sb;
+
+ err = 0;
+ dentry = file->f_path.dentry;
+ inode = d_inode(dentry);
+ inode_lock(inode);
+ sb = dentry->d_sb;
+ si_noflush_read_lock(sb);
+ if (file)
+ err = au_do_fsync_dir(file, datasync);
+ else {
+ di_write_lock_child(dentry);
+ err = au_do_fsync_dir_no_file(dentry, datasync);
+ }
+ au_cpup_attr_timesizes(inode);
+ di_write_unlock(dentry);
+ if (file)
+ fi_write_unlock(file);
+
+ si_read_unlock(sb);
+ inode_unlock(inode);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int aufs_iterate(struct file *file, struct dir_context *ctx)
+{
+ int err;
+ struct dentry *dentry;
+ struct inode *inode, *h_inode;
+ struct super_block *sb;
+
+ AuDbg("%pD, ctx{%pf, %llu}\n", file, ctx->actor, ctx->pos);
+
+ dentry = file->f_path.dentry;
+ inode = d_inode(dentry);
+ IMustLock(inode);
+
+ sb = dentry->d_sb;
+ si_read_lock(sb, AuLock_FLUSH);
+ err = au_reval_and_lock_fdi(file, reopen_dir, /*wlock*/1);
+ if (unlikely(err))
+ goto out;
+ err = au_alive_dir(dentry);
+ if (!err)
+ err = au_vdir_init(file);
+ di_downgrade_lock(dentry, AuLock_IR);
+ if (unlikely(err))
+ goto out_unlock;
+
+ h_inode = au_h_iptr(inode, au_ibstart(inode));
+ if (!au_test_nfsd()) {
+ err = au_vdir_fill_de(file, ctx);
+ fsstack_copy_attr_atime(inode, h_inode);
+ } else {
+ /*
+ * nfsd filldir may call lookup_one_len(), vfs_getattr(),
+ * encode_fh() and others.
+ */
+ atomic_inc(&h_inode->i_count);
+ di_read_unlock(dentry, AuLock_IR);
+ si_read_unlock(sb);
+ err = au_vdir_fill_de(file, ctx);
+ fsstack_copy_attr_atime(inode, h_inode);
+ fi_write_unlock(file);
+ iput(h_inode);
+
+ AuTraceErr(err);
+ return err;
+ }
+
+out_unlock:
+ di_read_unlock(dentry, AuLock_IR);
+ fi_write_unlock(file);
+out:
+ si_read_unlock(sb);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define AuTestEmpty_WHONLY 1
+#define AuTestEmpty_CALLED (1 << 1)
+#define AuTestEmpty_SHWH (1 << 2)
+#define au_ftest_testempty(flags, name) ((flags) & AuTestEmpty_##name)
+#define au_fset_testempty(flags, name) \
+ do { (flags) |= AuTestEmpty_##name; } while (0)
+#define au_fclr_testempty(flags, name) \
+ do { (flags) &= ~AuTestEmpty_##name; } while (0)
+
+#ifndef CONFIG_AUFS_SHWH
+#undef AuTestEmpty_SHWH
+#define AuTestEmpty_SHWH 0
+#endif
+
+struct test_empty_arg {
+ struct dir_context ctx;
+ struct au_nhash *whlist;
+ unsigned int flags;
+ int err;
+ aufs_bindex_t bindex;
+};
+
+static int test_empty_cb(struct dir_context *ctx, const char *__name,
+ int namelen, loff_t offset __maybe_unused, u64 ino,
+ unsigned int d_type)
+{
+ struct test_empty_arg *arg = container_of(ctx, struct test_empty_arg,
+ ctx);
+ char *name = (void *)__name;
+
+ arg->err = 0;
+ au_fset_testempty(arg->flags, CALLED);
+ /* smp_mb(); */
+ if (name[0] == '.'
+ && (namelen == 1 || (name[1] == '.' && namelen == 2)))
+ goto out; /* success */
+
+ if (namelen <= AUFS_WH_PFX_LEN
+ || memcmp(name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) {
+ if (au_ftest_testempty(arg->flags, WHONLY)
+ && !au_nhash_test_known_wh(arg->whlist, name, namelen))
+ arg->err = -ENOTEMPTY;
+ goto out;
+ }
+
+ name += AUFS_WH_PFX_LEN;
+ namelen -= AUFS_WH_PFX_LEN;
+ if (!au_nhash_test_known_wh(arg->whlist, name, namelen))
+ arg->err = au_nhash_append_wh
+ (arg->whlist, name, namelen, ino, d_type, arg->bindex,
+ au_ftest_testempty(arg->flags, SHWH));
+
+out:
+ /* smp_mb(); */
+ AuTraceErr(arg->err);
+ return arg->err;
+}
+
+static int do_test_empty(struct dentry *dentry, struct test_empty_arg *arg)
+{
+ int err;
+ struct file *h_file;
+
+ h_file = au_h_open(dentry, arg->bindex,
+ O_RDONLY | O_NONBLOCK | O_DIRECTORY | O_LARGEFILE,
+ /*file*/NULL, /*force_wr*/0);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+
+ err = 0;
+ if (!au_opt_test(au_mntflags(dentry->d_sb), UDBA_NONE)
+ && !file_inode(h_file)->i_nlink)
+ goto out_put;
+
+ do {
+ arg->err = 0;
+ au_fclr_testempty(arg->flags, CALLED);
+ /* smp_mb(); */
+ err = vfsub_iterate_dir(h_file, &arg->ctx);
+ if (err >= 0)
+ err = arg->err;
+ } while (!err && au_ftest_testempty(arg->flags, CALLED));
+
+out_put:
+ fput(h_file);
+ au_sbr_put(dentry->d_sb, arg->bindex);
+out:
+ return err;
+}
+
+struct do_test_empty_args {
+ int *errp;
+ struct dentry *dentry;
+ struct test_empty_arg *arg;
+};
+
+static void call_do_test_empty(void *args)
+{
+ struct do_test_empty_args *a = args;
+ *a->errp = do_test_empty(a->dentry, a->arg);
+}
+
+static int sio_test_empty(struct dentry *dentry, struct test_empty_arg *arg)
+{
+ int err, wkq_err;
+ struct dentry *h_dentry;
+ struct inode *h_inode;
+
+ h_dentry = au_h_dptr(dentry, arg->bindex);
+ h_inode = d_inode(h_dentry);
+ /* todo: i_mode changes anytime? */
+ inode_lock_nested(h_inode, AuLsc_I_CHILD);
+ err = au_test_h_perm_sio(h_inode, MAY_EXEC | MAY_READ);
+ inode_unlock(h_inode);
+ if (!err)
+ err = do_test_empty(dentry, arg);
+ else {
+ struct do_test_empty_args args = {
+ .errp = &err,
+ .dentry = dentry,
+ .arg = arg
+ };
+ unsigned int flags = arg->flags;
+
+ wkq_err = au_wkq_wait(call_do_test_empty, &args);
+ if (unlikely(wkq_err))
+ err = wkq_err;
+ arg->flags = flags;
+ }
+
+ return err;
+}
+
+int au_test_empty_lower(struct dentry *dentry)
+{
+ int err;
+ unsigned int rdhash;
+ aufs_bindex_t bindex, bstart, btail;
+ struct au_nhash whlist;
+ struct test_empty_arg arg = {
+ .ctx = {
+ .actor = test_empty_cb
+ }
+ };
+ int (*test_empty)(struct dentry *dentry, struct test_empty_arg *arg);
+
+ SiMustAnyLock(dentry->d_sb);
+
+ rdhash = au_sbi(dentry->d_sb)->si_rdhash;
+ if (!rdhash)
+ rdhash = au_rdhash_est(au_dir_size(/*file*/NULL, dentry));
+ err = au_nhash_alloc(&whlist, rdhash, GFP_NOFS);
+ if (unlikely(err))
+ goto out;
+
+ arg.flags = 0;
+ arg.whlist = &whlist;
+ bstart = au_dbstart(dentry);
+ if (au_opt_test(au_mntflags(dentry->d_sb), SHWH))
+ au_fset_testempty(arg.flags, SHWH);
+ test_empty = do_test_empty;
+ if (au_opt_test(au_mntflags(dentry->d_sb), DIRPERM1))
+ test_empty = sio_test_empty;
+ arg.bindex = bstart;
+ err = test_empty(dentry, &arg);
+ if (unlikely(err))
+ goto out_whlist;
+
+ au_fset_testempty(arg.flags, WHONLY);
+ btail = au_dbtaildir(dentry);
+ for (bindex = bstart + 1; !err && bindex <= btail; bindex++) {
+ struct dentry *h_dentry;
+
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (h_dentry && d_is_positive(h_dentry)) {
+ arg.bindex = bindex;
+ err = test_empty(dentry, &arg);
+ }
+ }
+
+out_whlist:
+ au_nhash_wh_free(&whlist);
+out:
+ return err;
+}
+
+int au_test_empty(struct dentry *dentry, struct au_nhash *whlist)
+{
+ int err;
+ struct test_empty_arg arg = {
+ .ctx = {
+ .actor = test_empty_cb
+ }
+ };
+ aufs_bindex_t bindex, btail;
+
+ err = 0;
+ arg.whlist = whlist;
+ arg.flags = AuTestEmpty_WHONLY;
+ if (au_opt_test(au_mntflags(dentry->d_sb), SHWH))
+ au_fset_testempty(arg.flags, SHWH);
+ btail = au_dbtaildir(dentry);
+ for (bindex = au_dbstart(dentry); !err && bindex <= btail; bindex++) {
+ struct dentry *h_dentry;
+
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (h_dentry && d_is_positive(h_dentry)) {
+ arg.bindex = bindex;
+ err = sio_test_empty(dentry, &arg);
+ }
+ }
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+const struct file_operations aufs_dir_fop = {
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+ .read = generic_read_dir,
+ .iterate = aufs_iterate,
+ .unlocked_ioctl = aufs_ioctl_dir,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = aufs_compat_ioctl_dir,
+#endif
+ .open = aufs_open_dir,
+ .release = aufs_release_dir,
+ .flush = aufs_flush_dir,
+ .fsync = aufs_fsync_dir
+};
diff --git a/fs/aufs/dir.h b/fs/aufs/dir.h
new file mode 100644
index 000000000..b0a79d722
--- /dev/null
+++ b/fs/aufs/dir.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * directory operations
+ */
+
+#ifndef __AUFS_DIR_H__
+#define __AUFS_DIR_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+
+/* ---------------------------------------------------------------------- */
+
+/* need to be faster and smaller */
+
+struct au_nhash {
+ unsigned int nh_num;
+ struct hlist_head *nh_head;
+};
+
+struct au_vdir_destr {
+ unsigned char len;
+ unsigned char name[0];
+} __packed;
+
+struct au_vdir_dehstr {
+ struct hlist_node hash;
+ struct au_vdir_destr *str;
+} ____cacheline_aligned_in_smp;
+
+struct au_vdir_de {
+ ino_t de_ino;
+ unsigned char de_type;
+ /* caution: packed */
+ struct au_vdir_destr de_str;
+} __packed;
+
+struct au_vdir_wh {
+ struct hlist_node wh_hash;
+#ifdef CONFIG_AUFS_SHWH
+ ino_t wh_ino;
+ aufs_bindex_t wh_bindex;
+ unsigned char wh_type;
+#else
+ aufs_bindex_t wh_bindex;
+#endif
+ /* caution: packed */
+ struct au_vdir_destr wh_str;
+} __packed;
+
+union au_vdir_deblk_p {
+ unsigned char *deblk;
+ struct au_vdir_de *de;
+};
+
+struct au_vdir {
+ unsigned char **vd_deblk;
+ unsigned long vd_nblk;
+ struct {
+ unsigned long ul;
+ union au_vdir_deblk_p p;
+ } vd_last;
+
+ unsigned long vd_version;
+ unsigned int vd_deblk_sz;
+ unsigned long vd_jiffy;
+} ____cacheline_aligned_in_smp;
+
+/* ---------------------------------------------------------------------- */
+
+/* dir.c */
+extern const struct file_operations aufs_dir_fop;
+void au_add_nlink(struct inode *dir, struct inode *h_dir);
+void au_sub_nlink(struct inode *dir, struct inode *h_dir);
+loff_t au_dir_size(struct file *file, struct dentry *dentry);
+void au_dir_ts(struct inode *dir, aufs_bindex_t bsrc);
+int au_test_empty_lower(struct dentry *dentry);
+int au_test_empty(struct dentry *dentry, struct au_nhash *whlist);
+
+/* vdir.c */
+unsigned int au_rdhash_est(loff_t sz);
+int au_nhash_alloc(struct au_nhash *nhash, unsigned int num_hash, gfp_t gfp);
+void au_nhash_wh_free(struct au_nhash *whlist);
+int au_nhash_test_longer_wh(struct au_nhash *whlist, aufs_bindex_t btgt,
+ int limit);
+int au_nhash_test_known_wh(struct au_nhash *whlist, char *name, int nlen);
+int au_nhash_append_wh(struct au_nhash *whlist, char *name, int nlen, ino_t ino,
+ unsigned int d_type, aufs_bindex_t bindex,
+ unsigned char shwh);
+void au_vdir_free(struct au_vdir *vdir);
+int au_vdir_init(struct file *file);
+int au_vdir_fill_de(struct file *file, struct dir_context *ctx);
+
+/* ioctl.c */
+long aufs_ioctl_dir(struct file *file, unsigned int cmd, unsigned long arg);
+
+#ifdef CONFIG_AUFS_RDU
+/* rdu.c */
+long au_rdu_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+#ifdef CONFIG_COMPAT
+long au_rdu_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+#endif
+#else
+AuStub(long, au_rdu_ioctl, return -EINVAL, struct file *file,
+ unsigned int cmd, unsigned long arg)
+#ifdef CONFIG_COMPAT
+AuStub(long, au_rdu_compat_ioctl, return -EINVAL, struct file *file,
+ unsigned int cmd, unsigned long arg)
+#endif
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DIR_H__ */
diff --git a/fs/aufs/dynop.c b/fs/aufs/dynop.c
new file mode 100644
index 000000000..53a8b55d8
--- /dev/null
+++ b/fs/aufs/dynop.c
@@ -0,0 +1,356 @@
+/*
+ * Copyright (C) 2010-2016 Junjiro R. Okajima
+ */
+
+/*
+ * dynamically customizable operations for regular files
+ */
+
+#include "aufs.h"
+
+#define DyPrSym(key) AuDbgSym(key->dk_op.dy_hop)
+
+/*
+ * How large will these lists be?
+ * Usually just a few elements, 20-30 at most for each, I guess.
+ */
+static struct au_splhead dynop[AuDyLast];
+
+static struct au_dykey *dy_gfind_get(struct au_splhead *spl, const void *h_op)
+{
+ struct au_dykey *key, *tmp;
+ struct list_head *head;
+
+ key = NULL;
+ head = &spl->head;
+ rcu_read_lock();
+ list_for_each_entry_rcu(tmp, head, dk_list)
+ if (tmp->dk_op.dy_hop == h_op) {
+ key = tmp;
+ kref_get(&key->dk_kref);
+ break;
+ }
+ rcu_read_unlock();
+
+ return key;
+}
+
+static struct au_dykey *dy_bradd(struct au_branch *br, struct au_dykey *key)
+{
+ struct au_dykey **k, *found;
+ const void *h_op = key->dk_op.dy_hop;
+ int i;
+
+ found = NULL;
+ k = br->br_dykey;
+ for (i = 0; i < AuBrDynOp; i++)
+ if (k[i]) {
+ if (k[i]->dk_op.dy_hop == h_op) {
+ found = k[i];
+ break;
+ }
+ } else
+ break;
+ if (!found) {
+ spin_lock(&br->br_dykey_lock);
+ for (; i < AuBrDynOp; i++)
+ if (k[i]) {
+ if (k[i]->dk_op.dy_hop == h_op) {
+ found = k[i];
+ break;
+ }
+ } else {
+ k[i] = key;
+ break;
+ }
+ spin_unlock(&br->br_dykey_lock);
+ BUG_ON(i == AuBrDynOp); /* expand the array */
+ }
+
+ return found;
+}
+
+/* kref_get() if @key is already added */
+static struct au_dykey *dy_gadd(struct au_splhead *spl, struct au_dykey *key)
+{
+ struct au_dykey *tmp, *found;
+ struct list_head *head;
+ const void *h_op = key->dk_op.dy_hop;
+
+ found = NULL;
+ head = &spl->head;
+ spin_lock(&spl->spin);
+ list_for_each_entry(tmp, head, dk_list)
+ if (tmp->dk_op.dy_hop == h_op) {
+ kref_get(&tmp->dk_kref);
+ found = tmp;
+ break;
+ }
+ if (!found)
+ list_add_rcu(&key->dk_list, head);
+ spin_unlock(&spl->spin);
+
+ if (!found)
+ DyPrSym(key);
+ return found;
+}
+
+static void dy_free_rcu(struct rcu_head *rcu)
+{
+ struct au_dykey *key;
+
+ key = container_of(rcu, struct au_dykey, dk_rcu);
+ DyPrSym(key);
+ kfree(key);
+}
+
+static void dy_free(struct kref *kref)
+{
+ struct au_dykey *key;
+ struct au_splhead *spl;
+
+ key = container_of(kref, struct au_dykey, dk_kref);
+ spl = dynop + key->dk_op.dy_type;
+ au_spl_del_rcu(&key->dk_list, spl);
+ call_rcu(&key->dk_rcu, dy_free_rcu);
+}
+
+void au_dy_put(struct au_dykey *key)
+{
+ kref_put(&key->dk_kref, dy_free);
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define DyDbgSize(cnt, op) AuDebugOn(cnt != sizeof(op)/sizeof(void *))
+
+#ifdef CONFIG_AUFS_DEBUG
+#define DyDbgDeclare(cnt) unsigned int cnt = 0
+#define DyDbgInc(cnt) do { cnt++; } while (0)
+#else
+#define DyDbgDeclare(cnt) do {} while (0)
+#define DyDbgInc(cnt) do {} while (0)
+#endif
+
+#define DySet(func, dst, src, h_op, h_sb) do { \
+ DyDbgInc(cnt); \
+ if (h_op->func) { \
+ if (src.func) \
+ dst.func = src.func; \
+ else \
+ AuDbg("%s %s\n", au_sbtype(h_sb), #func); \
+ } \
+} while (0)
+
+#define DySetForce(func, dst, src) do { \
+ AuDebugOn(!src.func); \
+ DyDbgInc(cnt); \
+ dst.func = src.func; \
+} while (0)
+
+#define DySetAop(func) \
+ DySet(func, dyaop->da_op, aufs_aop, h_aop, h_sb)
+#define DySetAopForce(func) \
+ DySetForce(func, dyaop->da_op, aufs_aop)
+
+static void dy_aop(struct au_dykey *key, const void *h_op,
+ struct super_block *h_sb __maybe_unused)
+{
+ struct au_dyaop *dyaop = (void *)key;
+ const struct address_space_operations *h_aop = h_op;
+ DyDbgDeclare(cnt);
+
+ AuDbg("%s\n", au_sbtype(h_sb));
+
+ DySetAop(writepage);
+ DySetAopForce(readpage); /* force */
+ DySetAop(writepages);
+ DySetAop(set_page_dirty);
+ DySetAop(readpages);
+ DySetAop(write_begin);
+ DySetAop(write_end);
+ DySetAop(bmap);
+ DySetAop(invalidatepage);
+ DySetAop(releasepage);
+ DySetAop(freepage);
+ /* this one will be changed according to an aufs mount option */
+ DySetAop(direct_IO);
+ DySetAop(migratepage);
+ DySetAop(launder_page);
+ DySetAop(is_partially_uptodate);
+ DySetAop(is_dirty_writeback);
+ DySetAop(error_remove_page);
+ DySetAop(swap_activate);
+ DySetAop(swap_deactivate);
+
+ DyDbgSize(cnt, *h_aop);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void dy_bug(struct kref *kref)
+{
+ BUG();
+}
+
+static struct au_dykey *dy_get(struct au_dynop *op, struct au_branch *br)
+{
+ struct au_dykey *key, *old;
+ struct au_splhead *spl;
+ struct op {
+ unsigned int sz;
+ void (*set)(struct au_dykey *key, const void *h_op,
+ struct super_block *h_sb __maybe_unused);
+ };
+ static const struct op a[] = {
+ [AuDy_AOP] = {
+ .sz = sizeof(struct au_dyaop),
+ .set = dy_aop
+ }
+ };
+ const struct op *p;
+
+ spl = dynop + op->dy_type;
+ key = dy_gfind_get(spl, op->dy_hop);
+ if (key)
+ goto out_add; /* success */
+
+ p = a + op->dy_type;
+ key = kzalloc(p->sz, GFP_NOFS);
+ if (unlikely(!key)) {
+ key = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ key->dk_op.dy_hop = op->dy_hop;
+ kref_init(&key->dk_kref);
+ p->set(key, op->dy_hop, au_br_sb(br));
+ old = dy_gadd(spl, key);
+ if (old) {
+ kfree(key);
+ key = old;
+ }
+
+out_add:
+ old = dy_bradd(br, key);
+ if (old)
+ /* its ref-count should never be zero here */
+ kref_put(&key->dk_kref, dy_bug);
+out:
+ return key;
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Aufs prohibits O_DIRECT by default even if the branch supports it.
+ * This behaviour is necessary to return an error from open(O_DIRECT) instead
+ * of from the subsequent I/O. The dio mount option enables O_DIRECT and makes
+ * open(O_DIRECT) always succeed, but the subsequent I/O may return an error.
+ * See the aufs manual for details.
+ */
+static void dy_adx(struct au_dyaop *dyaop, int do_dx)
+{
+ if (!do_dx)
+ dyaop->da_op.direct_IO = NULL;
+ else
+ dyaop->da_op.direct_IO = aufs_aop.direct_IO;
+}
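To make the comment above concrete, here is a minimal user-space sketch of what
the default behaviour looks like from an application. The mount point and file
name are hypothetical, and EINVAL is what the VFS is expected to return at open
time when the file's address_space_operations lack ->direct_IO:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	/* /mnt/aufs/file is a made-up path on an aufs mount without "dio" */
	int fd = open("/mnt/aufs/file", O_RDWR | O_DIRECT);

	if (fd < 0)
		perror("open(O_DIRECT)");	/* expected: EINVAL */
	return 0;
}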
+
+static struct au_dyaop *dy_aget(struct au_branch *br,
+ const struct address_space_operations *h_aop,
+ int do_dx)
+{
+ struct au_dyaop *dyaop;
+ struct au_dynop op;
+
+ op.dy_type = AuDy_AOP;
+ op.dy_haop = h_aop;
+ dyaop = (void *)dy_get(&op, br);
+ if (IS_ERR(dyaop))
+ goto out;
+ dy_adx(dyaop, do_dx);
+
+out:
+ return dyaop;
+}
+
+int au_dy_iaop(struct inode *inode, aufs_bindex_t bindex,
+ struct inode *h_inode)
+{
+ int err, do_dx;
+ struct super_block *sb;
+ struct au_branch *br;
+ struct au_dyaop *dyaop;
+
+ AuDebugOn(!S_ISREG(h_inode->i_mode));
+ IiMustWriteLock(inode);
+
+ sb = inode->i_sb;
+ br = au_sbr(sb, bindex);
+ do_dx = !!au_opt_test(au_mntflags(sb), DIO);
+ dyaop = dy_aget(br, h_inode->i_mapping->a_ops, do_dx);
+ err = PTR_ERR(dyaop);
+ if (IS_ERR(dyaop))
+ /* unnecessary to call dy_fput() */
+ goto out;
+
+ err = 0;
+ inode->i_mapping->a_ops = &dyaop->da_op;
+
+out:
+ return err;
+}
+
+/*
+ * Is it safe to replace a_ops while the inode/file is in operation?
+ * Yes, I hope so.
+ */
+int au_dy_irefresh(struct inode *inode)
+{
+ int err;
+ aufs_bindex_t bstart;
+ struct inode *h_inode;
+
+ err = 0;
+ if (S_ISREG(inode->i_mode)) {
+ bstart = au_ibstart(inode);
+ h_inode = au_h_iptr(inode, bstart);
+ err = au_dy_iaop(inode, bstart, h_inode);
+ }
+ return err;
+}
+
+void au_dy_arefresh(int do_dx)
+{
+ struct au_splhead *spl;
+ struct list_head *head;
+ struct au_dykey *key;
+
+ spl = dynop + AuDy_AOP;
+ head = &spl->head;
+ spin_lock(&spl->spin);
+ list_for_each_entry(key, head, dk_list)
+ dy_adx((void *)key, do_dx);
+ spin_unlock(&spl->spin);
+}
+
+/* ---------------------------------------------------------------------- */
+
+void __init au_dy_init(void)
+{
+ int i;
+
+ /* make sure that 'struct au_dykey *' can be any type */
+ BUILD_BUG_ON(offsetof(struct au_dyaop, da_key));
+
+ for (i = 0; i < AuDyLast; i++)
+ au_spl_init(dynop + i);
+}
+
+void au_dy_fin(void)
+{
+ int i;
+
+ for (i = 0; i < AuDyLast; i++)
+ WARN_ON(!list_empty(&dynop[i].head));
+}
diff --git a/fs/aufs/dynop.h b/fs/aufs/dynop.h
new file mode 100644
index 000000000..8680bfc53
--- /dev/null
+++ b/fs/aufs/dynop.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2010-2016 Junjiro R. Okajima
+ */
+
+/*
+ * dynamically customizable operations (for regular files only)
+ */
+
+#ifndef __AUFS_DYNOP_H__
+#define __AUFS_DYNOP_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+#include <linux/kref.h>
+
+enum {AuDy_AOP, AuDyLast};
+
+struct au_dynop {
+ int dy_type;
+ union {
+ const void *dy_hop;
+ const struct address_space_operations *dy_haop;
+ };
+};
+
+struct au_dykey {
+ union {
+ struct list_head dk_list;
+ struct rcu_head dk_rcu;
+ };
+ struct au_dynop dk_op;
+
+ /*
+ * while this key is in the branch-local array, its kref is held. when the
+ * branch is removed, the kref is put.
+ */
+ struct kref dk_kref;
+};
+
+/* stop unioning since their sizes are very different from each other */
+struct au_dyaop {
+ struct au_dykey da_key;
+ struct address_space_operations da_op; /* not const */
+};
+
+/* ---------------------------------------------------------------------- */
+
+/* dynop.c */
+struct au_branch;
+void au_dy_put(struct au_dykey *key);
+int au_dy_iaop(struct inode *inode, aufs_bindex_t bindex,
+ struct inode *h_inode);
+int au_dy_irefresh(struct inode *inode);
+void au_dy_arefresh(int do_dio);
+
+void __init au_dy_init(void);
+void au_dy_fin(void);
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_DYNOP_H__ */
diff --git a/fs/aufs/export.c b/fs/aufs/export.c
new file mode 100644
index 000000000..69b70f2a1
--- /dev/null
+++ b/fs/aufs/export.c
@@ -0,0 +1,817 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * export via nfs
+ */
+
+#include <linux/exportfs.h>
+#include <linux/fs_struct.h>
+#include <linux/namei.h>
+#include <linux/nsproxy.h>
+#include <linux/random.h>
+#include <linux/writeback.h>
+#include "../fs/mount.h"
+#include "aufs.h"
+
+union conv {
+#ifdef CONFIG_AUFS_INO_T_64
+ __u32 a[2];
+#else
+ __u32 a[1];
+#endif
+ ino_t ino;
+};
+
+static ino_t decode_ino(__u32 *a)
+{
+ union conv u;
+
+ BUILD_BUG_ON(sizeof(u.ino) != sizeof(u.a));
+ u.a[0] = a[0];
+#ifdef CONFIG_AUFS_INO_T_64
+ u.a[1] = a[1];
+#endif
+ return u.ino;
+}
+
+static void encode_ino(__u32 *a, ino_t ino)
+{
+ union conv u;
+
+ u.ino = ino;
+ a[0] = u.a[0];
+#ifdef CONFIG_AUFS_INO_T_64
+ a[1] = u.a[1];
+#endif
+}
+
+/* NFS file handle */
+enum {
+ Fh_br_id,
+ Fh_sigen,
+#ifdef CONFIG_AUFS_INO_T_64
+ /* support 64bit inode number */
+ Fh_ino1,
+ Fh_ino2,
+ Fh_dir_ino1,
+ Fh_dir_ino2,
+#else
+ Fh_ino1,
+ Fh_dir_ino1,
+#endif
+ Fh_igen,
+ Fh_h_type,
+ Fh_tail,
+
+ Fh_ino = Fh_ino1,
+ Fh_dir_ino = Fh_dir_ino1
+};
+
+static int au_test_anon(struct dentry *dentry)
+{
+ /* note: read d_flags without d_lock */
+ return !!(dentry->d_flags & DCACHE_DISCONNECTED);
+}
+
+int au_test_nfsd(void)
+{
+ int ret;
+ struct task_struct *tsk = current;
+ char comm[sizeof(tsk->comm)];
+
+ ret = 0;
+ if (tsk->flags & PF_KTHREAD) {
+ get_task_comm(comm, tsk);
+ ret = !strcmp(comm, "nfsd");
+ }
+
+ return ret;
+}
+
+/* ---------------------------------------------------------------------- */
+/* inode generation external table */
+
+void au_xigen_inc(struct inode *inode)
+{
+ loff_t pos;
+ ssize_t sz;
+ __u32 igen;
+ struct super_block *sb;
+ struct au_sbinfo *sbinfo;
+
+ sb = inode->i_sb;
+ AuDebugOn(!au_opt_test(au_mntflags(sb), XINO));
+
+ sbinfo = au_sbi(sb);
+ pos = inode->i_ino;
+ pos *= sizeof(igen);
+ igen = inode->i_generation + 1;
+ sz = xino_fwrite(sbinfo->si_xwrite, sbinfo->si_xigen, &igen,
+ sizeof(igen), &pos);
+ if (sz == sizeof(igen))
+ return; /* success */
+
+ if (unlikely(sz >= 0))
+ AuIOErr("xigen error (%zd)\n", sz);
+}
+
+int au_xigen_new(struct inode *inode)
+{
+ int err;
+ loff_t pos;
+ ssize_t sz;
+ struct super_block *sb;
+ struct au_sbinfo *sbinfo;
+ struct file *file;
+
+ err = 0;
+ /* todo: dirty, at mount time */
+ if (inode->i_ino == AUFS_ROOT_INO)
+ goto out;
+ sb = inode->i_sb;
+ SiMustAnyLock(sb);
+ if (unlikely(!au_opt_test(au_mntflags(sb), XINO)))
+ goto out;
+
+ err = -EFBIG;
+ pos = inode->i_ino;
+ if (unlikely(au_loff_max / sizeof(inode->i_generation) - 1 < pos)) {
+ AuIOErr1("too large i%lld\n", pos);
+ goto out;
+ }
+ pos *= sizeof(inode->i_generation);
+
+ err = 0;
+ sbinfo = au_sbi(sb);
+ file = sbinfo->si_xigen;
+ BUG_ON(!file);
+
+ if (vfsub_f_size_read(file)
+ < pos + sizeof(inode->i_generation)) {
+ inode->i_generation = atomic_inc_return(&sbinfo->si_xigen_next);
+ sz = xino_fwrite(sbinfo->si_xwrite, file, &inode->i_generation,
+ sizeof(inode->i_generation), &pos);
+ } else
+ sz = xino_fread(sbinfo->si_xread, file, &inode->i_generation,
+ sizeof(inode->i_generation), &pos);
+ if (sz == sizeof(inode->i_generation))
+ goto out; /* success */
+
+ err = sz;
+ if (unlikely(sz >= 0)) {
+ err = -EIO;
+ AuIOErr("xigen error (%zd)\n", sz);
+ }
+
+out:
+ return err;
+}
+
+int au_xigen_set(struct super_block *sb, struct file *base)
+{
+ int err;
+ struct au_sbinfo *sbinfo;
+ struct file *file;
+
+ SiMustWriteLock(sb);
+
+ sbinfo = au_sbi(sb);
+ file = au_xino_create2(base, sbinfo->si_xigen);
+ err = PTR_ERR(file);
+ if (IS_ERR(file))
+ goto out;
+ err = 0;
+ if (sbinfo->si_xigen)
+ fput(sbinfo->si_xigen);
+ sbinfo->si_xigen = file;
+
+out:
+ return err;
+}
+
+void au_xigen_clr(struct super_block *sb)
+{
+ struct au_sbinfo *sbinfo;
+
+ SiMustWriteLock(sb);
+
+ sbinfo = au_sbi(sb);
+ if (sbinfo->si_xigen) {
+ fput(sbinfo->si_xigen);
+ sbinfo->si_xigen = NULL;
+ }
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct dentry *decode_by_ino(struct super_block *sb, ino_t ino,
+ ino_t dir_ino)
+{
+ struct dentry *dentry, *d;
+ struct inode *inode;
+ unsigned int sigen;
+
+ dentry = NULL;
+ inode = ilookup(sb, ino);
+ if (!inode)
+ goto out;
+
+ dentry = ERR_PTR(-ESTALE);
+ sigen = au_sigen(sb);
+ if (unlikely(is_bad_inode(inode)
+ || IS_DEADDIR(inode)
+ || sigen != au_iigen(inode, NULL)))
+ goto out_iput;
+
+ dentry = NULL;
+ if (!dir_ino || S_ISDIR(inode->i_mode))
+ dentry = d_find_alias(inode);
+ else {
+ spin_lock(&inode->i_lock);
+ hlist_for_each_entry(d, &inode->i_dentry, d_u.d_alias) {
+ spin_lock(&d->d_lock);
+ if (!au_test_anon(d)
+ && d_inode(d->d_parent)->i_ino == dir_ino) {
+ dentry = dget_dlock(d);
+ spin_unlock(&d->d_lock);
+ break;
+ }
+ spin_unlock(&d->d_lock);
+ }
+ spin_unlock(&inode->i_lock);
+ }
+ if (unlikely(dentry && au_digen_test(dentry, sigen))) {
+ /* need to refresh */
+ dput(dentry);
+ dentry = NULL;
+ }
+
+out_iput:
+ iput(inode);
+out:
+ AuTraceErrPtr(dentry);
+ return dentry;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* todo: dirty? */
+/* if exportfs_decode_fh() passed a vfsmount*, we could be happy */
+
+struct au_compare_mnt_args {
+ /* input */
+ struct super_block *sb;
+
+ /* output */
+ struct vfsmount *mnt;
+};
+
+static int au_compare_mnt(struct vfsmount *mnt, void *arg)
+{
+ struct au_compare_mnt_args *a = arg;
+
+ if (mnt->mnt_sb != a->sb)
+ return 0;
+ a->mnt = mntget(mnt);
+ return 1;
+}
+
+static struct vfsmount *au_mnt_get(struct super_block *sb)
+{
+ int err;
+ struct path root;
+ struct au_compare_mnt_args args = {
+ .sb = sb
+ };
+
+ get_fs_root(current->fs, &root);
+ rcu_read_lock();
+ err = iterate_mounts(au_compare_mnt, &args, root.mnt);
+ rcu_read_unlock();
+ path_put(&root);
+ AuDebugOn(!err);
+ AuDebugOn(!args.mnt);
+ return args.mnt;
+}
+
+struct au_nfsd_si_lock {
+ unsigned int sigen;
+ aufs_bindex_t bindex, br_id;
+ unsigned char force_lock;
+};
+
+static int si_nfsd_read_lock(struct super_block *sb,
+ struct au_nfsd_si_lock *nsi_lock)
+{
+ int err;
+ aufs_bindex_t bindex;
+
+ si_read_lock(sb, AuLock_FLUSH);
+
+ /* branch id may be wrapped around */
+ err = 0;
+ bindex = au_br_index(sb, nsi_lock->br_id);
+ if (bindex >= 0 && nsi_lock->sigen + AUFS_BRANCH_MAX > au_sigen(sb))
+ goto out; /* success */
+
+ err = -ESTALE;
+ bindex = -1;
+ if (!nsi_lock->force_lock)
+ si_read_unlock(sb);
+
+out:
+ nsi_lock->bindex = bindex;
+ return err;
+}
+
+struct find_name_by_ino {
+ struct dir_context ctx;
+ int called, found;
+ ino_t ino;
+ char *name;
+ int namelen;
+};
+
+static int
+find_name_by_ino(struct dir_context *ctx, const char *name, int namelen,
+ loff_t offset, u64 ino, unsigned int d_type)
+{
+ struct find_name_by_ino *a = container_of(ctx, struct find_name_by_ino,
+ ctx);
+
+ a->called++;
+ if (a->ino != ino)
+ return 0;
+
+ memcpy(a->name, name, namelen);
+ a->namelen = namelen;
+ a->found = 1;
+ return 1;
+}
+
+static struct dentry *au_lkup_by_ino(struct path *path, ino_t ino,
+ struct au_nfsd_si_lock *nsi_lock)
+{
+ struct dentry *dentry, *parent;
+ struct file *file;
+ struct inode *dir;
+ struct find_name_by_ino arg = {
+ .ctx = {
+ .actor = find_name_by_ino
+ }
+ };
+ int err;
+
+ parent = path->dentry;
+ if (nsi_lock)
+ si_read_unlock(parent->d_sb);
+ file = vfsub_dentry_open(path, au_dir_roflags);
+ dentry = (void *)file;
+ if (IS_ERR(file))
+ goto out;
+
+ dentry = ERR_PTR(-ENOMEM);
+ arg.name = (void *)__get_free_page(GFP_NOFS);
+ if (unlikely(!arg.name))
+ goto out_file;
+ arg.ino = ino;
+ arg.found = 0;
+ do {
+ arg.called = 0;
+ /* smp_mb(); */
+ err = vfsub_iterate_dir(file, &arg.ctx);
+ } while (!err && !arg.found && arg.called);
+ dentry = ERR_PTR(err);
+ if (unlikely(err))
+ goto out_name;
+ /* instead of ENOENT */
+ dentry = ERR_PTR(-ESTALE);
+ if (!arg.found)
+ goto out_name;
+
+ /* do not call vfsub_lkup_one() */
+ dir = d_inode(parent);
+ dentry = vfsub_lookup_one_len_unlocked(arg.name, parent, arg.namelen);
+ AuTraceErrPtr(dentry);
+ if (IS_ERR(dentry))
+ goto out_name;
+ AuDebugOn(au_test_anon(dentry));
+ if (unlikely(d_really_is_negative(dentry))) {
+ dput(dentry);
+ dentry = ERR_PTR(-ENOENT);
+ }
+
+out_name:
+ free_page((unsigned long)arg.name);
+out_file:
+ fput(file);
+out:
+ if (unlikely(nsi_lock
+ && si_nfsd_read_lock(parent->d_sb, nsi_lock) < 0))
+ if (!IS_ERR(dentry)) {
+ dput(dentry);
+ dentry = ERR_PTR(-ESTALE);
+ }
+ AuTraceErrPtr(dentry);
+ return dentry;
+}
+
+static struct dentry *decode_by_dir_ino(struct super_block *sb, ino_t ino,
+ ino_t dir_ino,
+ struct au_nfsd_si_lock *nsi_lock)
+{
+ struct dentry *dentry;
+ struct path path;
+
+ if (dir_ino != AUFS_ROOT_INO) {
+ path.dentry = decode_by_ino(sb, dir_ino, 0);
+ dentry = path.dentry;
+ if (!path.dentry || IS_ERR(path.dentry))
+ goto out;
+ AuDebugOn(au_test_anon(path.dentry));
+ } else
+ path.dentry = dget(sb->s_root);
+
+ path.mnt = au_mnt_get(sb);
+ dentry = au_lkup_by_ino(&path, ino, nsi_lock);
+ path_put(&path);
+
+out:
+ AuTraceErrPtr(dentry);
+ return dentry;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int h_acceptable(void *expv, struct dentry *dentry)
+{
+ return 1;
+}
+
+static char *au_build_path(struct dentry *h_parent, struct path *h_rootpath,
+ char *buf, int len, struct super_block *sb)
+{
+ char *p;
+ int n;
+ struct path path;
+
+ p = d_path(h_rootpath, buf, len);
+ if (IS_ERR(p))
+ goto out;
+ n = strlen(p);
+
+ path.mnt = h_rootpath->mnt;
+ path.dentry = h_parent;
+ p = d_path(&path, buf, len);
+ if (IS_ERR(p))
+ goto out;
+ if (n != 1)
+ p += n;
+
+ path.mnt = au_mnt_get(sb);
+ path.dentry = sb->s_root;
+ p = d_path(&path, buf, len - strlen(p));
+ mntput(path.mnt);
+ if (IS_ERR(p))
+ goto out;
+ if (n != 1)
+ p[strlen(p)] = '/';
+
+out:
+ AuTraceErrPtr(p);
+ return p;
+}
+
+static
+struct dentry *decode_by_path(struct super_block *sb, ino_t ino, __u32 *fh,
+ int fh_len, struct au_nfsd_si_lock *nsi_lock)
+{
+ struct dentry *dentry, *h_parent, *root;
+ struct super_block *h_sb;
+ char *pathname, *p;
+ struct vfsmount *h_mnt;
+ struct au_branch *br;
+ int err;
+ struct path path;
+
+ br = au_sbr(sb, nsi_lock->bindex);
+ h_mnt = au_br_mnt(br);
+ h_sb = h_mnt->mnt_sb;
+ /* todo: call lower fh_to_dentry()? fh_to_parent()? */
+ h_parent = exportfs_decode_fh(h_mnt, (void *)(fh + Fh_tail),
+ fh_len - Fh_tail, fh[Fh_h_type],
+ h_acceptable, /*context*/NULL);
+ dentry = h_parent;
+ if (unlikely(!h_parent || IS_ERR(h_parent))) {
+ AuWarn1("%s decode_fh failed, %ld\n",
+ au_sbtype(h_sb), PTR_ERR(h_parent));
+ goto out;
+ }
+ dentry = NULL;
+ if (unlikely(au_test_anon(h_parent))) {
+ AuWarn1("%s decode_fh returned a disconnected dentry\n",
+ au_sbtype(h_sb));
+ goto out_h_parent;
+ }
+
+ dentry = ERR_PTR(-ENOMEM);
+ pathname = (void *)__get_free_page(GFP_NOFS);
+ if (unlikely(!pathname))
+ goto out_h_parent;
+
+ root = sb->s_root;
+ path.mnt = h_mnt;
+ di_read_lock_parent(root, !AuLock_IR);
+ path.dentry = au_h_dptr(root, nsi_lock->bindex);
+ di_read_unlock(root, !AuLock_IR);
+ p = au_build_path(h_parent, &path, pathname, PAGE_SIZE, sb);
+ dentry = (void *)p;
+ if (IS_ERR(p))
+ goto out_pathname;
+
+ si_read_unlock(sb);
+ err = vfsub_kern_path(p, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
+ dentry = ERR_PTR(err);
+ if (unlikely(err))
+ goto out_relock;
+
+ dentry = ERR_PTR(-ENOENT);
+ AuDebugOn(au_test_anon(path.dentry));
+ if (unlikely(d_really_is_negative(path.dentry)))
+ goto out_path;
+
+ if (ino != d_inode(path.dentry)->i_ino)
+ dentry = au_lkup_by_ino(&path, ino, /*nsi_lock*/NULL);
+ else
+ dentry = dget(path.dentry);
+
+out_path:
+ path_put(&path);
+out_relock:
+ if (unlikely(si_nfsd_read_lock(sb, nsi_lock) < 0))
+ if (!IS_ERR(dentry)) {
+ dput(dentry);
+ dentry = ERR_PTR(-ESTALE);
+ }
+out_pathname:
+ free_page((unsigned long)pathname);
+out_h_parent:
+ dput(h_parent);
+out:
+ AuTraceErrPtr(dentry);
+ return dentry;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct dentry *
+aufs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len,
+ int fh_type)
+{
+ struct dentry *dentry;
+ __u32 *fh = fid->raw;
+ struct au_branch *br;
+ ino_t ino, dir_ino;
+ struct au_nfsd_si_lock nsi_lock = {
+ .force_lock = 0
+ };
+
+ dentry = ERR_PTR(-ESTALE);
+ /* it should never happen, but the file handle is unreliable */
+ if (unlikely(fh_len < Fh_tail))
+ goto out;
+ nsi_lock.sigen = fh[Fh_sigen];
+ nsi_lock.br_id = fh[Fh_br_id];
+
+ /* branch id may be wrapped around */
+ br = NULL;
+ if (unlikely(si_nfsd_read_lock(sb, &nsi_lock)))
+ goto out;
+ nsi_lock.force_lock = 1;
+
+ /* is this inode still cached? */
+ ino = decode_ino(fh + Fh_ino);
+ /* it should never happen */
+ if (unlikely(ino == AUFS_ROOT_INO))
+ goto out_unlock;
+
+ dir_ino = decode_ino(fh + Fh_dir_ino);
+ dentry = decode_by_ino(sb, ino, dir_ino);
+ if (IS_ERR(dentry))
+ goto out_unlock;
+ if (dentry)
+ goto accept;
+
+ /* is the parent dir cached? */
+ br = au_sbr(sb, nsi_lock.bindex);
+ atomic_inc(&br->br_count);
+ dentry = decode_by_dir_ino(sb, ino, dir_ino, &nsi_lock);
+ if (IS_ERR(dentry))
+ goto out_unlock;
+ if (dentry)
+ goto accept;
+
+ /* lookup path */
+ dentry = decode_by_path(sb, ino, fh, fh_len, &nsi_lock);
+ if (IS_ERR(dentry))
+ goto out_unlock;
+ if (unlikely(!dentry))
+ /* todo?: make it ESTALE */
+ goto out_unlock;
+
+accept:
+ if (!au_digen_test(dentry, au_sigen(sb))
+ && d_inode(dentry)->i_generation == fh[Fh_igen])
+ goto out_unlock; /* success */
+
+ dput(dentry);
+ dentry = ERR_PTR(-ESTALE);
+out_unlock:
+ if (br)
+ atomic_dec(&br->br_count);
+ si_read_unlock(sb);
+out:
+ AuTraceErrPtr(dentry);
+ return dentry;
+}
+
+#if 0 /* reserved for future use */
+/* support subtreecheck option */
+static struct dentry *aufs_fh_to_parent(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type)
+{
+ struct dentry *parent;
+ __u32 *fh = fid->raw;
+ ino_t dir_ino;
+
+ dir_ino = decode_ino(fh + Fh_dir_ino);
+ parent = decode_by_ino(sb, dir_ino, 0);
+ if (IS_ERR(parent))
+ goto out;
+ if (!parent)
+ parent = decode_by_path(sb, au_br_index(sb, fh[Fh_br_id]),
+ dir_ino, fh, fh_len);
+
+out:
+ AuTraceErrPtr(parent);
+ return parent;
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+static int aufs_encode_fh(struct inode *inode, __u32 *fh, int *max_len,
+ struct inode *dir)
+{
+ int err;
+ aufs_bindex_t bindex;
+ struct super_block *sb, *h_sb;
+ struct dentry *dentry, *parent, *h_parent;
+ struct inode *h_dir;
+ struct au_branch *br;
+
+ err = -ENOSPC;
+ if (unlikely(*max_len <= Fh_tail)) {
+ AuWarn1("NFSv2 client (max_len %d)?\n", *max_len);
+ goto out;
+ }
+
+ err = FILEID_ROOT;
+ if (inode->i_ino == AUFS_ROOT_INO) {
+ AuDebugOn(inode->i_ino != AUFS_ROOT_INO);
+ goto out;
+ }
+
+ h_parent = NULL;
+ sb = inode->i_sb;
+ err = si_read_lock(sb, AuLock_FLUSH);
+ if (unlikely(err))
+ goto out;
+
+#ifdef CONFIG_AUFS_DEBUG
+ if (unlikely(!au_opt_test(au_mntflags(sb), XINO)))
+ AuWarn1("NFS-exporting requires xino\n");
+#endif
+ err = -EIO;
+ parent = NULL;
+ ii_read_lock_child(inode);
+ bindex = au_ibstart(inode);
+ if (!dir) {
+ dentry = d_find_any_alias(inode);
+ if (unlikely(!dentry))
+ goto out_unlock;
+ AuDebugOn(au_test_anon(dentry));
+ parent = dget_parent(dentry);
+ dput(dentry);
+ if (unlikely(!parent))
+ goto out_unlock;
+ if (d_really_is_positive(parent))
+ dir = d_inode(parent);
+ }
+
+ ii_read_lock_parent(dir);
+ h_dir = au_h_iptr(dir, bindex);
+ ii_read_unlock(dir);
+ if (unlikely(!h_dir))
+ goto out_parent;
+ h_parent = d_find_any_alias(h_dir);
+ if (unlikely(!h_parent))
+ goto out_hparent;
+
+ err = -EPERM;
+ br = au_sbr(sb, bindex);
+ h_sb = au_br_sb(br);
+ if (unlikely(!h_sb->s_export_op)) {
+ AuErr1("%s branch is not exportable\n", au_sbtype(h_sb));
+ goto out_hparent;
+ }
+
+ fh[Fh_br_id] = br->br_id;
+ fh[Fh_sigen] = au_sigen(sb);
+ encode_ino(fh + Fh_ino, inode->i_ino);
+ encode_ino(fh + Fh_dir_ino, dir->i_ino);
+ fh[Fh_igen] = inode->i_generation;
+
+ *max_len -= Fh_tail;
+ fh[Fh_h_type] = exportfs_encode_fh(h_parent, (void *)(fh + Fh_tail),
+ max_len,
+ /*connectable or subtreecheck*/0);
+ err = fh[Fh_h_type];
+ *max_len += Fh_tail;
+ /* todo: macros? */
+ if (err != FILEID_INVALID)
+ err = 99;
+ else
+ AuWarn1("%s encode_fh failed\n", au_sbtype(h_sb));
+
+out_hparent:
+ dput(h_parent);
+out_parent:
+ dput(parent);
+out_unlock:
+ ii_read_unlock(inode);
+ si_read_unlock(sb);
+out:
+ if (unlikely(err < 0))
+ err = FILEID_INVALID;
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int aufs_commit_metadata(struct inode *inode)
+{
+ int err;
+ aufs_bindex_t bindex;
+ struct super_block *sb;
+ struct inode *h_inode;
+ int (*f)(struct inode *inode);
+
+ sb = inode->i_sb;
+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+ ii_write_lock_child(inode);
+ bindex = au_ibstart(inode);
+ AuDebugOn(bindex < 0);
+ h_inode = au_h_iptr(inode, bindex);
+
+ f = h_inode->i_sb->s_export_op->commit_metadata;
+ if (f)
+ err = f(h_inode);
+ else {
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = 0 /* metadata only */
+ };
+
+ err = sync_inode(h_inode, &wbc);
+ }
+
+ au_cpup_attr_timesizes(inode);
+ ii_write_unlock(inode);
+ si_read_unlock(sb);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct export_operations aufs_export_op = {
+ .fh_to_dentry = aufs_fh_to_dentry,
+ /* .fh_to_parent = aufs_fh_to_parent, */
+ .encode_fh = aufs_encode_fh,
+ .commit_metadata = aufs_commit_metadata
+};
+
+void au_export_init(struct super_block *sb)
+{
+ struct au_sbinfo *sbinfo;
+ __u32 u;
+
+ sb->s_export_op = &aufs_export_op;
+ sbinfo = au_sbi(sb);
+ sbinfo->si_xigen = NULL;
+ get_random_bytes(&u, sizeof(u));
+ BUILD_BUG_ON(sizeof(u) != sizeof(int));
+ atomic_set(&sbinfo->si_xigen_next, u);
+}
diff --git a/fs/aufs/f_op.c b/fs/aufs/f_op.c
new file mode 100644
index 000000000..ac1304a18
--- /dev/null
+++ b/fs/aufs/f_op.c
@@ -0,0 +1,757 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * file and vm operations
+ */
+
+#include <linux/aio.h>
+#include <linux/fs_stack.h>
+#include <linux/mman.h>
+#include <linux/security.h>
+#include "aufs.h"
+
+int au_do_open_nondir(struct file *file, int flags, struct file *h_file)
+{
+ int err;
+ aufs_bindex_t bindex;
+ struct dentry *dentry, *h_dentry;
+ struct au_finfo *finfo;
+ struct inode *h_inode;
+
+ FiMustWriteLock(file);
+
+ err = 0;
+ dentry = file->f_path.dentry;
+ AuDebugOn(IS_ERR_OR_NULL(dentry));
+ finfo = au_fi(file);
+ memset(&finfo->fi_htop, 0, sizeof(finfo->fi_htop));
+ atomic_set(&finfo->fi_mmapped, 0);
+ bindex = au_dbstart(dentry);
+ if (!h_file) {
+ h_dentry = au_h_dptr(dentry, bindex);
+ err = vfsub_test_mntns(file->f_path.mnt, h_dentry->d_sb);
+ if (unlikely(err))
+ goto out;
+ h_file = au_h_open(dentry, bindex, flags, file, /*force_wr*/0);
+ } else {
+ h_dentry = h_file->f_path.dentry;
+ err = vfsub_test_mntns(file->f_path.mnt, h_dentry->d_sb);
+ if (unlikely(err))
+ goto out;
+ get_file(h_file);
+ }
+ if (IS_ERR(h_file))
+ err = PTR_ERR(h_file);
+ else {
+ if ((flags & __O_TMPFILE)
+ && !(flags & O_EXCL)) {
+ h_inode = file_inode(h_file);
+ spin_lock(&h_inode->i_lock);
+ h_inode->i_state |= I_LINKABLE;
+ spin_unlock(&h_inode->i_lock);
+ }
+ au_set_fbstart(file, bindex);
+ au_set_h_fptr(file, bindex, h_file);
+ au_update_figen(file);
+ /* todo: necessary? */
+ /* file->f_ra = h_file->f_ra; */
+ }
+
+out:
+ return err;
+}
+
+static int aufs_open_nondir(struct inode *inode __maybe_unused,
+ struct file *file)
+{
+ int err;
+ struct super_block *sb;
+ struct au_do_open_args args = {
+ .open = au_do_open_nondir
+ };
+
+ AuDbg("%pD, f_flags 0x%x, f_mode 0x%x\n",
+ file, vfsub_file_flags(file), file->f_mode);
+
+ sb = file->f_path.dentry->d_sb;
+ si_read_lock(sb, AuLock_FLUSH);
+ err = au_do_open(file, &args);
+ si_read_unlock(sb);
+ return err;
+}
+
+int aufs_release_nondir(struct inode *inode __maybe_unused, struct file *file)
+{
+ struct au_finfo *finfo;
+ aufs_bindex_t bindex;
+
+ finfo = au_fi(file);
+ au_sphl_del(&finfo->fi_hlist,
+ &au_sbi(file->f_path.dentry->d_sb)->si_files);
+ bindex = finfo->fi_btop;
+ if (bindex >= 0)
+ au_set_h_fptr(file, bindex, NULL);
+
+ au_finfo_fin(file);
+ return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_do_flush_nondir(struct file *file, fl_owner_t id)
+{
+ int err;
+ struct file *h_file;
+
+ err = 0;
+ h_file = au_hf_top(file);
+ if (h_file)
+ err = vfsub_flush(h_file, id);
+ return err;
+}
+
+static int aufs_flush_nondir(struct file *file, fl_owner_t id)
+{
+ return au_do_flush(file, id, au_do_flush_nondir);
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * read and write functions acquire [fdi]_rwsem once, but release before
+ * mmap_sem. This is to avoid a race condition with mmap(2).
+ * Releasing these aufs rwsems should be safe: since si_rwsem is kept, no
+ * branch management and no harmful copy-up should happen. Actually a copy-up
+ * may happen in read functions after [fdi]_rwsem are released, but it should
+ * be harmless.
+ */
+
+/* Callers should call au_read_post() or fput() in the end */
+struct file *au_read_pre(struct file *file, int keep_fi)
+{
+ struct file *h_file;
+ int err;
+
+ err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/0);
+ if (!err) {
+ di_read_unlock(file->f_path.dentry, AuLock_IR);
+ h_file = au_hf_top(file);
+ get_file(h_file);
+ if (!keep_fi)
+ fi_read_unlock(file);
+ } else
+ h_file = ERR_PTR(err);
+
+ return h_file;
+}
+
+static void au_read_post(struct inode *inode, struct file *h_file)
+{
+ /* update without lock; I don't think it is a problem */
+ fsstack_copy_attr_atime(inode, file_inode(h_file));
+ fput(h_file);
+}
+
+struct au_write_pre {
+ blkcnt_t blks;
+ aufs_bindex_t bstart;
+};
+
+/*
+ * returns with iinfo write-locked
+ * callers should call au_write_post() or iinfo_write_unlock() + fput() in the
+ * end
+ */
+static struct file *au_write_pre(struct file *file, int do_ready,
+ struct au_write_pre *wpre)
+{
+ struct file *h_file;
+ struct dentry *dentry;
+ int err;
+ struct au_pin pin;
+
+ err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
+ h_file = ERR_PTR(err);
+ if (unlikely(err))
+ goto out;
+
+ dentry = file->f_path.dentry;
+ if (do_ready) {
+ err = au_ready_to_write(file, -1, &pin);
+ if (unlikely(err)) {
+ h_file = ERR_PTR(err);
+ di_write_unlock(dentry);
+ goto out_fi;
+ }
+ }
+
+ di_downgrade_lock(dentry, /*flags*/0);
+ if (wpre)
+ wpre->bstart = au_fbstart(file);
+ h_file = au_hf_top(file);
+ get_file(h_file);
+ if (wpre)
+ wpre->blks = file_inode(h_file)->i_blocks;
+ if (do_ready)
+ au_unpin(&pin);
+ di_read_unlock(dentry, /*flags*/0);
+
+out_fi:
+ fi_write_unlock(file);
+out:
+ return h_file;
+}
+
+static void au_write_post(struct inode *inode, struct file *h_file,
+ struct au_write_pre *wpre, ssize_t written)
+{
+ struct inode *h_inode;
+
+ au_cpup_attr_timesizes(inode);
+ AuDebugOn(au_ibstart(inode) != wpre->bstart);
+ h_inode = file_inode(h_file);
+ inode->i_mode = h_inode->i_mode;
+ ii_write_unlock(inode);
+ fput(h_file);
+
+ /* AuDbg("blks %llu, %llu\n", (u64)blks, (u64)h_inode->i_blocks); */
+ if (written > 0)
+ au_fhsm_wrote(inode->i_sb, wpre->bstart,
+ /*force*/h_inode->i_blocks > wpre->blks);
+}
+
+static ssize_t aufs_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ ssize_t err;
+ struct inode *inode;
+ struct file *h_file;
+ struct super_block *sb;
+
+ inode = file_inode(file);
+ sb = inode->i_sb;
+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+
+ h_file = au_read_pre(file, /*keep_fi*/0);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+
+ /* filedata may be obsoleted by concurrent copyup, but no problem */
+ err = vfsub_read_u(h_file, buf, count, ppos);
+ /* todo: necessary? */
+ /* file->f_ra = h_file->f_ra; */
+ au_read_post(inode, h_file);
+
+out:
+ si_read_unlock(sb);
+ return err;
+}
+
+/*
+ * todo: very ugly
+ * it safely takes both i_mutex and si_rwsem for read.
+ * if the plink maintenance mode continues forever (that is the problem),
+ * this may loop forever.
+ */
+static void au_mtx_and_read_lock(struct inode *inode)
+{
+ int err;
+ struct super_block *sb = inode->i_sb;
+
+ while (1) {
+ inode_lock(inode);
+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+ if (!err)
+ break;
+ inode_unlock(inode);
+ si_read_lock(sb, AuLock_NOPLMW);
+ si_read_unlock(sb);
+ }
+}
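+/*
+ * Note (illustrative): the two locks taken here are dropped by the callers in
+ * reverse order at the end of each operation,
+ *   si_read_unlock(inode->i_sb);
+ *   inode_unlock(inode);
+ * as aufs_write() and the other write-side entry points below do.
+ */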
+
+static ssize_t aufs_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t err;
+ struct au_write_pre wpre;
+ struct inode *inode;
+ struct file *h_file;
+ char __user *buf = (char __user *)ubuf;
+
+ inode = file_inode(file);
+ au_mtx_and_read_lock(inode);
+
+ h_file = au_write_pre(file, /*do_ready*/1, &wpre);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+
+ err = vfsub_write_u(h_file, buf, count, ppos);
+ au_write_post(inode, h_file, &wpre, err);
+
+out:
+ si_read_unlock(inode->i_sb);
+ inode_unlock(inode);
+ return err;
+}
+
+static ssize_t au_do_iter(struct file *h_file, int rw, struct kiocb *kio,
+ struct iov_iter *iov_iter)
+{
+ ssize_t err;
+ struct file *file;
+ ssize_t (*iter)(struct kiocb *, struct iov_iter *);
+
+ err = security_file_permission(h_file, rw);
+ if (unlikely(err))
+ goto out;
+
+ err = -ENOSYS;
+ iter = NULL;
+ if (rw == MAY_READ)
+ iter = h_file->f_op->read_iter;
+ else if (rw == MAY_WRITE)
+ iter = h_file->f_op->write_iter;
+
+ file = kio->ki_filp;
+ kio->ki_filp = h_file;
+ if (iter) {
+ lockdep_off();
+ err = iter(kio, iov_iter);
+ lockdep_on();
+ } else
+ /* currently there is no such fs */
+ WARN_ON_ONCE(1);
+ kio->ki_filp = file;
+
+out:
+ return err;
+}
+
+static ssize_t aufs_read_iter(struct kiocb *kio, struct iov_iter *iov_iter)
+{
+ ssize_t err;
+ struct file *file, *h_file;
+ struct inode *inode;
+ struct super_block *sb;
+
+ file = kio->ki_filp;
+ inode = file_inode(file);
+ sb = inode->i_sb;
+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+
+ h_file = au_read_pre(file, /*keep_fi*/0);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+
+ err = au_do_iter(h_file, MAY_READ, kio, iov_iter);
+ /* todo: necessary? */
+ /* file->f_ra = h_file->f_ra; */
+ au_read_post(inode, h_file);
+
+out:
+ si_read_unlock(sb);
+ return err;
+}
+
+static ssize_t aufs_write_iter(struct kiocb *kio, struct iov_iter *iov_iter)
+{
+ ssize_t err;
+ struct au_write_pre wpre;
+ struct inode *inode;
+ struct file *file, *h_file;
+
+ file = kio->ki_filp;
+ inode = file_inode(file);
+ au_mtx_and_read_lock(inode);
+
+ h_file = au_write_pre(file, /*do_ready*/1, &wpre);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+
+ err = au_do_iter(h_file, MAY_WRITE, kio, iov_iter);
+ au_write_post(inode, h_file, &wpre, err);
+
+out:
+ si_read_unlock(inode->i_sb);
+ inode_unlock(inode);
+ return err;
+}
+
+static ssize_t aufs_splice_read(struct file *file, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
+{
+ ssize_t err;
+ struct file *h_file;
+ struct inode *inode;
+ struct super_block *sb;
+
+ inode = file_inode(file);
+ sb = inode->i_sb;
+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+
+ h_file = au_read_pre(file, /*keep_fi*/1);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+
+ if (au_test_loopback_kthread()) {
+ au_warn_loopback(h_file->f_path.dentry->d_sb);
+ if (file->f_mapping != h_file->f_mapping) {
+ file->f_mapping = h_file->f_mapping;
+ smp_mb(); /* unnecessary? */
+ }
+ }
+ fi_read_unlock(file);
+
+ err = vfsub_splice_to(h_file, ppos, pipe, len, flags);
+ /* todo: necessary? */
+ /* file->f_ra = h_file->f_ra; */
+ au_read_post(inode, h_file);
+
+out:
+ si_read_unlock(sb);
+ return err;
+}
+
+static ssize_t
+aufs_splice_write(struct pipe_inode_info *pipe, struct file *file, loff_t *ppos,
+ size_t len, unsigned int flags)
+{
+ ssize_t err;
+ struct au_write_pre wpre;
+ struct inode *inode;
+ struct file *h_file;
+
+ inode = file_inode(file);
+ au_mtx_and_read_lock(inode);
+
+ h_file = au_write_pre(file, /*do_ready*/1, &wpre);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+
+ err = vfsub_splice_from(pipe, h_file, ppos, len, flags);
+ au_write_post(inode, h_file, &wpre, err);
+
+out:
+ si_read_unlock(inode->i_sb);
+ inode_unlock(inode);
+ return err;
+}
+
+static long aufs_fallocate(struct file *file, int mode, loff_t offset,
+ loff_t len)
+{
+ long err;
+ struct au_write_pre wpre;
+ struct inode *inode;
+ struct file *h_file;
+
+ inode = file_inode(file);
+ au_mtx_and_read_lock(inode);
+
+ h_file = au_write_pre(file, /*do_ready*/1, &wpre);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+
+ lockdep_off();
+ err = vfs_fallocate(h_file, mode, offset, len);
+ lockdep_on();
+ au_write_post(inode, h_file, &wpre, /*written*/1);
+
+out:
+ si_read_unlock(inode->i_sb);
+ inode_unlock(inode);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * The locking order around current->mmap_sem.
+ * - in most (regular) cases
+ * file I/O syscall -- aufs_read() or something
+ * -- si_rwsem for read -- mmap_sem
+ * (Note that [fdi]i_rwsem are released before mmap_sem).
+ * - in mmap case
+ * mmap(2) -- mmap_sem -- aufs_mmap() -- si_rwsem for read -- [fdi]i_rwsem
+ * This AB-BA order is definitely bad, but is not a problem since "si_rwsem for
+ * read" allows multiple processes to acquire it and [fdi]i_rwsem are not held
+ * in file I/O. Aufs needs to stop lockdep in aufs_mmap() though.
+ * It means that when aufs acquires si_rwsem for write, the process should never
+ * acquire mmap_sem.
+ *
+ * Actually aufs_iterate() holds [fdi]i_rwsem before mmap_sem, but this is not
+ * a problem either since a directory cannot be mmap-ed.
+ * The similar scenario is applied to aufs_readlink() too.
+ */
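+/*
+ * In short (illustrative summary of the above):
+ *   regular file I/O:  si_rwsem(read) --> mmap_sem   ([fdi]_rwsem already dropped)
+ *   mmap(2):           mmap_sem --> si_rwsem(read) --> [fdi]i_rwsem
+ * which is why aufs_mmap() below brackets its lock calls with
+ * lockdep_off()/lockdep_on().
+ */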
+
+#if 0 /* stop calling security_file_mmap() */
+/* cf. linux/include/linux/mman.h: calc_vm_prot_bits() */
+#define AuConv_VM_PROT(f, b) _calc_vm_trans(f, VM_##b, PROT_##b)
+
+static unsigned long au_arch_prot_conv(unsigned long flags)
+{
+ /* currently ppc64 only */
+#ifdef CONFIG_PPC64
+ /* cf. linux/arch/powerpc/include/asm/mman.h */
+ AuDebugOn(arch_calc_vm_prot_bits(-1) != VM_SAO);
+ return AuConv_VM_PROT(flags, SAO);
+#else
+ AuDebugOn(arch_calc_vm_prot_bits(-1));
+ return 0;
+#endif
+}
+
+static unsigned long au_prot_conv(unsigned long flags)
+{
+ return AuConv_VM_PROT(flags, READ)
+ | AuConv_VM_PROT(flags, WRITE)
+ | AuConv_VM_PROT(flags, EXEC)
+ | au_arch_prot_conv(flags);
+}
+
+/* cf. linux/include/linux/mman.h: calc_vm_flag_bits() */
+#define AuConv_VM_MAP(f, b) _calc_vm_trans(f, VM_##b, MAP_##b)
+
+static unsigned long au_flag_conv(unsigned long flags)
+{
+ return AuConv_VM_MAP(flags, GROWSDOWN)
+ | AuConv_VM_MAP(flags, DENYWRITE)
+ | AuConv_VM_MAP(flags, LOCKED);
+}
+#endif
+
+static int aufs_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int err;
+ const unsigned char wlock
+ = (file->f_mode & FMODE_WRITE) && (vma->vm_flags & VM_SHARED);
+ struct super_block *sb;
+ struct file *h_file;
+ struct inode *inode;
+
+ AuDbgVmRegion(file, vma);
+
+ inode = file_inode(file);
+ sb = inode->i_sb;
+ lockdep_off();
+ si_read_lock(sb, AuLock_NOPLMW);
+
+ h_file = au_write_pre(file, wlock, /*wpre*/NULL);
+ lockdep_on();
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+
+ err = 0;
+ au_set_mmapped(file);
+ au_vm_file_reset(vma, h_file);
+ /*
+ * we cannot call security_mmap_file() here since it may acquire
+ * mmap_sem or i_mutex.
+ *
+ * err = security_mmap_file(h_file, au_prot_conv(vma->vm_flags),
+ * au_flag_conv(vma->vm_flags));
+ */
+ if (!err)
+ err = h_file->f_op->mmap(h_file, vma);
+ if (!err) {
+ au_vm_prfile_set(vma, file);
+ fsstack_copy_attr_atime(inode, file_inode(h_file));
+ goto out_fput; /* success */
+ }
+ au_unset_mmapped(file);
+ au_vm_file_reset(vma, file);
+
+out_fput:
+ lockdep_off();
+ ii_write_unlock(inode);
+ lockdep_on();
+ fput(h_file);
+out:
+ lockdep_off();
+ si_read_unlock(sb);
+ lockdep_on();
+ AuTraceErr(err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int aufs_fsync_nondir(struct file *file, loff_t start, loff_t end,
+ int datasync)
+{
+ int err;
+ struct au_write_pre wpre;
+ struct inode *inode;
+ struct file *h_file;
+
+ err = 0; /* -EBADF; */ /* posix? */
+ if (unlikely(!(file->f_mode & FMODE_WRITE)))
+ goto out;
+
+ inode = file_inode(file);
+ au_mtx_and_read_lock(inode);
+
+ h_file = au_write_pre(file, /*do_ready*/1, &wpre);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out_unlock;
+
+ err = vfsub_fsync(h_file, &h_file->f_path, datasync);
+ au_write_post(inode, h_file, &wpre, /*written*/0);
+
+out_unlock:
+ si_read_unlock(inode->i_sb);
+ inode_unlock(inode);
+out:
+ return err;
+}
+
+/* no one supports this operation, currently */
+#if 0
+static int aufs_aio_fsync_nondir(struct kiocb *kio, int datasync)
+{
+ int err;
+ struct au_write_pre wpre;
+ struct inode *inode, *h_inode;
+ struct file *file, *h_file;
+
+ err = 0; /* -EBADF; */ /* posix? */
+ if (unlikely(!(file->f_mode & FMODE_WRITE)))
+ goto out;
+
+ file = kio->ki_filp;
+ inode = file_inode(file);
+ au_mtx_and_read_lock(inode);
+
+ h_file = au_write_pre(file, /*do_ready*/1, &wpre);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out_unlock;
+
+ err = -ENOSYS;
+ h_file = au_hf_top(file);
+ if (h_file->f_op->aio_fsync) {
+ h_inode = file_inode(h_file);
+ if (!is_sync_kiocb(kio)) {
+ get_file(h_file);
+ fput(file);
+ }
+ kio->ki_filp = h_file;
+ err = h_file->f_op->aio_fsync(kio, datasync);
+ inode_lock_nested(h_inode, AuLsc_I_CHILD);
+ if (!err)
+ vfsub_update_h_iattr(&h_file->f_path, /*did*/NULL);
+ /*ignore*/
+ inode_unlock(h_inode);
+ }
+ au_write_post(inode, h_file, &wpre, /*written*/0);
+
+out_unlock:
+ si_read_unlock(inode->i_sb);
+ inode_unlock(inode);
+out:
+ return err;
+}
+#endif
+
+static int aufs_fasync(int fd, struct file *file, int flag)
+{
+ int err;
+ struct file *h_file;
+ struct super_block *sb;
+
+ sb = file->f_path.dentry->d_sb;
+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+
+ h_file = au_read_pre(file, /*keep_fi*/0);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+
+ if (h_file->f_op->fasync)
+ err = h_file->f_op->fasync(fd, h_file, flag);
+ fput(h_file); /* instead of au_read_post() */
+
+out:
+ si_read_unlock(sb);
+ return err;
+}
+
+static int aufs_setfl(struct file *file, unsigned long arg)
+{
+ int err;
+ struct file *h_file;
+ struct super_block *sb;
+
+ sb = file->f_path.dentry->d_sb;
+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+
+ h_file = au_read_pre(file, /*keep_fi*/0);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+
+ arg |= vfsub_file_flags(file) & FASYNC; /* stop calling h_file->fasync */
+ err = setfl(/*unused fd*/-1, h_file, arg);
+ fput(h_file); /* instead of au_read_post() */
+
+out:
+ si_read_unlock(sb);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* no one supports this operation, currently */
+#if 0
+static ssize_t aufs_sendpage(struct file *file, struct page *page, int offset,
+ size_t len, loff_t *pos, int more)
+{
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+const struct file_operations aufs_file_fop = {
+ .owner = THIS_MODULE,
+
+ .llseek = default_llseek,
+
+ .read = aufs_read,
+ .write = aufs_write,
+ .read_iter = aufs_read_iter,
+ .write_iter = aufs_write_iter,
+
+#ifdef CONFIG_AUFS_POLL
+ .poll = aufs_poll,
+#endif
+ .unlocked_ioctl = aufs_ioctl_nondir,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = aufs_compat_ioctl_nondir,
+#endif
+ .mmap = aufs_mmap,
+ .open = aufs_open_nondir,
+ .flush = aufs_flush_nondir,
+ .release = aufs_release_nondir,
+ .fsync = aufs_fsync_nondir,
+ /* .aio_fsync = aufs_aio_fsync_nondir, */
+ .fasync = aufs_fasync,
+ /* .sendpage = aufs_sendpage, */
+ .setfl = aufs_setfl,
+ .splice_write = aufs_splice_write,
+ .splice_read = aufs_splice_read,
+#if 0
+ .aio_splice_write = aufs_aio_splice_write,
+ .aio_splice_read = aufs_aio_splice_read,
+#endif
+ .fallocate = aufs_fallocate
+};
diff --git a/fs/aufs/fhsm.c b/fs/aufs/fhsm.c
new file mode 100644
index 000000000..db079d6ee
--- /dev/null
+++ b/fs/aufs/fhsm.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright (C) 2011-2016 Junjiro R. Okajima
+ */
+
+/*
+ * File-based Hierarchy Storage Management
+ */
+
+#include <linux/anon_inodes.h>
+#include <linux/poll.h>
+#include <linux/seq_file.h>
+#include <linux/statfs.h>
+#include "aufs.h"
+
+static aufs_bindex_t au_fhsm_bottom(struct super_block *sb)
+{
+ struct au_sbinfo *sbinfo;
+ struct au_fhsm *fhsm;
+
+ SiMustAnyLock(sb);
+
+ sbinfo = au_sbi(sb);
+ fhsm = &sbinfo->si_fhsm;
+ AuDebugOn(!fhsm);
+ return fhsm->fhsm_bottom;
+}
+
+void au_fhsm_set_bottom(struct super_block *sb, aufs_bindex_t bindex)
+{
+ struct au_sbinfo *sbinfo;
+ struct au_fhsm *fhsm;
+
+ SiMustWriteLock(sb);
+
+ sbinfo = au_sbi(sb);
+ fhsm = &sbinfo->si_fhsm;
+ AuDebugOn(!fhsm);
+ fhsm->fhsm_bottom = bindex;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_fhsm_test_jiffy(struct au_sbinfo *sbinfo, struct au_branch *br)
+{
+ struct au_br_fhsm *bf;
+
+ bf = br->br_fhsm;
+ MtxMustLock(&bf->bf_lock);
+
+ return !bf->bf_readable
+ || time_after(jiffies,
+ bf->bf_jiffy + sbinfo->si_fhsm.fhsm_expire);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void au_fhsm_notify(struct super_block *sb, int val)
+{
+ struct au_sbinfo *sbinfo;
+ struct au_fhsm *fhsm;
+
+ SiMustAnyLock(sb);
+
+ sbinfo = au_sbi(sb);
+ fhsm = &sbinfo->si_fhsm;
+ if (au_fhsm_pid(fhsm)
+ && atomic_read(&fhsm->fhsm_readable) != -1) {
+ atomic_set(&fhsm->fhsm_readable, val);
+ if (val)
+ wake_up(&fhsm->fhsm_wqh);
+ }
+}
+
+static int au_fhsm_stfs(struct super_block *sb, aufs_bindex_t bindex,
+ struct aufs_stfs *rstfs, int do_lock, int do_notify)
+{
+ int err;
+ struct au_branch *br;
+ struct au_br_fhsm *bf;
+
+ br = au_sbr(sb, bindex);
+ AuDebugOn(au_br_rdonly(br));
+ bf = br->br_fhsm;
+ AuDebugOn(!bf);
+
+ if (do_lock)
+ mutex_lock(&bf->bf_lock);
+ else
+ MtxMustLock(&bf->bf_lock);
+
+ /* sb->s_root for NFS is unreliable */
+ err = au_br_stfs(br, &bf->bf_stfs);
+ if (unlikely(err)) {
+ AuErr1("FHSM failed (%d), b%d, ignored.\n", bindex, err);
+ goto out;
+ }
+
+ bf->bf_jiffy = jiffies;
+ bf->bf_readable = 1;
+ if (do_notify)
+ au_fhsm_notify(sb, /*val*/1);
+ if (rstfs)
+ *rstfs = bf->bf_stfs;
+
+out:
+ if (do_lock)
+ mutex_unlock(&bf->bf_lock);
+
+ return err;
+}
+
+void au_fhsm_wrote(struct super_block *sb, aufs_bindex_t bindex, int force)
+{
+ int err;
+ struct au_sbinfo *sbinfo;
+ struct au_fhsm *fhsm;
+ struct au_branch *br;
+ struct au_br_fhsm *bf;
+
+ AuDbg("b%d, force %d\n", bindex, force);
+ SiMustAnyLock(sb);
+
+ sbinfo = au_sbi(sb);
+ fhsm = &sbinfo->si_fhsm;
+ if (!au_ftest_si(sbinfo, FHSM)
+ || fhsm->fhsm_bottom == bindex)
+ return;
+
+ br = au_sbr(sb, bindex);
+ bf = br->br_fhsm;
+ AuDebugOn(!bf);
+ mutex_lock(&bf->bf_lock);
+ if (force
+ || au_fhsm_pid(fhsm)
+ || au_fhsm_test_jiffy(sbinfo, br))
+ err = au_fhsm_stfs(sb, bindex, /*rstfs*/NULL, /*do_lock*/0,
+ /*do_notify*/1);
+ mutex_unlock(&bf->bf_lock);
+}
+
+void au_fhsm_wrote_all(struct super_block *sb, int force)
+{
+ aufs_bindex_t bindex, bend;
+ struct au_branch *br;
+
+ /* exclude the bottom */
+ bend = au_fhsm_bottom(sb);
+ for (bindex = 0; bindex < bend; bindex++) {
+ br = au_sbr(sb, bindex);
+ if (au_br_fhsm(br->br_perm))
+ au_fhsm_wrote(sb, bindex, force);
+ }
+}
+
+/* ---------------------------------------------------------------------- */
+
+static unsigned int au_fhsm_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ unsigned int mask;
+ struct au_sbinfo *sbinfo;
+ struct au_fhsm *fhsm;
+
+ mask = 0;
+ sbinfo = file->private_data;
+ fhsm = &sbinfo->si_fhsm;
+ poll_wait(file, &fhsm->fhsm_wqh, wait);
+ if (atomic_read(&fhsm->fhsm_readable))
+ mask = POLLIN /* | POLLRDNORM */;
+
+ AuTraceErr((int)mask);
+ return mask;
+}
+
+static int au_fhsm_do_read_one(struct aufs_stbr __user *stbr,
+ struct aufs_stfs *stfs, __s16 brid)
+{
+ int err;
+
+ err = copy_to_user(&stbr->stfs, stfs, sizeof(*stfs));
+ if (!err)
+ err = __put_user(brid, &stbr->brid);
+ if (unlikely(err))
+ err = -EFAULT;
+
+ return err;
+}
+
+static ssize_t au_fhsm_do_read(struct super_block *sb,
+ struct aufs_stbr __user *stbr, size_t count)
+{
+ ssize_t err;
+ int nstbr;
+ aufs_bindex_t bindex, bend;
+ struct au_branch *br;
+ struct au_br_fhsm *bf;
+
+ /* except the bottom branch */
+ err = 0;
+ nstbr = 0;
+ bend = au_fhsm_bottom(sb);
+ for (bindex = 0; !err && bindex < bend; bindex++) {
+ br = au_sbr(sb, bindex);
+ if (!au_br_fhsm(br->br_perm))
+ continue;
+
+ bf = br->br_fhsm;
+ mutex_lock(&bf->bf_lock);
+ if (bf->bf_readable) {
+ err = -EFAULT;
+ if (count >= sizeof(*stbr))
+ err = au_fhsm_do_read_one(stbr++, &bf->bf_stfs,
+ br->br_id);
+ if (!err) {
+ bf->bf_readable = 0;
+ count -= sizeof(*stbr);
+ nstbr++;
+ }
+ }
+ mutex_unlock(&bf->bf_lock);
+ }
+ if (!err)
+ err = sizeof(*stbr) * nstbr;
+
+ return err;
+}
+
+static ssize_t au_fhsm_read(struct file *file, char __user *buf, size_t count,
+ loff_t *pos)
+{
+ ssize_t err;
+ int readable;
+ aufs_bindex_t nfhsm, bindex, bend;
+ struct au_sbinfo *sbinfo;
+ struct au_fhsm *fhsm;
+ struct au_branch *br;
+ struct super_block *sb;
+
+ err = 0;
+ sbinfo = file->private_data;
+ fhsm = &sbinfo->si_fhsm;
+need_data:
+ spin_lock_irq(&fhsm->fhsm_wqh.lock);
+ if (!atomic_read(&fhsm->fhsm_readable)) {
+ if (vfsub_file_flags(file) & O_NONBLOCK)
+ err = -EAGAIN;
+ else
+ err = wait_event_interruptible_locked_irq
+ (fhsm->fhsm_wqh,
+ atomic_read(&fhsm->fhsm_readable));
+ }
+ spin_unlock_irq(&fhsm->fhsm_wqh.lock);
+ if (unlikely(err))
+ goto out;
+
+ /* sb may already be dead */
+ au_rw_read_lock(&sbinfo->si_rwsem);
+ readable = atomic_read(&fhsm->fhsm_readable);
+ if (readable > 0) {
+ sb = sbinfo->si_sb;
+ AuDebugOn(!sb);
+ /* exclude the bottom branch */
+ nfhsm = 0;
+ bend = au_fhsm_bottom(sb);
+ for (bindex = 0; bindex < bend; bindex++) {
+ br = au_sbr(sb, bindex);
+ if (au_br_fhsm(br->br_perm))
+ nfhsm++;
+ }
+ err = -EMSGSIZE;
+ if (nfhsm * sizeof(struct aufs_stbr) <= count) {
+ atomic_set(&fhsm->fhsm_readable, 0);
+ err = au_fhsm_do_read(sbinfo->si_sb, (void __user *)buf,
+ count);
+ }
+ }
+ au_rw_read_unlock(&sbinfo->si_rwsem);
+ if (!readable)
+ goto need_data;
+
+out:
+ return err;
+}
+
+static int au_fhsm_release(struct inode *inode, struct file *file)
+{
+ struct au_sbinfo *sbinfo;
+ struct au_fhsm *fhsm;
+
+ /* sb may already be dead */
+ sbinfo = file->private_data;
+ fhsm = &sbinfo->si_fhsm;
+ spin_lock(&fhsm->fhsm_spin);
+ fhsm->fhsm_pid = 0;
+ spin_unlock(&fhsm->fhsm_spin);
+ kobject_put(&sbinfo->si_kobj);
+
+ return 0;
+}
+
+static const struct file_operations au_fhsm_fops = {
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+ .read = au_fhsm_read,
+ .poll = au_fhsm_poll,
+ .release = au_fhsm_release
+};
+
+int au_fhsm_fd(struct super_block *sb, int oflags)
+{
+ int err, fd;
+ struct au_sbinfo *sbinfo;
+ struct au_fhsm *fhsm;
+
+ err = -EPERM;
+ if (unlikely(!capable(CAP_SYS_ADMIN)))
+ goto out;
+
+ err = -EINVAL;
+ if (unlikely(oflags & ~(O_CLOEXEC | O_NONBLOCK)))
+ goto out;
+
+ err = 0;
+ sbinfo = au_sbi(sb);
+ fhsm = &sbinfo->si_fhsm;
+ spin_lock(&fhsm->fhsm_spin);
+ if (!fhsm->fhsm_pid)
+ fhsm->fhsm_pid = current->pid;
+ else
+ err = -EBUSY;
+ spin_unlock(&fhsm->fhsm_spin);
+ if (unlikely(err))
+ goto out;
+
+ oflags |= O_RDONLY;
+ /* oflags |= FMODE_NONOTIFY; */
+ fd = anon_inode_getfd("[aufs_fhsm]", &au_fhsm_fops, sbinfo, oflags);
+ err = fd;
+ if (unlikely(fd < 0))
+ goto out_pid;
+
+ /* succeed regardless of the 'fhsm' status */
+ kobject_get(&sbinfo->si_kobj);
+ si_noflush_read_lock(sb);
+ if (au_ftest_si(sbinfo, FHSM))
+ au_fhsm_wrote_all(sb, /*force*/0);
+ si_read_unlock(sb);
+ goto out; /* success */
+
+out_pid:
+ spin_lock(&fhsm->fhsm_spin);
+ fhsm->fhsm_pid = 0;
+ spin_unlock(&fhsm->fhsm_spin);
+out:
+ AuTraceErr(err);
+ return err;
+}
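+/*
+ * Rough userspace sketch (illustrative only; how the fd returned by
+ * au_fhsm_fd() reaches userspace is defined elsewhere):
+ *   struct pollfd pfd = { .fd = fhsm_fd, .events = POLLIN };
+ *   struct aufs_stbr stbr[16];  // 16 is an arbitrary example size
+ *   while (poll(&pfd, 1, -1) > 0) {
+ *           ssize_t sz = read(fhsm_fd, stbr, sizeof(stbr));
+ *           // each entry carries a branch id and that branch's statfs numbers
+ *   }
+ */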
+
+/* ---------------------------------------------------------------------- */
+
+int au_fhsm_br_alloc(struct au_branch *br)
+{
+ int err;
+
+ err = 0;
+ br->br_fhsm = kmalloc(sizeof(*br->br_fhsm), GFP_NOFS);
+ if (br->br_fhsm)
+ au_br_fhsm_init(br->br_fhsm);
+ else
+ err = -ENOMEM;
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_fhsm_fin(struct super_block *sb)
+{
+ au_fhsm_notify(sb, /*val*/-1);
+}
+
+void au_fhsm_init(struct au_sbinfo *sbinfo)
+{
+ struct au_fhsm *fhsm;
+
+ fhsm = &sbinfo->si_fhsm;
+ spin_lock_init(&fhsm->fhsm_spin);
+ init_waitqueue_head(&fhsm->fhsm_wqh);
+ atomic_set(&fhsm->fhsm_readable, 0);
+ fhsm->fhsm_expire
+ = msecs_to_jiffies(AUFS_FHSM_CACHE_DEF_SEC * MSEC_PER_SEC);
+ fhsm->fhsm_bottom = -1;
+}
+
+void au_fhsm_set(struct au_sbinfo *sbinfo, unsigned int sec)
+{
+ sbinfo->si_fhsm.fhsm_expire
+ = msecs_to_jiffies(sec * MSEC_PER_SEC);
+}
+
+void au_fhsm_show(struct seq_file *seq, struct au_sbinfo *sbinfo)
+{
+ unsigned int u;
+
+ if (!au_ftest_si(sbinfo, FHSM))
+ return;
+
+ u = jiffies_to_msecs(sbinfo->si_fhsm.fhsm_expire) / MSEC_PER_SEC;
+ if (u != AUFS_FHSM_CACHE_DEF_SEC)
+ seq_printf(seq, ",fhsm_sec=%u", u);
+}
diff --git a/fs/aufs/file.c b/fs/aufs/file.c
new file mode 100644
index 000000000..6b8a66b4a
--- /dev/null
+++ b/fs/aufs/file.c
@@ -0,0 +1,831 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * handling file/dir, and address_space operation
+ */
+
+#ifdef CONFIG_AUFS_DEBUG
+#include <linux/migrate.h>
+#endif
+#include <linux/pagemap.h>
+#include "aufs.h"
+
+/* drop flags for writing */
+unsigned int au_file_roflags(unsigned int flags)
+{
+ flags &= ~(O_WRONLY | O_RDWR | O_APPEND | O_CREAT | O_TRUNC);
+ flags |= O_RDONLY | O_NOATIME;
+ return flags;
+}
+
+/* common functions to regular file and dir */
+struct file *au_h_open(struct dentry *dentry, aufs_bindex_t bindex, int flags,
+ struct file *file, int force_wr)
+{
+ struct file *h_file;
+ struct dentry *h_dentry;
+ struct inode *h_inode;
+ struct super_block *sb;
+ struct au_branch *br;
+ struct path h_path;
+ int err;
+
+ /* a race condition can happen between open and unlink/rmdir */
+ h_file = ERR_PTR(-ENOENT);
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (au_test_nfsd() && (!h_dentry || d_is_negative(h_dentry)))
+ goto out;
+ h_inode = d_inode(h_dentry);
+ spin_lock(&h_dentry->d_lock);
+ err = (!d_unhashed(dentry) && d_unlinked(h_dentry))
+ /* || !d_inode(dentry)->i_nlink */
+ ;
+ spin_unlock(&h_dentry->d_lock);
+ if (unlikely(err))
+ goto out;
+
+ sb = dentry->d_sb;
+ br = au_sbr(sb, bindex);
+ err = au_br_test_oflag(flags, br);
+ h_file = ERR_PTR(err);
+ if (unlikely(err))
+ goto out;
+
+ /* drop flags for writing */
+ if (au_test_ro(sb, bindex, d_inode(dentry))) {
+ if (force_wr && !(flags & O_WRONLY))
+ force_wr = 0;
+ flags = au_file_roflags(flags);
+ if (force_wr) {
+ h_file = ERR_PTR(-EROFS);
+ flags = au_file_roflags(flags);
+ if (unlikely(vfsub_native_ro(h_inode)
+ || IS_APPEND(h_inode)))
+ goto out;
+ flags &= ~O_ACCMODE;
+ flags |= O_WRONLY;
+ }
+ }
+ flags &= ~O_CREAT;
+ atomic_inc(&br->br_count);
+ h_path.dentry = h_dentry;
+ h_path.mnt = au_br_mnt(br);
+ h_file = vfsub_dentry_open(&h_path, flags);
+ if (IS_ERR(h_file))
+ goto out_br;
+
+ if (flags & __FMODE_EXEC) {
+ err = deny_write_access(h_file);
+ if (unlikely(err)) {
+ fput(h_file);
+ h_file = ERR_PTR(err);
+ goto out_br;
+ }
+ }
+ fsnotify_open(h_file);
+ goto out; /* success */
+
+out_br:
+ atomic_dec(&br->br_count);
+out:
+ return h_file;
+}
+
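+/*
+ * "cmoo" below stands for copy-up / move-up on open, controlled per branch by
+ * the AuBrAttr_COO_ALL and AuBrWAttr_MOO attributes (our reading of the code
+ * below). When set, opening a file whose topmost copy lives on such a branch
+ * first copies it up to a writable branch and, for "moo", unlinks the original
+ * afterwards.
+ */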
+static int au_cmoo(struct dentry *dentry)
+{
+ int err, cmoo;
+ unsigned int udba;
+ struct path h_path;
+ struct au_pin pin;
+ struct au_cp_generic cpg = {
+ .dentry = dentry,
+ .bdst = -1,
+ .bsrc = -1,
+ .len = -1,
+ .pin = &pin,
+ .flags = AuCpup_DTIME | AuCpup_HOPEN
+ };
+ struct inode *delegated;
+ struct super_block *sb;
+ struct au_sbinfo *sbinfo;
+ struct au_fhsm *fhsm;
+ pid_t pid;
+ struct au_branch *br;
+ struct dentry *parent;
+ struct au_hinode *hdir;
+
+ DiMustWriteLock(dentry);
+ IiMustWriteLock(d_inode(dentry));
+
+ err = 0;
+ if (IS_ROOT(dentry))
+ goto out;
+ cpg.bsrc = au_dbstart(dentry);
+ if (!cpg.bsrc)
+ goto out;
+
+ sb = dentry->d_sb;
+ sbinfo = au_sbi(sb);
+ fhsm = &sbinfo->si_fhsm;
+ pid = au_fhsm_pid(fhsm);
+ if (pid
+ && (current->pid == pid
+ || current->real_parent->pid == pid))
+ goto out;
+
+ br = au_sbr(sb, cpg.bsrc);
+ cmoo = au_br_cmoo(br->br_perm);
+ if (!cmoo)
+ goto out;
+ if (!d_is_reg(dentry))
+ cmoo &= AuBrAttr_COO_ALL;
+ if (!cmoo)
+ goto out;
+
+ parent = dget_parent(dentry);
+ di_write_lock_parent(parent);
+ err = au_wbr_do_copyup_bu(dentry, cpg.bsrc - 1);
+ cpg.bdst = err;
+ if (unlikely(err < 0)) {
+ err = 0; /* there is no upper writable branch */
+ goto out_dgrade;
+ }
+ AuDbg("bsrc %d, bdst %d\n", cpg.bsrc, cpg.bdst);
+
+ /* do not respect the coo attrib for the target branch */
+ err = au_cpup_dirs(dentry, cpg.bdst);
+ if (unlikely(err))
+ goto out_dgrade;
+
+ di_downgrade_lock(parent, AuLock_IR);
+ udba = au_opt_udba(sb);
+ err = au_pin(&pin, dentry, cpg.bdst, udba,
+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+ if (unlikely(err))
+ goto out_parent;
+
+ err = au_sio_cpup_simple(&cpg);
+ au_unpin(&pin);
+ if (unlikely(err))
+ goto out_parent;
+ if (!(cmoo & AuBrWAttr_MOO))
+ goto out_parent; /* success */
+
+ err = au_pin(&pin, dentry, cpg.bsrc, udba,
+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+ if (unlikely(err))
+ goto out_parent;
+
+ h_path.mnt = au_br_mnt(br);
+ h_path.dentry = au_h_dptr(dentry, cpg.bsrc);
+ hdir = au_hi(d_inode(parent), cpg.bsrc);
+ delegated = NULL;
+ err = vfsub_unlink(hdir->hi_inode, &h_path, &delegated, /*force*/1);
+ au_unpin(&pin);
+ /* todo: keep h_dentry or not? */
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal unlink\n");
+ iput(delegated);
+ }
+ if (unlikely(err)) {
+ pr_err("unlink %pd after coo failed (%d), ignored\n",
+ dentry, err);
+ err = 0;
+ }
+ goto out_parent; /* success */
+
+out_dgrade:
+ di_downgrade_lock(parent, AuLock_IR);
+out_parent:
+ di_read_unlock(parent, AuLock_IR);
+ dput(parent);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+int au_do_open(struct file *file, struct au_do_open_args *args)
+{
+ int err, no_lock = args->no_lock;
+ struct dentry *dentry;
+ struct au_finfo *finfo;
+
+ if (!no_lock)
+ err = au_finfo_init(file, args->fidir);
+ else {
+ lockdep_off();
+ err = au_finfo_init(file, args->fidir);
+ lockdep_on();
+ }
+ if (unlikely(err))
+ goto out;
+
+ dentry = file->f_path.dentry;
+ AuDebugOn(IS_ERR_OR_NULL(dentry));
+ if (!no_lock) {
+ di_write_lock_child(dentry);
+ err = au_cmoo(dentry);
+ di_downgrade_lock(dentry, AuLock_IR);
+ if (!err)
+ err = args->open(file, vfsub_file_flags(file), NULL);
+ di_read_unlock(dentry, AuLock_IR);
+ } else {
+ err = au_cmoo(dentry);
+ if (!err)
+ err = args->open(file, vfsub_file_flags(file),
+ args->h_file);
+ if (!err && au_fbstart(file) != au_dbstart(dentry))
+ /*
+ * cmoo happens after h_file was opened.
+ * need to refresh file later.
+ */
+ atomic_dec(&au_fi(file)->fi_generation);
+ }
+
+ finfo = au_fi(file);
+ if (!err) {
+ finfo->fi_file = file;
+ au_sphl_add(&finfo->fi_hlist,
+ &au_sbi(file->f_path.dentry->d_sb)->si_files);
+ }
+ if (!no_lock)
+ fi_write_unlock(file);
+ else {
+ lockdep_off();
+ fi_write_unlock(file);
+ lockdep_on();
+ }
+ if (unlikely(err)) {
+ finfo->fi_hdir = NULL;
+ au_finfo_fin(file);
+ }
+
+out:
+ return err;
+}
+
+int au_reopen_nondir(struct file *file)
+{
+ int err;
+ aufs_bindex_t bstart;
+ struct dentry *dentry;
+ struct file *h_file, *h_file_tmp;
+
+ dentry = file->f_path.dentry;
+ bstart = au_dbstart(dentry);
+ h_file_tmp = NULL;
+ if (au_fbstart(file) == bstart) {
+ h_file = au_hf_top(file);
+ if (file->f_mode == h_file->f_mode)
+ return 0; /* success */
+ h_file_tmp = h_file;
+ get_file(h_file_tmp);
+ au_set_h_fptr(file, bstart, NULL);
+ }
+ AuDebugOn(au_fi(file)->fi_hdir);
+ /*
+ * it can happen
+ * a file exists on both the rw and the ro branch
+ * open --> dbstart and fbstart are both 0
+ * prepend a branch as rw, the old "rw" becomes ro
+ * remove rw/file
+ * delete the top branch, "rw" becomes rw again
+ * --> dbstart is 1, fbstart is still 0
+ * write --> fbstart is 0 but dbstart is 1
+ */
+ /* AuDebugOn(au_fbstart(file) < bstart); */
+
+ h_file = au_h_open(dentry, bstart, vfsub_file_flags(file) & ~O_TRUNC,
+ file, /*force_wr*/0);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file)) {
+ if (h_file_tmp) {
+ atomic_inc(&au_sbr(dentry->d_sb, bstart)->br_count);
+ au_set_h_fptr(file, bstart, h_file_tmp);
+ h_file_tmp = NULL;
+ }
+ goto out; /* todo: close all? */
+ }
+
+ err = 0;
+ au_set_fbstart(file, bstart);
+ au_set_h_fptr(file, bstart, h_file);
+ au_update_figen(file);
+ /* todo: necessary? */
+ /* file->f_ra = h_file->f_ra; */
+
+out:
+ if (h_file_tmp)
+ fput(h_file_tmp);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_reopen_wh(struct file *file, aufs_bindex_t btgt,
+ struct dentry *hi_wh)
+{
+ int err;
+ aufs_bindex_t bstart;
+ struct au_dinfo *dinfo;
+ struct dentry *h_dentry;
+ struct au_hdentry *hdp;
+
+ dinfo = au_di(file->f_path.dentry);
+ AuRwMustWriteLock(&dinfo->di_rwsem);
+
+ bstart = dinfo->di_bstart;
+ dinfo->di_bstart = btgt;
+ hdp = dinfo->di_hdentry;
+ h_dentry = hdp[0 + btgt].hd_dentry;
+ hdp[0 + btgt].hd_dentry = hi_wh;
+ err = au_reopen_nondir(file);
+ hdp[0 + btgt].hd_dentry = h_dentry;
+ dinfo->di_bstart = bstart;
+
+ return err;
+}
+
+static int au_ready_to_write_wh(struct file *file, loff_t len,
+ aufs_bindex_t bcpup, struct au_pin *pin)
+{
+ int err;
+ struct inode *inode, *h_inode;
+ struct dentry *h_dentry, *hi_wh;
+ struct au_cp_generic cpg = {
+ .dentry = file->f_path.dentry,
+ .bdst = bcpup,
+ .bsrc = -1,
+ .len = len,
+ .pin = pin
+ };
+
+ au_update_dbstart(cpg.dentry);
+ inode = d_inode(cpg.dentry);
+ h_inode = NULL;
+ if (au_dbstart(cpg.dentry) <= bcpup
+ && au_dbend(cpg.dentry) >= bcpup) {
+ h_dentry = au_h_dptr(cpg.dentry, bcpup);
+ if (h_dentry && d_is_positive(h_dentry))
+ h_inode = d_inode(h_dentry);
+ }
+ hi_wh = au_hi_wh(inode, bcpup);
+ if (!hi_wh && !h_inode)
+ err = au_sio_cpup_wh(&cpg, file);
+ else
+ /* already copied-up after unlink */
+ err = au_reopen_wh(file, bcpup, hi_wh);
+
+ if (!err
+ && (inode->i_nlink > 1
+ || (inode->i_state & I_LINKABLE))
+ && au_opt_test(au_mntflags(cpg.dentry->d_sb), PLINK))
+ au_plink_append(inode, bcpup, au_h_dptr(cpg.dentry, bcpup));
+
+ return err;
+}
+
+/*
+ * prepare the @file for writing.
+ */
+int au_ready_to_write(struct file *file, loff_t len, struct au_pin *pin)
+{
+ int err;
+ aufs_bindex_t dbstart;
+ struct dentry *parent;
+ struct inode *inode;
+ struct super_block *sb;
+ struct file *h_file;
+ struct au_cp_generic cpg = {
+ .dentry = file->f_path.dentry,
+ .bdst = -1,
+ .bsrc = -1,
+ .len = len,
+ .pin = pin,
+ .flags = AuCpup_DTIME
+ };
+
+ sb = cpg.dentry->d_sb;
+ inode = d_inode(cpg.dentry);
+ cpg.bsrc = au_fbstart(file);
+ err = au_test_ro(sb, cpg.bsrc, inode);
+ if (!err && (au_hf_top(file)->f_mode & FMODE_WRITE)) {
+ err = au_pin(pin, cpg.dentry, cpg.bsrc, AuOpt_UDBA_NONE,
+ /*flags*/0);
+ goto out;
+ }
+
+ /* need to cpup or reopen */
+ parent = dget_parent(cpg.dentry);
+ di_write_lock_parent(parent);
+ err = AuWbrCopyup(au_sbi(sb), cpg.dentry);
+ cpg.bdst = err;
+ if (unlikely(err < 0))
+ goto out_dgrade;
+ err = 0;
+
+ if (!d_unhashed(cpg.dentry) && !au_h_dptr(parent, cpg.bdst)) {
+ err = au_cpup_dirs(cpg.dentry, cpg.bdst);
+ if (unlikely(err))
+ goto out_dgrade;
+ }
+
+ err = au_pin(pin, cpg.dentry, cpg.bdst, AuOpt_UDBA_NONE,
+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+ if (unlikely(err))
+ goto out_dgrade;
+
+ dbstart = au_dbstart(cpg.dentry);
+ if (dbstart <= cpg.bdst)
+ cpg.bsrc = cpg.bdst;
+
+ if (dbstart <= cpg.bdst /* just reopen */
+ || !d_unhashed(cpg.dentry) /* copyup and reopen */
+ ) {
+ h_file = au_h_open_pre(cpg.dentry, cpg.bsrc, /*force_wr*/0);
+ if (IS_ERR(h_file))
+ err = PTR_ERR(h_file);
+ else {
+ di_downgrade_lock(parent, AuLock_IR);
+ if (dbstart > cpg.bdst)
+ err = au_sio_cpup_simple(&cpg);
+ if (!err)
+ err = au_reopen_nondir(file);
+ au_h_open_post(cpg.dentry, cpg.bsrc, h_file);
+ }
+ } else { /* copyup as wh and reopen */
+ /*
+ * since writable hfsplus branch is not supported,
+ * h_open_pre/post() are unnecessary.
+ */
+ err = au_ready_to_write_wh(file, len, cpg.bdst, pin);
+ di_downgrade_lock(parent, AuLock_IR);
+ }
+
+ if (!err) {
+ au_pin_set_parent_lflag(pin, /*lflag*/0);
+ goto out_dput; /* success */
+ }
+ au_unpin(pin);
+ goto out_unlock;
+
+out_dgrade:
+ di_downgrade_lock(parent, AuLock_IR);
+out_unlock:
+ di_read_unlock(parent, AuLock_IR);
+out_dput:
+ dput(parent);
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_do_flush(struct file *file, fl_owner_t id,
+ int (*flush)(struct file *file, fl_owner_t id))
+{
+ int err;
+ struct super_block *sb;
+ struct inode *inode;
+
+ inode = file_inode(file);
+ sb = inode->i_sb;
+ si_noflush_read_lock(sb);
+ fi_read_lock(file);
+ ii_read_lock_child(inode);
+
+ err = flush(file, id);
+ au_cpup_attr_timesizes(inode);
+
+ ii_read_unlock(inode);
+ fi_read_unlock(file);
+ si_read_unlock(sb);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_file_refresh_by_inode(struct file *file, int *need_reopen)
+{
+ int err;
+ struct au_pin pin;
+ struct au_finfo *finfo;
+ struct dentry *parent, *hi_wh;
+ struct inode *inode;
+ struct super_block *sb;
+ struct au_cp_generic cpg = {
+ .dentry = file->f_path.dentry,
+ .bdst = -1,
+ .bsrc = -1,
+ .len = -1,
+ .pin = &pin,
+ .flags = AuCpup_DTIME
+ };
+
+ FiMustWriteLock(file);
+
+ err = 0;
+ finfo = au_fi(file);
+ sb = cpg.dentry->d_sb;
+ inode = d_inode(cpg.dentry);
+ cpg.bdst = au_ibstart(inode);
+ if (cpg.bdst == finfo->fi_btop || IS_ROOT(cpg.dentry))
+ goto out;
+
+ parent = dget_parent(cpg.dentry);
+ if (au_test_ro(sb, cpg.bdst, inode)) {
+ di_read_lock_parent(parent, !AuLock_IR);
+ err = AuWbrCopyup(au_sbi(sb), cpg.dentry);
+ cpg.bdst = err;
+ di_read_unlock(parent, !AuLock_IR);
+ if (unlikely(err < 0))
+ goto out_parent;
+ err = 0;
+ }
+
+ di_read_lock_parent(parent, AuLock_IR);
+ hi_wh = au_hi_wh(inode, cpg.bdst);
+ if (!S_ISDIR(inode->i_mode)
+ && au_opt_test(au_mntflags(sb), PLINK)
+ && au_plink_test(inode)
+ && !d_unhashed(cpg.dentry)
+ && cpg.bdst < au_dbstart(cpg.dentry)) {
+ err = au_test_and_cpup_dirs(cpg.dentry, cpg.bdst);
+ if (unlikely(err))
+ goto out_unlock;
+
+ /* always superio. */
+ err = au_pin(&pin, cpg.dentry, cpg.bdst, AuOpt_UDBA_NONE,
+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+ if (!err) {
+ err = au_sio_cpup_simple(&cpg);
+ au_unpin(&pin);
+ }
+ } else if (hi_wh) {
+ /* already copied-up after unlink */
+ err = au_reopen_wh(file, cpg.bdst, hi_wh);
+ *need_reopen = 0;
+ }
+
+out_unlock:
+ di_read_unlock(parent, AuLock_IR);
+out_parent:
+ dput(parent);
+out:
+ return err;
+}
+
+static void au_do_refresh_dir(struct file *file)
+{
+ aufs_bindex_t bindex, bend, new_bindex, brid;
+ struct au_hfile *p, tmp, *q;
+ struct au_finfo *finfo;
+ struct super_block *sb;
+ struct au_fidir *fidir;
+
+ FiMustWriteLock(file);
+
+ sb = file->f_path.dentry->d_sb;
+ finfo = au_fi(file);
+ fidir = finfo->fi_hdir;
+ AuDebugOn(!fidir);
+ p = fidir->fd_hfile + finfo->fi_btop;
+ brid = p->hf_br->br_id;
+ bend = fidir->fd_bbot;
+ for (bindex = finfo->fi_btop; bindex <= bend; bindex++, p++) {
+ if (!p->hf_file)
+ continue;
+
+ new_bindex = au_br_index(sb, p->hf_br->br_id);
+ if (new_bindex == bindex)
+ continue;
+ if (new_bindex < 0) {
+ au_set_h_fptr(file, bindex, NULL);
+ continue;
+ }
+
+ /* swap the two lower files, and loop again */
+ q = fidir->fd_hfile + new_bindex;
+ tmp = *q;
+ *q = *p;
+ *p = tmp;
+ if (tmp.hf_file) {
+ bindex--;
+ p--;
+ }
+ }
+
+ p = fidir->fd_hfile;
+ if (!au_test_mmapped(file) && !d_unlinked(file->f_path.dentry)) {
+ bend = au_sbend(sb);
+ for (finfo->fi_btop = 0; finfo->fi_btop <= bend;
+ finfo->fi_btop++, p++)
+ if (p->hf_file) {
+ if (file_inode(p->hf_file))
+ break;
+ au_hfput(p, file);
+ }
+ } else {
+ bend = au_br_index(sb, brid);
+ for (finfo->fi_btop = 0; finfo->fi_btop < bend;
+ finfo->fi_btop++, p++)
+ if (p->hf_file)
+ au_hfput(p, file);
+ bend = au_sbend(sb);
+ }
+
+ p = fidir->fd_hfile + bend;
+ for (fidir->fd_bbot = bend; fidir->fd_bbot >= finfo->fi_btop;
+ fidir->fd_bbot--, p--)
+ if (p->hf_file) {
+ if (file_inode(p->hf_file))
+ break;
+ au_hfput(p, file);
+ }
+ AuDebugOn(fidir->fd_bbot < finfo->fi_btop);
+}
+
+/*
+ * after branch manipulating, refresh the file.
+ */
+static int refresh_file(struct file *file, int (*reopen)(struct file *file))
+{
+ int err, need_reopen;
+ aufs_bindex_t bend, bindex;
+ struct dentry *dentry;
+ struct au_finfo *finfo;
+ struct au_hfile *hfile;
+
+ dentry = file->f_path.dentry;
+ finfo = au_fi(file);
+ if (!finfo->fi_hdir) {
+ hfile = &finfo->fi_htop;
+ AuDebugOn(!hfile->hf_file);
+ bindex = au_br_index(dentry->d_sb, hfile->hf_br->br_id);
+ AuDebugOn(bindex < 0);
+ if (bindex != finfo->fi_btop)
+ au_set_fbstart(file, bindex);
+ } else {
+ err = au_fidir_realloc(finfo, au_sbend(dentry->d_sb) + 1);
+ if (unlikely(err))
+ goto out;
+ au_do_refresh_dir(file);
+ }
+
+ err = 0;
+ need_reopen = 1;
+ if (!au_test_mmapped(file))
+ err = au_file_refresh_by_inode(file, &need_reopen);
+ if (!err && need_reopen && !d_unlinked(dentry))
+ err = reopen(file);
+ if (!err) {
+ au_update_figen(file);
+ goto out; /* success */
+ }
+
+ /* error, close all lower files */
+ if (finfo->fi_hdir) {
+ bend = au_fbend_dir(file);
+ for (bindex = au_fbstart(file); bindex <= bend; bindex++)
+ au_set_h_fptr(file, bindex, NULL);
+ }
+
+out:
+ return err;
+}
+
+/* common function to regular file and dir */
+int au_reval_and_lock_fdi(struct file *file, int (*reopen)(struct file *file),
+ int wlock)
+{
+ int err;
+ unsigned int sigen, figen;
+ aufs_bindex_t bstart;
+ unsigned char pseudo_link;
+ struct dentry *dentry;
+ struct inode *inode;
+
+ err = 0;
+ dentry = file->f_path.dentry;
+ inode = d_inode(dentry);
+ sigen = au_sigen(dentry->d_sb);
+ fi_write_lock(file);
+ figen = au_figen(file);
+ di_write_lock_child(dentry);
+ bstart = au_dbstart(dentry);
+ pseudo_link = (bstart != au_ibstart(inode));
+ if (sigen == figen && !pseudo_link && au_fbstart(file) == bstart) {
+ if (!wlock) {
+ di_downgrade_lock(dentry, AuLock_IR);
+ fi_downgrade_lock(file);
+ }
+ goto out; /* success */
+ }
+
+ AuDbg("sigen %d, figen %d\n", sigen, figen);
+ if (au_digen_test(dentry, sigen)) {
+ err = au_reval_dpath(dentry, sigen);
+ AuDebugOn(!err && au_digen_test(dentry, sigen));
+ }
+
+ if (!err)
+ err = refresh_file(file, reopen);
+ if (!err) {
+ if (!wlock) {
+ di_downgrade_lock(dentry, AuLock_IR);
+ fi_downgrade_lock(file);
+ }
+ } else {
+ di_write_unlock(dentry);
+ fi_write_unlock(file);
+ }
+
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* cf. aufs_nopage() */
+/* for madvise(2) */
+static int aufs_readpage(struct file *file __maybe_unused, struct page *page)
+{
+ unlock_page(page);
+ return 0;
+}
+
+/* it will never be called, but necessary to support O_DIRECT */
+static ssize_t aufs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+ loff_t offset)
+{ BUG(); return 0; }
+
+/* they will never be called. */
+#ifdef CONFIG_AUFS_DEBUG
+static int aufs_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata)
+{ AuUnsupport(); return 0; }
+static int aufs_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata)
+{ AuUnsupport(); return 0; }
+static int aufs_writepage(struct page *page, struct writeback_control *wbc)
+{ AuUnsupport(); return 0; }
+
+static int aufs_set_page_dirty(struct page *page)
+{ AuUnsupport(); return 0; }
+static void aufs_invalidatepage(struct page *page, unsigned int offset,
+ unsigned int length)
+{ AuUnsupport(); }
+static int aufs_releasepage(struct page *page, gfp_t gfp)
+{ AuUnsupport(); return 0; }
+#if 0 /* called by memory compaction regardless of the file */
+static int aufs_migratepage(struct address_space *mapping, struct page *newpage,
+ struct page *page, enum migrate_mode mode)
+{ AuUnsupport(); return 0; }
+#endif
+static int aufs_launder_page(struct page *page)
+{ AuUnsupport(); return 0; }
+static int aufs_is_partially_uptodate(struct page *page,
+ unsigned long from,
+ unsigned long count)
+{ AuUnsupport(); return 0; }
+static void aufs_is_dirty_writeback(struct page *page, bool *dirty,
+ bool *writeback)
+{ AuUnsupport(); }
+static int aufs_error_remove_page(struct address_space *mapping,
+ struct page *page)
+{ AuUnsupport(); return 0; }
+static int aufs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ sector_t *span)
+{ AuUnsupport(); return 0; }
+static void aufs_swap_deactivate(struct file *file)
+{ AuUnsupport(); }
+#endif /* CONFIG_AUFS_DEBUG */
+
+const struct address_space_operations aufs_aop = {
+ .readpage = aufs_readpage,
+ .direct_IO = aufs_direct_IO,
+#ifdef CONFIG_AUFS_DEBUG
+ .writepage = aufs_writepage,
+ /* no writepages, because of writepage */
+ .set_page_dirty = aufs_set_page_dirty,
+ /* no readpages, because of readpage */
+ .write_begin = aufs_write_begin,
+ .write_end = aufs_write_end,
+ /* no bmap, no block device */
+ .invalidatepage = aufs_invalidatepage,
+ .releasepage = aufs_releasepage,
+ /* is fallback_migrate_page ok? */
+ /* .migratepage = aufs_migratepage, */
+ .launder_page = aufs_launder_page,
+ .is_partially_uptodate = aufs_is_partially_uptodate,
+ .is_dirty_writeback = aufs_is_dirty_writeback,
+ .error_remove_page = aufs_error_remove_page,
+ .swap_activate = aufs_swap_activate,
+ .swap_deactivate = aufs_swap_deactivate
+#endif /* CONFIG_AUFS_DEBUG */
+};
diff --git a/fs/aufs/file.h b/fs/aufs/file.h
new file mode 100644
index 000000000..27d802487
--- /dev/null
+++ b/fs/aufs/file.h
@@ -0,0 +1,278 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * file operations
+ */
+
+#ifndef __AUFS_FILE_H__
+#define __AUFS_FILE_H__
+
+#ifdef __KERNEL__
+
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include "rwsem.h"
+
+struct au_branch;
+struct au_hfile {
+ struct file *hf_file;
+ struct au_branch *hf_br;
+};
+
+struct au_vdir;
+struct au_fidir {
+ aufs_bindex_t fd_bbot;
+ aufs_bindex_t fd_nent;
+ struct au_vdir *fd_vdir_cache;
+ struct au_hfile fd_hfile[];
+};
+
+static inline int au_fidir_sz(int nent)
+{
+ AuDebugOn(nent < 0);
+ return sizeof(struct au_fidir) + sizeof(struct au_hfile) * nent;
+}
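+/* e.g. kzalloc(au_fidir_sz(nbr), GFP_NOFS) allocates the header plus nbr
+ * trailing au_hfile slots; cf. au_fidir_alloc() in finfo.c. */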
+
+struct au_finfo {
+ atomic_t fi_generation;
+
+ struct au_rwsem fi_rwsem;
+ aufs_bindex_t fi_btop;
+
+ /* do not union them */
+ struct { /* for non-dir */
+ struct au_hfile fi_htop;
+ atomic_t fi_mmapped;
+ };
+ struct au_fidir *fi_hdir; /* for dir only */
+
+ struct hlist_node fi_hlist;
+ struct file *fi_file; /* very ugly */
+} ____cacheline_aligned_in_smp;
+
+/* ---------------------------------------------------------------------- */
+
+/* file.c */
+extern const struct address_space_operations aufs_aop;
+unsigned int au_file_roflags(unsigned int flags);
+struct file *au_h_open(struct dentry *dentry, aufs_bindex_t bindex, int flags,
+ struct file *file, int force_wr);
+struct au_do_open_args {
+ int no_lock;
+ int (*open)(struct file *file, int flags,
+ struct file *h_file);
+ struct au_fidir *fidir;
+ struct file *h_file;
+};
+int au_do_open(struct file *file, struct au_do_open_args *args);
+int au_reopen_nondir(struct file *file);
+struct au_pin;
+int au_ready_to_write(struct file *file, loff_t len, struct au_pin *pin);
+int au_reval_and_lock_fdi(struct file *file, int (*reopen)(struct file *file),
+ int wlock);
+int au_do_flush(struct file *file, fl_owner_t id,
+ int (*flush)(struct file *file, fl_owner_t id));
+
+/* poll.c */
+#ifdef CONFIG_AUFS_POLL
+unsigned int aufs_poll(struct file *file, poll_table *wait);
+#endif
+
+#ifdef CONFIG_AUFS_BR_HFSPLUS
+/* hfsplus.c */
+struct file *au_h_open_pre(struct dentry *dentry, aufs_bindex_t bindex,
+ int force_wr);
+void au_h_open_post(struct dentry *dentry, aufs_bindex_t bindex,
+ struct file *h_file);
+#else
+AuStub(struct file *, au_h_open_pre, return NULL, struct dentry *dentry,
+ aufs_bindex_t bindex, int force_wr)
+AuStubVoid(au_h_open_post, struct dentry *dentry, aufs_bindex_t bindex,
+ struct file *h_file);
+#endif
+
+/* f_op.c */
+extern const struct file_operations aufs_file_fop;
+int au_do_open_nondir(struct file *file, int flags, struct file *h_file);
+int aufs_release_nondir(struct inode *inode __maybe_unused, struct file *file);
+struct file *au_read_pre(struct file *file, int keep_fi);
+
+/* finfo.c */
+void au_hfput(struct au_hfile *hf, struct file *file);
+void au_set_h_fptr(struct file *file, aufs_bindex_t bindex,
+ struct file *h_file);
+
+void au_update_figen(struct file *file);
+struct au_fidir *au_fidir_alloc(struct super_block *sb);
+int au_fidir_realloc(struct au_finfo *finfo, int nbr);
+
+void au_fi_init_once(void *_fi);
+void au_finfo_fin(struct file *file);
+int au_finfo_init(struct file *file, struct au_fidir *fidir);
+
+/* ioctl.c */
+long aufs_ioctl_nondir(struct file *file, unsigned int cmd, unsigned long arg);
+#ifdef CONFIG_COMPAT
+long aufs_compat_ioctl_dir(struct file *file, unsigned int cmd,
+ unsigned long arg);
+long aufs_compat_ioctl_nondir(struct file *file, unsigned int cmd,
+ unsigned long arg);
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct au_finfo *au_fi(struct file *file)
+{
+ return file->private_data;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * fi_read_lock, fi_write_lock,
+ * fi_read_unlock, fi_write_unlock, fi_downgrade_lock
+ */
+AuSimpleRwsemFuncs(fi, struct file *f, &au_fi(f)->fi_rwsem);
+
+#define FiMustNoWaiters(f) AuRwMustNoWaiters(&au_fi(f)->fi_rwsem)
+#define FiMustAnyLock(f) AuRwMustAnyLock(&au_fi(f)->fi_rwsem)
+#define FiMustWriteLock(f) AuRwMustWriteLock(&au_fi(f)->fi_rwsem)
+
+/* ---------------------------------------------------------------------- */
+
+/* todo: hard/soft set? */
+static inline aufs_bindex_t au_fbstart(struct file *file)
+{
+ FiMustAnyLock(file);
+ return au_fi(file)->fi_btop;
+}
+
+static inline aufs_bindex_t au_fbend_dir(struct file *file)
+{
+ FiMustAnyLock(file);
+ AuDebugOn(!au_fi(file)->fi_hdir);
+ return au_fi(file)->fi_hdir->fd_bbot;
+}
+
+static inline struct au_vdir *au_fvdir_cache(struct file *file)
+{
+ FiMustAnyLock(file);
+ AuDebugOn(!au_fi(file)->fi_hdir);
+ return au_fi(file)->fi_hdir->fd_vdir_cache;
+}
+
+static inline void au_set_fbstart(struct file *file, aufs_bindex_t bindex)
+{
+ FiMustWriteLock(file);
+ au_fi(file)->fi_btop = bindex;
+}
+
+static inline void au_set_fbend_dir(struct file *file, aufs_bindex_t bindex)
+{
+ FiMustWriteLock(file);
+ AuDebugOn(!au_fi(file)->fi_hdir);
+ au_fi(file)->fi_hdir->fd_bbot = bindex;
+}
+
+static inline void au_set_fvdir_cache(struct file *file,
+ struct au_vdir *vdir_cache)
+{
+ FiMustWriteLock(file);
+ AuDebugOn(!au_fi(file)->fi_hdir);
+ au_fi(file)->fi_hdir->fd_vdir_cache = vdir_cache;
+}
+
+static inline struct file *au_hf_top(struct file *file)
+{
+ FiMustAnyLock(file);
+ AuDebugOn(au_fi(file)->fi_hdir);
+ return au_fi(file)->fi_htop.hf_file;
+}
+
+static inline struct file *au_hf_dir(struct file *file, aufs_bindex_t bindex)
+{
+ FiMustAnyLock(file);
+ AuDebugOn(!au_fi(file)->fi_hdir);
+ return au_fi(file)->fi_hdir->fd_hfile[0 + bindex].hf_file;
+}
+
+/* todo: memory barrier? */
+static inline unsigned int au_figen(struct file *f)
+{
+ return atomic_read(&au_fi(f)->fi_generation);
+}
+
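+/*
+ * fi_mmapped counts active mappings. If the counter wraps to zero while
+ * mappings still exist, the loop below pushes it past zero again so that
+ * au_test_mmapped() keeps reporting the file as mapped.
+ */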
+static inline void au_set_mmapped(struct file *f)
+{
+ if (atomic_inc_return(&au_fi(f)->fi_mmapped))
+ return;
+ pr_warn("fi_mmapped wrapped around\n");
+ while (!atomic_inc_return(&au_fi(f)->fi_mmapped))
+ ;
+}
+
+static inline void au_unset_mmapped(struct file *f)
+{
+ atomic_dec(&au_fi(f)->fi_mmapped);
+}
+
+static inline int au_test_mmapped(struct file *f)
+{
+ return atomic_read(&au_fi(f)->fi_mmapped);
+}
+
+/* customize vma->vm_file */
+
+static inline void au_do_vm_file_reset(struct vm_area_struct *vma,
+ struct file *file)
+{
+ struct file *f;
+
+ f = vma->vm_file;
+ get_file(file);
+ vma->vm_file = file;
+ fput(f);
+}
+
+#ifdef CONFIG_MMU
+#define AuDbgVmRegion(file, vma) do {} while (0)
+
+static inline void au_vm_file_reset(struct vm_area_struct *vma,
+ struct file *file)
+{
+ au_do_vm_file_reset(vma, file);
+}
+#else
+#define AuDbgVmRegion(file, vma) \
+ AuDebugOn((vma)->vm_region && (vma)->vm_region->vm_file != (file))
+
+static inline void au_vm_file_reset(struct vm_area_struct *vma,
+ struct file *file)
+{
+ struct file *f;
+
+ au_do_vm_file_reset(vma, file);
+ f = vma->vm_region->vm_file;
+ get_file(file);
+ vma->vm_region->vm_file = file;
+ fput(f);
+}
+#endif /* CONFIG_MMU */
+
+/* handle vma->vm_prfile */
+static inline void au_vm_prfile_set(struct vm_area_struct *vma,
+ struct file *file)
+{
+ get_file(file);
+ vma->vm_prfile = file;
+#ifndef CONFIG_MMU
+ get_file(file);
+ vma->vm_region->vm_prfile = file;
+#endif
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_FILE_H__ */
diff --git a/fs/aufs/finfo.c b/fs/aufs/finfo.c
new file mode 100644
index 000000000..b5eb55dfb
--- /dev/null
+++ b/fs/aufs/finfo.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * file private data
+ */
+
+#include "aufs.h"
+
+void au_hfput(struct au_hfile *hf, struct file *file)
+{
+ /* todo: direct access f_flags */
+ if (vfsub_file_flags(file) & __FMODE_EXEC)
+ allow_write_access(hf->hf_file);
+ fput(hf->hf_file);
+ hf->hf_file = NULL;
+ atomic_dec(&hf->hf_br->br_count);
+ hf->hf_br = NULL;
+}
+
+void au_set_h_fptr(struct file *file, aufs_bindex_t bindex, struct file *val)
+{
+ struct au_finfo *finfo = au_fi(file);
+ struct au_hfile *hf;
+ struct au_fidir *fidir;
+
+ fidir = finfo->fi_hdir;
+ if (!fidir) {
+ AuDebugOn(finfo->fi_btop != bindex);
+ hf = &finfo->fi_htop;
+ } else
+ hf = fidir->fd_hfile + bindex;
+
+ if (hf && hf->hf_file)
+ au_hfput(hf, file);
+ if (val) {
+ FiMustWriteLock(file);
+ AuDebugOn(IS_ERR_OR_NULL(file->f_path.dentry));
+ hf->hf_file = val;
+ hf->hf_br = au_sbr(file->f_path.dentry->d_sb, bindex);
+ }
+}
+
+void au_update_figen(struct file *file)
+{
+ atomic_set(&au_fi(file)->fi_generation, au_digen(file->f_path.dentry));
+ /* smp_mb(); */ /* atomic_set */
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_fidir *au_fidir_alloc(struct super_block *sb)
+{
+ struct au_fidir *fidir;
+ int nbr;
+
+ nbr = au_sbend(sb) + 1;
+ if (nbr < 2)
+ nbr = 2; /* initially allocate for 2 branches */
+ fidir = kzalloc(au_fidir_sz(nbr), GFP_NOFS);
+ if (fidir) {
+ fidir->fd_bbot = -1;
+ fidir->fd_nent = nbr;
+ }
+
+ return fidir;
+}
+
+int au_fidir_realloc(struct au_finfo *finfo, int nbr)
+{
+ int err;
+ struct au_fidir *fidir, *p;
+
+ AuRwMustWriteLock(&finfo->fi_rwsem);
+ fidir = finfo->fi_hdir;
+ AuDebugOn(!fidir);
+
+ err = -ENOMEM;
+ p = au_kzrealloc(fidir, au_fidir_sz(fidir->fd_nent), au_fidir_sz(nbr),
+ GFP_NOFS);
+ if (p) {
+ p->fd_nent = nbr;
+ finfo->fi_hdir = p;
+ err = 0;
+ }
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_finfo_fin(struct file *file)
+{
+ struct au_finfo *finfo;
+
+ au_nfiles_dec(file->f_path.dentry->d_sb);
+
+ finfo = au_fi(file);
+ AuDebugOn(finfo->fi_hdir);
+ AuRwDestroy(&finfo->fi_rwsem);
+ au_cache_free_finfo(finfo);
+}
+
+void au_fi_init_once(void *_finfo)
+{
+ struct au_finfo *finfo = _finfo;
+ static struct lock_class_key aufs_fi;
+
+ au_rw_init(&finfo->fi_rwsem);
+ au_rw_class(&finfo->fi_rwsem, &aufs_fi);
+}
+
+int au_finfo_init(struct file *file, struct au_fidir *fidir)
+{
+ int err;
+ struct au_finfo *finfo;
+ struct dentry *dentry;
+
+ err = -ENOMEM;
+ dentry = file->f_path.dentry;
+ finfo = au_cache_alloc_finfo();
+ if (unlikely(!finfo))
+ goto out;
+
+ err = 0;
+ au_nfiles_inc(dentry->d_sb);
+ /* verbose coding for lock class name */
+ if (!fidir)
+ au_rw_class(&finfo->fi_rwsem, au_lc_key + AuLcNonDir_FIINFO);
+ else
+ au_rw_class(&finfo->fi_rwsem, au_lc_key + AuLcDir_FIINFO);
+ au_rw_write_lock(&finfo->fi_rwsem);
+ finfo->fi_btop = -1;
+ finfo->fi_hdir = fidir;
+ atomic_set(&finfo->fi_generation, au_digen(dentry));
+ /* smp_mb(); */ /* atomic_set */
+
+ file->private_data = finfo;
+
+out:
+ return err;
+}
diff --git a/fs/aufs/fstype.h b/fs/aufs/fstype.h
new file mode 100644
index 000000000..725b2ffff
--- /dev/null
+++ b/fs/aufs/fstype.h
@@ -0,0 +1,387 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * judging filesystem type
+ */
+
+#ifndef __AUFS_FSTYPE_H__
+#define __AUFS_FSTYPE_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+#include <linux/magic.h>
+#include <linux/nfs_fs.h>
+#include <linux/romfs_fs.h>
+
+static inline int au_test_aufs(struct super_block *sb)
+{
+ return sb->s_magic == AUFS_SUPER_MAGIC;
+}
+
+static inline const char *au_sbtype(struct super_block *sb)
+{
+ return sb->s_type->name;
+}
+
+static inline int au_test_iso9660(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_ISO9660_FS) || defined(CONFIG_ISO9660_FS_MODULE)
+ return sb->s_magic == ISOFS_SUPER_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_romfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_ROMFS_FS) || defined(CONFIG_ROMFS_FS_MODULE)
+ return sb->s_magic == ROMFS_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_cramfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_CRAMFS) || defined(CONFIG_CRAMFS_MODULE)
+ return sb->s_magic == CRAMFS_MAGIC;
+#endif
+ return 0;
+}
+
+static inline int au_test_nfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_NFS_FS) || defined(CONFIG_NFS_FS_MODULE)
+ return sb->s_magic == NFS_SUPER_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_fuse(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_FUSE_FS) || defined(CONFIG_FUSE_FS_MODULE)
+ return sb->s_magic == FUSE_SUPER_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_xfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_XFS_FS) || defined(CONFIG_XFS_FS_MODULE)
+ return sb->s_magic == XFS_SB_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_tmpfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_TMPFS
+ return sb->s_magic == TMPFS_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_ecryptfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_ECRYPT_FS) || defined(CONFIG_ECRYPT_FS_MODULE)
+ return !strcmp(au_sbtype(sb), "ecryptfs");
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_ramfs(struct super_block *sb)
+{
+ return sb->s_magic == RAMFS_MAGIC;
+}
+
+static inline int au_test_ubifs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_UBIFS_FS) || defined(CONFIG_UBIFS_FS_MODULE)
+ return sb->s_magic == UBIFS_SUPER_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_procfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_PROC_FS
+ return sb->s_magic == PROC_SUPER_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_sysfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_SYSFS
+ return sb->s_magic == SYSFS_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_configfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_CONFIGFS_FS) || defined(CONFIG_CONFIGFS_FS_MODULE)
+ return sb->s_magic == CONFIGFS_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_minix(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_MINIX_FS) || defined(CONFIG_MINIX_FS_MODULE)
+ return sb->s_magic == MINIX3_SUPER_MAGIC
+ || sb->s_magic == MINIX2_SUPER_MAGIC
+ || sb->s_magic == MINIX2_SUPER_MAGIC2
+ || sb->s_magic == MINIX_SUPER_MAGIC
+ || sb->s_magic == MINIX_SUPER_MAGIC2;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_fat(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_FAT_FS) || defined(CONFIG_FAT_FS_MODULE)
+ return sb->s_magic == MSDOS_SUPER_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_msdos(struct super_block *sb)
+{
+ return au_test_fat(sb);
+}
+
+static inline int au_test_vfat(struct super_block *sb)
+{
+ return au_test_fat(sb);
+}
+
+static inline int au_test_securityfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_SECURITYFS
+ return sb->s_magic == SECURITYFS_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_squashfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_SQUASHFS) || defined(CONFIG_SQUASHFS_MODULE)
+ return sb->s_magic == SQUASHFS_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_btrfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
+ return sb->s_magic == BTRFS_SUPER_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_xenfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_XENFS) || defined(CONFIG_XENFS_MODULE)
+ return sb->s_magic == XENFS_SUPER_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_debugfs(struct super_block *sb __maybe_unused)
+{
+#ifdef CONFIG_DEBUG_FS
+ return sb->s_magic == DEBUGFS_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_nilfs(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_NILFS) || defined(CONFIG_NILFS_MODULE)
+ return sb->s_magic == NILFS_SUPER_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+static inline int au_test_hfsplus(struct super_block *sb __maybe_unused)
+{
+#if defined(CONFIG_HFSPLUS_FS) || defined(CONFIG_HFSPLUS_FS_MODULE)
+ return sb->s_magic == HFSPLUS_SUPER_MAGIC;
+#else
+ return 0;
+#endif
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * these filesystems cannot be used as an aufs branch.
+ */
+static inline int au_test_fs_unsuppoted(struct super_block *sb)
+{
+ return
+#ifndef CONFIG_AUFS_BR_RAMFS
+ au_test_ramfs(sb) ||
+#endif
+ au_test_procfs(sb)
+ || au_test_sysfs(sb)
+ || au_test_configfs(sb)
+ || au_test_debugfs(sb)
+ || au_test_securityfs(sb)
+ || au_test_xenfs(sb)
+ || au_test_ecryptfs(sb)
+ /* || !strcmp(au_sbtype(sb), "unionfs") */
+ || au_test_aufs(sb); /* will be supported in next version */
+}
+
+static inline int au_test_fs_remote(struct super_block *sb)
+{
+ return !au_test_tmpfs(sb)
+#ifdef CONFIG_AUFS_BR_RAMFS
+ && !au_test_ramfs(sb)
+#endif
+ && !(sb->s_type->fs_flags & FS_REQUIRES_DEV);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * Note: these functions (below) are created after reading ->getattr() in all
+ * filesystems under linux/fs. It means this survey has to be repeated on every update...
+ */
+
+/*
+ * some filesystems require getattr to refresh the inode attributes before
+ * referencing them.
+ * in most cases, we can rely on the inode attributes of NFS (or any remote fs)
+ * and leave the work to d_revalidate().
+ */
+static inline int au_test_fs_refresh_iattr(struct super_block *sb)
+{
+ return au_test_nfs(sb)
+ || au_test_fuse(sb)
+ /* || au_test_btrfs(sb) */ /* untested */
+ ;
+}
+
+/*
+ * filesystems which don't maintain i_size or i_blocks.
+ */
+static inline int au_test_fs_bad_iattr_size(struct super_block *sb)
+{
+ return au_test_xfs(sb)
+ || au_test_btrfs(sb)
+ || au_test_ubifs(sb)
+ || au_test_hfsplus(sb) /* maintained, but incorrect */
+ /* || au_test_minix(sb) */ /* untested */
+ ;
+}
+
+/*
+ * filesystems which don't store the correct value in some of their inode
+ * attributes.
+ */
+static inline int au_test_fs_bad_iattr(struct super_block *sb)
+{
+ return au_test_fs_bad_iattr_size(sb)
+ || au_test_fat(sb)
+ || au_test_msdos(sb)
+ || au_test_vfat(sb);
+}
+
+/* they don't check i_nlink in link(2) */
+static inline int au_test_fs_no_limit_nlink(struct super_block *sb)
+{
+ return au_test_tmpfs(sb)
+#ifdef CONFIG_AUFS_BR_RAMFS
+ || au_test_ramfs(sb)
+#endif
+ || au_test_ubifs(sb)
+ || au_test_hfsplus(sb);
+}
+
+/*
+ * filesystems which set S_NOATIME and S_NOCMTIME.
+ */
+static inline int au_test_fs_notime(struct super_block *sb)
+{
+ return au_test_nfs(sb)
+ || au_test_fuse(sb)
+ || au_test_ubifs(sb)
+ ;
+}
+
+/* temporary support for i#1 in cramfs */
+static inline int au_test_fs_unique_ino(struct inode *inode)
+{
+ if (au_test_cramfs(inode->i_sb))
+ return inode->i_ino != 1;
+ return 1;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * the filesystem where the xino files are placed must support i/o after unlink
+ * and maintain i_size and i_blocks.
+ */
+static inline int au_test_fs_bad_xino(struct super_block *sb)
+{
+ return au_test_fs_remote(sb)
+ || au_test_fs_bad_iattr_size(sb)
+ /* don't want unnecessary work for xino */
+ || au_test_aufs(sb)
+ || au_test_ecryptfs(sb)
+ || au_test_nilfs(sb);
+}
+
+static inline int au_test_fs_trunc_xino(struct super_block *sb)
+{
+ return au_test_tmpfs(sb)
+ || au_test_ramfs(sb);
+}
+
+/*
+ * test if the @sb is real-readonly.
+ */
+static inline int au_test_fs_rr(struct super_block *sb)
+{
+ return au_test_squashfs(sb)
+ || au_test_iso9660(sb)
+ || au_test_cramfs(sb)
+ || au_test_romfs(sb);
+}
+
+/*
+ * test if the @inode is nfs with the 'noacl' mount option
+ * NFS always sets MS_POSIXACL regardless of its mount option 'noacl.'
+ */
+static inline int au_test_nfs_noacl(struct inode *inode)
+{
+ return au_test_nfs(inode->i_sb)
+ /* && IS_POSIXACL(inode) */
+ && !nfs_server_capable(inode, NFS_CAP_ACLS);
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_FSTYPE_H__ */
diff --git a/fs/aufs/hfsnotify.c b/fs/aufs/hfsnotify.c
new file mode 100644
index 000000000..c0a1a63a9
--- /dev/null
+++ b/fs/aufs/hfsnotify.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * fsnotify for the lower directories
+ */
+
+#include "aufs.h"
+
+/* FS_IN_IGNORED is unnecessary */
+static const __u32 AuHfsnMask = (FS_MOVED_TO | FS_MOVED_FROM | FS_DELETE
+ | FS_CREATE | FS_EVENT_ON_CHILD);
+static DECLARE_WAIT_QUEUE_HEAD(au_hfsn_wq);
+static __cacheline_aligned_in_smp atomic64_t au_hfsn_ifree = ATOMIC64_INIT(0);
+
+static void au_hfsn_free_mark(struct fsnotify_mark *mark)
+{
+ struct au_hnotify *hn = container_of(mark, struct au_hnotify,
+ hn_mark);
+ AuDbg("here\n");
+ au_cache_free_hnotify(hn);
+ smp_mb__before_atomic();
+ if (atomic64_dec_and_test(&au_hfsn_ifree))
+ wake_up(&au_hfsn_wq);
+}
+
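+/* attach an fsnotify mark to the lower (branch) dir inode of @hinode */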
+static int au_hfsn_alloc(struct au_hinode *hinode)
+{
+ int err;
+ struct au_hnotify *hn;
+ struct super_block *sb;
+ struct au_branch *br;
+ struct fsnotify_mark *mark;
+ aufs_bindex_t bindex;
+
+ hn = hinode->hi_notify;
+ sb = hn->hn_aufs_inode->i_sb;
+ bindex = au_br_index(sb, hinode->hi_id);
+ br = au_sbr(sb, bindex);
+ AuDebugOn(!br->br_hfsn);
+
+ mark = &hn->hn_mark;
+ fsnotify_init_mark(mark, au_hfsn_free_mark);
+ mark->mask = AuHfsnMask;
+ /*
+ * by udba rename or rmdir, aufs assigns a new inode to the known
+ * h_inode, so specify 1 to allow dups.
+ */
+ lockdep_off();
+ err = fsnotify_add_mark(mark, br->br_hfsn->hfsn_group, hinode->hi_inode,
+ /*mnt*/NULL, /*allow_dups*/1);
+ /* even if err */
+ fsnotify_put_mark(mark);
+ lockdep_on();
+
+ return err;
+}
+
+static int au_hfsn_free(struct au_hinode *hinode, struct au_hnotify *hn)
+{
+ struct fsnotify_mark *mark;
+ unsigned long long ull;
+ struct fsnotify_group *group;
+
+ ull = atomic64_inc_return(&au_hfsn_ifree);
+ BUG_ON(!ull);
+
+ mark = &hn->hn_mark;
+ spin_lock(&mark->lock);
+ group = mark->group;
+ fsnotify_get_group(group);
+ spin_unlock(&mark->lock);
+ lockdep_off();
+ fsnotify_destroy_mark(mark, group);
+ fsnotify_put_group(group);
+ lockdep_on();
+
+ /* free hn by myself */
+ return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void au_hfsn_ctl(struct au_hinode *hinode, int do_set)
+{
+ struct fsnotify_mark *mark;
+
+ mark = &hinode->hi_notify->hn_mark;
+ spin_lock(&mark->lock);
+ if (do_set) {
+ AuDebugOn(mark->mask & AuHfsnMask);
+ mark->mask |= AuHfsnMask;
+ } else {
+ AuDebugOn(!(mark->mask & AuHfsnMask));
+ mark->mask &= ~AuHfsnMask;
+ }
+ spin_unlock(&mark->lock);
+ /* fsnotify_recalc_inode_mask(hinode->hi_inode); */
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* #define AuDbgHnotify */
+#ifdef AuDbgHnotify
+static char *au_hfsn_name(u32 mask)
+{
+#ifdef CONFIG_AUFS_DEBUG
+#define test_ret(flag) \
+ do { \
+ if (mask & flag) \
+ return #flag; \
+ } while (0)
+ test_ret(FS_ACCESS);
+ test_ret(FS_MODIFY);
+ test_ret(FS_ATTRIB);
+ test_ret(FS_CLOSE_WRITE);
+ test_ret(FS_CLOSE_NOWRITE);
+ test_ret(FS_OPEN);
+ test_ret(FS_MOVED_FROM);
+ test_ret(FS_MOVED_TO);
+ test_ret(FS_CREATE);
+ test_ret(FS_DELETE);
+ test_ret(FS_DELETE_SELF);
+ test_ret(FS_MOVE_SELF);
+ test_ret(FS_UNMOUNT);
+ test_ret(FS_Q_OVERFLOW);
+ test_ret(FS_IN_IGNORED);
+ test_ret(FS_ISDIR);
+ test_ret(FS_IN_ONESHOT);
+ test_ret(FS_EVENT_ON_CHILD);
+ return "";
+#undef test_ret
+#else
+ return "??";
+#endif
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+static void au_hfsn_free_group(struct fsnotify_group *group)
+{
+ struct au_br_hfsnotify *hfsn = group->private;
+
+ AuDbg("here\n");
+ kfree(hfsn);
+}
+
+static int au_hfsn_handle_event(struct fsnotify_group *group,
+ struct inode *inode,
+ struct fsnotify_mark *inode_mark,
+ struct fsnotify_mark *vfsmount_mark,
+ u32 mask, void *data, int data_type,
+ const unsigned char *file_name, u32 cookie)
+{
+ int err;
+ struct au_hnotify *hnotify;
+ struct inode *h_dir, *h_inode;
+ struct qstr h_child_qstr = QSTR_INIT(file_name, strlen(file_name));
+
+ AuDebugOn(data_type != FSNOTIFY_EVENT_INODE);
+
+ err = 0;
+ /* if FS_UNMOUNT happens, there must be another bug */
+ AuDebugOn(mask & FS_UNMOUNT);
+ if (mask & (FS_IN_IGNORED | FS_UNMOUNT))
+ goto out;
+
+ h_dir = inode;
+ h_inode = NULL;
+#ifdef AuDbgHnotify
+ au_debug_on();
+ if (1 || h_child_qstr.len != sizeof(AUFS_XINO_FNAME) - 1
+ || strncmp(h_child_qstr.name, AUFS_XINO_FNAME, h_child_qstr.len)) {
+ AuDbg("i%lu, mask 0x%x %s, hcname %.*s, hi%lu\n",
+ h_dir->i_ino, mask, au_hfsn_name(mask),
+ AuLNPair(&h_child_qstr), h_inode ? h_inode->i_ino : 0);
+ /* WARN_ON(1); */
+ }
+ au_debug_off();
+#endif
+
+ AuDebugOn(!inode_mark);
+ hnotify = container_of(inode_mark, struct au_hnotify, hn_mark);
+ err = au_hnotify(h_dir, hnotify, mask, &h_child_qstr, h_inode);
+
+out:
+ return err;
+}
+
+static struct fsnotify_ops au_hfsn_ops = {
+ .handle_event = au_hfsn_handle_event,
+ .free_group_priv = au_hfsn_free_group
+};
+
+/* ---------------------------------------------------------------------- */
+
+static void au_hfsn_fin_br(struct au_branch *br)
+{
+ struct au_br_hfsnotify *hfsn;
+
+ hfsn = br->br_hfsn;
+ if (hfsn) {
+ lockdep_off();
+ fsnotify_put_group(hfsn->hfsn_group);
+ lockdep_on();
+ }
+}
+
+static int au_hfsn_init_br(struct au_branch *br, int perm)
+{
+ int err;
+ struct fsnotify_group *group;
+ struct au_br_hfsnotify *hfsn;
+
+ err = 0;
+ br->br_hfsn = NULL;
+ if (!au_br_hnotifyable(perm))
+ goto out;
+
+ err = -ENOMEM;
+ hfsn = kmalloc(sizeof(*hfsn), GFP_NOFS);
+ if (unlikely(!hfsn))
+ goto out;
+
+ err = 0;
+ group = fsnotify_alloc_group(&au_hfsn_ops);
+ if (IS_ERR(group)) {
+ err = PTR_ERR(group);
+ pr_err("fsnotify_alloc_group() failed, %d\n", err);
+ goto out_hfsn;
+ }
+
+ group->private = hfsn;
+ hfsn->hfsn_group = group;
+ br->br_hfsn = hfsn;
+ goto out; /* success */
+
+out_hfsn:
+ kfree(hfsn);
+out:
+ return err;
+}
+
+static int au_hfsn_reset_br(unsigned int udba, struct au_branch *br, int perm)
+{
+ int err;
+
+ err = 0;
+ if (!br->br_hfsn)
+ err = au_hfsn_init_br(br, perm);
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void au_hfsn_fin(void)
+{
+ AuDbg("au_hfsn_ifree %lld\n", (long long)atomic64_read(&au_hfsn_ifree));
+ wait_event(au_hfsn_wq, !atomic64_read(&au_hfsn_ifree));
+}
+
+const struct au_hnotify_op au_hnotify_op = {
+ .ctl = au_hfsn_ctl,
+ .alloc = au_hfsn_alloc,
+ .free = au_hfsn_free,
+
+ .fin = au_hfsn_fin,
+
+ .reset_br = au_hfsn_reset_br,
+ .fin_br = au_hfsn_fin_br,
+ .init_br = au_hfsn_init_br
+};
diff --git a/fs/aufs/hfsplus.c b/fs/aufs/hfsplus.c
new file mode 100644
index 000000000..145c6ac2f
--- /dev/null
+++ b/fs/aufs/hfsplus.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2010-2016 Junjiro R. Okajima
+ */
+
+/*
+ * special support for filesystems which acquire an inode mutex
+ * when finally closing a file, e.g. hfsplus.
+ *
+ * The trick is very simple and stupid: just open the file before the really
+ * necessary open, to tell hfsplus that this is not the final closing.
+ * The caller should call au_h_open_pre() after acquiring the inode mutex,
+ * and au_h_open_post() after releasing it.
+ */
+
+#include "aufs.h"
+
+struct file *au_h_open_pre(struct dentry *dentry, aufs_bindex_t bindex,
+ int force_wr)
+{
+ struct file *h_file;
+ struct dentry *h_dentry;
+
+ h_dentry = au_h_dptr(dentry, bindex);
+ AuDebugOn(!h_dentry);
+ AuDebugOn(d_is_negative(h_dentry));
+
+ h_file = NULL;
+ if (au_test_hfsplus(h_dentry->d_sb)
+ && d_is_reg(h_dentry))
+ h_file = au_h_open(dentry, bindex,
+ O_RDONLY | O_NOATIME | O_LARGEFILE,
+ /*file*/NULL, force_wr);
+ return h_file;
+}
+
+void au_h_open_post(struct dentry *dentry, aufs_bindex_t bindex,
+ struct file *h_file)
+{
+ if (h_file) {
+ fput(h_file);
+ au_sbr_put(dentry->d_sb, bindex);
+ }
+}
diff --git a/fs/aufs/hnotify.c b/fs/aufs/hnotify.c
new file mode 100644
index 000000000..92e432b6a
--- /dev/null
+++ b/fs/aufs/hnotify.c
@@ -0,0 +1,697 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * abstraction for notification of direct changes on the lower directories
+ */
+
+#include "aufs.h"
+
+int au_hn_alloc(struct au_hinode *hinode, struct inode *inode)
+{
+ int err;
+ struct au_hnotify *hn;
+
+ err = -ENOMEM;
+ hn = au_cache_alloc_hnotify();
+ if (hn) {
+ hn->hn_aufs_inode = inode;
+ hinode->hi_notify = hn;
+ err = au_hnotify_op.alloc(hinode);
+ AuTraceErr(err);
+ if (unlikely(err)) {
+ hinode->hi_notify = NULL;
+ au_cache_free_hnotify(hn);
+ /*
+ * The upper dir was removed by udba, but a dir with the
+ * same name remains. In this case, aufs assigns a new
+ * inode number and sets the monitor again.
+ * For the lower dir, the old monitor is still left.
+ */
+ if (err == -EEXIST)
+ err = 0;
+ }
+ }
+
+ AuTraceErr(err);
+ return err;
+}
+
+void au_hn_free(struct au_hinode *hinode)
+{
+ struct au_hnotify *hn;
+
+ hn = hinode->hi_notify;
+ if (hn) {
+ hinode->hi_notify = NULL;
+ if (au_hnotify_op.free(hinode, hn))
+ au_cache_free_hnotify(hn);
+ }
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_hn_ctl(struct au_hinode *hinode, int do_set)
+{
+ if (hinode->hi_notify)
+ au_hnotify_op.ctl(hinode, do_set);
+}
+
+void au_hn_reset(struct inode *inode, unsigned int flags)
+{
+ aufs_bindex_t bindex, bend;
+ struct inode *hi;
+ struct dentry *iwhdentry;
+
+ bend = au_ibend(inode);
+ for (bindex = au_ibstart(inode); bindex <= bend; bindex++) {
+ hi = au_h_iptr(inode, bindex);
+ if (!hi)
+ continue;
+
+ /* inode_lock_nested(hi, AuLsc_I_CHILD); */
+ iwhdentry = au_hi_wh(inode, bindex);
+ if (iwhdentry)
+ dget(iwhdentry);
+ au_igrab(hi);
+ au_set_h_iptr(inode, bindex, NULL, 0);
+ au_set_h_iptr(inode, bindex, au_igrab(hi),
+ flags & ~AuHi_XINO);
+ iput(hi);
+ dput(iwhdentry);
+ /* inode_unlock(hi); */
+ }
+}
+
+/* ---------------------------------------------------------------------- */
+
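+/*
+ * a direct change removed or replaced the lower inode @h_inode of @inode;
+ * write zero into the xino entries of its lower inodes to drop the stale
+ * ino translations.
+ */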
+static int hn_xino(struct inode *inode, struct inode *h_inode)
+{
+ int err;
+ aufs_bindex_t bindex, bend, bfound, bstart;
+ struct inode *h_i;
+
+ err = 0;
+ if (unlikely(inode->i_ino == AUFS_ROOT_INO)) {
+ pr_warn("branch root dir was changed\n");
+ goto out;
+ }
+
+ bfound = -1;
+ bend = au_ibend(inode);
+ bstart = au_ibstart(inode);
+#if 0 /* reserved for future use */
+ if (bindex == bend) {
+ /* keep this ino in rename case */
+ goto out;
+ }
+#endif
+ for (bindex = bstart; bindex <= bend; bindex++)
+ if (au_h_iptr(inode, bindex) == h_inode) {
+ bfound = bindex;
+ break;
+ }
+ if (bfound < 0)
+ goto out;
+
+ for (bindex = bstart; bindex <= bend; bindex++) {
+ h_i = au_h_iptr(inode, bindex);
+ if (!h_i)
+ continue;
+
+ err = au_xino_write(inode->i_sb, bindex, h_i->i_ino, /*ino*/0);
+ /* ignore this error */
+ /* bad action? */
+ }
+
+ /* children inode numbers will be broken */
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
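+/*
+ * make the generation of every dentry/inode in the subtree under @dentry
+ * obsolete, so that they will be revalidated later.
+ */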
+static int hn_gen_tree(struct dentry *dentry)
+{
+ int err, i, j, ndentry;
+ struct au_dcsub_pages dpages;
+ struct au_dpage *dpage;
+ struct dentry **dentries;
+
+ err = au_dpages_init(&dpages, GFP_NOFS);
+ if (unlikely(err))
+ goto out;
+ err = au_dcsub_pages(&dpages, dentry, NULL, NULL);
+ if (unlikely(err))
+ goto out_dpages;
+
+ for (i = 0; i < dpages.ndpage; i++) {
+ dpage = dpages.dpages + i;
+ dentries = dpage->dentries;
+ ndentry = dpage->ndentry;
+ for (j = 0; j < ndentry; j++) {
+ struct dentry *d;
+
+ d = dentries[j];
+ if (IS_ROOT(d))
+ continue;
+
+ au_digen_dec(d);
+ if (d_really_is_positive(d))
+ /* todo: reset children xino?
+ cached children only? */
+ au_iigen_dec(d_inode(d));
+ }
+ }
+
+out_dpages:
+ au_dpages_free(&dpages);
+
+#if 0
+ /* discard children */
+ dentry_unhash(dentry);
+ dput(dentry);
+#endif
+out:
+ return err;
+}
+
+/*
+ * return 0 if processed.
+ */
+static int hn_gen_by_inode(char *name, unsigned int nlen, struct inode *inode,
+ const unsigned int isdir)
+{
+ int err;
+ struct dentry *d;
+ struct qstr *dname;
+
+ err = 1;
+ if (unlikely(inode->i_ino == AUFS_ROOT_INO)) {
+ pr_warn("branch root dir was changed\n");
+ err = 0;
+ goto out;
+ }
+
+ if (!isdir) {
+ AuDebugOn(!name);
+ au_iigen_dec(inode);
+ spin_lock(&inode->i_lock);
+ hlist_for_each_entry(d, &inode->i_dentry, d_u.d_alias) {
+ spin_lock(&d->d_lock);
+ dname = &d->d_name;
+ if (dname->len != nlen
+ && memcmp(dname->name, name, nlen)) {
+ spin_unlock(&d->d_lock);
+ continue;
+ }
+ err = 0;
+ au_digen_dec(d);
+ spin_unlock(&d->d_lock);
+ break;
+ }
+ spin_unlock(&inode->i_lock);
+ } else {
+ au_fset_si(au_sbi(inode->i_sb), FAILED_REFRESH_DIR);
+ d = d_find_any_alias(inode);
+ if (!d) {
+ au_iigen_dec(inode);
+ goto out;
+ }
+
+ spin_lock(&d->d_lock);
+ dname = &d->d_name;
+ if (dname->len == nlen && !memcmp(dname->name, name, nlen)) {
+ spin_unlock(&d->d_lock);
+ err = hn_gen_tree(d);
+ spin_lock(&d->d_lock);
+ }
+ spin_unlock(&d->d_lock);
+ dput(d);
+ }
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+static int hn_gen_by_name(struct dentry *dentry, const unsigned int isdir)
+{
+ int err;
+
+ if (IS_ROOT(dentry)) {
+ pr_warn("branch root dir was changed\n");
+ return 0;
+ }
+
+ err = 0;
+ if (!isdir) {
+ au_digen_dec(dentry);
+ if (d_really_is_positive(dentry))
+ au_iigen_dec(d_inode(dentry));
+ } else {
+ au_fset_si(au_sbi(dentry->d_sb), FAILED_REFRESH_DIR);
+ if (d_really_is_positive(dentry))
+ err = hn_gen_tree(dentry);
+ }
+
+ AuTraceErr(err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* hnotify job flags */
+#define AuHnJob_XINO0 1
+#define AuHnJob_GEN (1 << 1)
+#define AuHnJob_DIRENT (1 << 2)
+#define AuHnJob_ISDIR (1 << 3)
+#define AuHnJob_TRYXINO0 (1 << 4)
+#define AuHnJob_MNTPNT (1 << 5)
+#define au_ftest_hnjob(flags, name) ((flags) & AuHnJob_##name)
+#define au_fset_hnjob(flags, name) \
+ do { (flags) |= AuHnJob_##name; } while (0)
+#define au_fclr_hnjob(flags, name) \
+ do { (flags) &= ~AuHnJob_##name; } while (0)
+
+enum {
+ AuHn_CHILD,
+ AuHn_PARENT,
+ AuHnLast
+};
+
+struct au_hnotify_args {
+ struct inode *h_dir, *dir, *h_child_inode;
+ u32 mask;
+ unsigned int flags[AuHnLast];
+ unsigned int h_child_nlen;
+ char h_child_name[];
+};
+
+struct hn_job_args {
+ unsigned int flags;
+ struct inode *inode, *h_inode, *dir, *h_dir;
+ struct dentry *dentry;
+ char *h_name;
+ int h_nlen;
+};
+
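+/* process the jobs selected by the AuHnJob_* bits in @a->flags */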
+static int hn_job(struct hn_job_args *a)
+{
+ const unsigned int isdir = au_ftest_hnjob(a->flags, ISDIR);
+ int e;
+
+ /* reset xino */
+ if (au_ftest_hnjob(a->flags, XINO0) && a->inode)
+ hn_xino(a->inode, a->h_inode); /* ignore this error */
+
+ if (au_ftest_hnjob(a->flags, TRYXINO0)
+ && a->inode
+ && a->h_inode) {
+ inode_lock_nested(a->h_inode, AuLsc_I_CHILD);
+ if (!a->h_inode->i_nlink
+ && !(a->h_inode->i_state & I_LINKABLE))
+ hn_xino(a->inode, a->h_inode); /* ignore this error */
+ inode_unlock(a->h_inode);
+ }
+
+ /* make the generation obsolete */
+ if (au_ftest_hnjob(a->flags, GEN)) {
+ e = -1;
+ if (a->inode)
+ e = hn_gen_by_inode(a->h_name, a->h_nlen, a->inode,
+ isdir);
+ if (e && a->dentry)
+ hn_gen_by_name(a->dentry, isdir);
+ /* ignore this error */
+ }
+
+ /* make dir entries obsolete */
+ if (au_ftest_hnjob(a->flags, DIRENT) && a->inode) {
+ struct au_vdir *vdir;
+
+ vdir = au_ivdir(a->inode);
+ if (vdir)
+ vdir->vd_jiffy = 0;
+ /* IMustLock(a->inode); */
+ /* a->inode->i_version++; */
+ }
+
+ /* can do nothing but warn */
+ if (au_ftest_hnjob(a->flags, MNTPNT)
+ && a->dentry
+ && d_mountpoint(a->dentry))
+ pr_warn("mount-point %pd is removed or renamed\n", a->dentry);
+
+ return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
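+/*
+ * find a cached child of @dir whose name matches, make its generation
+ * obsolete, and return it with its dinfo write-locked (or NULL).
+ */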
+static struct dentry *lookup_wlock_by_name(char *name, unsigned int nlen,
+ struct inode *dir)
+{
+ struct dentry *dentry, *d, *parent;
+ struct qstr *dname;
+
+ parent = d_find_any_alias(dir);
+ if (!parent)
+ return NULL;
+
+ dentry = NULL;
+ spin_lock(&parent->d_lock);
+ list_for_each_entry(d, &parent->d_subdirs, d_child) {
+ /* AuDbg("%pd\n", d); */
+ spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
+ dname = &d->d_name;
+ if (dname->len != nlen || memcmp(dname->name, name, nlen))
+ goto cont_unlock;
+ if (au_di(d))
+ au_digen_dec(d);
+ else
+ goto cont_unlock;
+ if (au_dcount(d) > 0) {
+ dentry = dget_dlock(d);
+ spin_unlock(&d->d_lock);
+ break;
+ }
+
+cont_unlock:
+ spin_unlock(&d->d_lock);
+ }
+ spin_unlock(&parent->d_lock);
+ dput(parent);
+
+ if (dentry)
+ di_write_lock_child(dentry);
+
+ return dentry;
+}
+
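+/*
+ * translate @h_ino on branch @bindex via the xino table and return the
+ * cached aufs inode write-locked, or NULL when it is not found.
+ */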
+static struct inode *lookup_wlock_by_ino(struct super_block *sb,
+ aufs_bindex_t bindex, ino_t h_ino)
+{
+ struct inode *inode;
+ ino_t ino;
+ int err;
+
+ inode = NULL;
+ err = au_xino_read(sb, bindex, h_ino, &ino);
+ if (!err && ino)
+ inode = ilookup(sb, ino);
+ if (!inode)
+ goto out;
+
+ if (unlikely(inode->i_ino == AUFS_ROOT_INO)) {
+ pr_warn("wrong root branch\n");
+ iput(inode);
+ inode = NULL;
+ goto out;
+ }
+
+ ii_write_lock_child(inode);
+
+out:
+ return inode;
+}
+
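+/* the bottom half of the notification, executed via the aufs workqueue */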
+static void au_hn_bh(void *_args)
+{
+ struct au_hnotify_args *a = _args;
+ struct super_block *sb;
+ aufs_bindex_t bindex, bend, bfound;
+ unsigned char xino, try_iput;
+ int err;
+ struct inode *inode;
+ ino_t h_ino;
+ struct hn_job_args args;
+ struct dentry *dentry;
+ struct au_sbinfo *sbinfo;
+
+ AuDebugOn(!_args);
+ AuDebugOn(!a->h_dir);
+ AuDebugOn(!a->dir);
+ AuDebugOn(!a->mask);
+ AuDbg("mask 0x%x, i%lu, hi%lu, hci%lu\n",
+ a->mask, a->dir->i_ino, a->h_dir->i_ino,
+ a->h_child_inode ? a->h_child_inode->i_ino : 0);
+
+ inode = NULL;
+ dentry = NULL;
+ /*
+ * do not lock a->dir->i_mutex here
+ * because d_revalidate() may cause a deadlock.
+ */
+ sb = a->dir->i_sb;
+ AuDebugOn(!sb);
+ sbinfo = au_sbi(sb);
+ AuDebugOn(!sbinfo);
+ si_write_lock(sb, AuLock_NOPLMW);
+
+ ii_read_lock_parent(a->dir);
+ bfound = -1;
+ bend = au_ibend(a->dir);
+ for (bindex = au_ibstart(a->dir); bindex <= bend; bindex++)
+ if (au_h_iptr(a->dir, bindex) == a->h_dir) {
+ bfound = bindex;
+ break;
+ }
+ ii_read_unlock(a->dir);
+ if (unlikely(bfound < 0))
+ goto out;
+
+ xino = !!au_opt_test(au_mntflags(sb), XINO);
+ h_ino = 0;
+ if (a->h_child_inode)
+ h_ino = a->h_child_inode->i_ino;
+
+ if (a->h_child_nlen
+ && (au_ftest_hnjob(a->flags[AuHn_CHILD], GEN)
+ || au_ftest_hnjob(a->flags[AuHn_CHILD], MNTPNT)))
+ dentry = lookup_wlock_by_name(a->h_child_name, a->h_child_nlen,
+ a->dir);
+ try_iput = 0;
+ if (dentry && d_really_is_positive(dentry))
+ inode = d_inode(dentry);
+ if (xino && !inode && h_ino
+ && (au_ftest_hnjob(a->flags[AuHn_CHILD], XINO0)
+ || au_ftest_hnjob(a->flags[AuHn_CHILD], TRYXINO0)
+ || au_ftest_hnjob(a->flags[AuHn_CHILD], GEN))) {
+ inode = lookup_wlock_by_ino(sb, bfound, h_ino);
+ try_iput = 1;
+ }
+
+ args.flags = a->flags[AuHn_CHILD];
+ args.dentry = dentry;
+ args.inode = inode;
+ args.h_inode = a->h_child_inode;
+ args.dir = a->dir;
+ args.h_dir = a->h_dir;
+ args.h_name = a->h_child_name;
+ args.h_nlen = a->h_child_nlen;
+ err = hn_job(&args);
+ if (dentry) {
+ if (au_di(dentry))
+ di_write_unlock(dentry);
+ dput(dentry);
+ }
+ if (inode && try_iput) {
+ ii_write_unlock(inode);
+ iput(inode);
+ }
+
+ ii_write_lock_parent(a->dir);
+ args.flags = a->flags[AuHn_PARENT];
+ args.dentry = NULL;
+ args.inode = a->dir;
+ args.h_inode = a->h_dir;
+ args.dir = NULL;
+ args.h_dir = NULL;
+ args.h_name = NULL;
+ args.h_nlen = 0;
+ err = hn_job(&args);
+ ii_write_unlock(a->dir);
+
+out:
+ iput(a->h_child_inode);
+ iput(a->h_dir);
+ iput(a->dir);
+ si_write_unlock(sb);
+ au_nwt_done(&sbinfo->si_nowait);
+ kfree(a);
+}
+
+/* ---------------------------------------------------------------------- */
+
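+/*
+ * called from the fsnotify handler; build an au_hnotify_args and queue
+ * au_hn_bh() to the aufs workqueue.
+ */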
+int au_hnotify(struct inode *h_dir, struct au_hnotify *hnotify, u32 mask,
+ struct qstr *h_child_qstr, struct inode *h_child_inode)
+{
+ int err, len;
+ unsigned int flags[AuHnLast], f;
+ unsigned char isdir, isroot, wh;
+ struct inode *dir;
+ struct au_hnotify_args *args;
+ char *p, *h_child_name;
+
+ err = 0;
+ AuDebugOn(!hnotify || !hnotify->hn_aufs_inode);
+ dir = igrab(hnotify->hn_aufs_inode);
+ if (!dir)
+ goto out;
+
+ isroot = (dir->i_ino == AUFS_ROOT_INO);
+ wh = 0;
+ h_child_name = (void *)h_child_qstr->name;
+ len = h_child_qstr->len;
+ if (h_child_name) {
+ if (len > AUFS_WH_PFX_LEN
+ && !memcmp(h_child_name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) {
+ h_child_name += AUFS_WH_PFX_LEN;
+ len -= AUFS_WH_PFX_LEN;
+ wh = 1;
+ }
+ }
+
+ isdir = 0;
+ if (h_child_inode)
+ isdir = !!S_ISDIR(h_child_inode->i_mode);
+ flags[AuHn_PARENT] = AuHnJob_ISDIR;
+ flags[AuHn_CHILD] = 0;
+ if (isdir)
+ flags[AuHn_CHILD] = AuHnJob_ISDIR;
+ au_fset_hnjob(flags[AuHn_PARENT], DIRENT);
+ au_fset_hnjob(flags[AuHn_CHILD], GEN);
+ switch (mask & FS_EVENTS_POSS_ON_CHILD) {
+ case FS_MOVED_FROM:
+ case FS_MOVED_TO:
+ au_fset_hnjob(flags[AuHn_CHILD], XINO0);
+ au_fset_hnjob(flags[AuHn_CHILD], MNTPNT);
+ /*FALLTHROUGH*/
+ case FS_CREATE:
+ AuDebugOn(!h_child_name);
+ break;
+
+ case FS_DELETE:
+ /*
+ * aufs will never be able to get this child inode.
+ * revalidation should be done in d_revalidate()
+ * by checking i_nlink, i_generation or d_unhashed().
+ */
+ AuDebugOn(!h_child_name);
+ au_fset_hnjob(flags[AuHn_CHILD], TRYXINO0);
+ au_fset_hnjob(flags[AuHn_CHILD], MNTPNT);
+ break;
+
+ default:
+ AuDebugOn(1);
+ }
+
+ if (wh)
+ h_child_inode = NULL;
+
+ err = -ENOMEM;
+ /* iput() and kfree() will be called in au_hnotify() */
+ args = kmalloc(sizeof(*args) + len + 1, GFP_NOFS);
+ if (unlikely(!args)) {
+ AuErr1("no memory\n");
+ iput(dir);
+ goto out;
+ }
+ args->flags[AuHn_PARENT] = flags[AuHn_PARENT];
+ args->flags[AuHn_CHILD] = flags[AuHn_CHILD];
+ args->mask = mask;
+ args->dir = dir;
+ args->h_dir = igrab(h_dir);
+ if (h_child_inode)
+ h_child_inode = igrab(h_child_inode); /* can be NULL */
+ args->h_child_inode = h_child_inode;
+ args->h_child_nlen = len;
+ if (len) {
+ p = (void *)args;
+ p += sizeof(*args);
+ memcpy(p, h_child_name, len);
+ p[len] = 0;
+ }
+
+ /* NFS fires the event for the silly-renamed file from a kworker */
+ f = 0;
+ if (!dir->i_nlink
+ || (au_test_nfs(h_dir->i_sb) && (mask & FS_DELETE)))
+ f = AuWkq_NEST;
+ err = au_wkq_nowait(au_hn_bh, args, dir->i_sb, f);
+ if (unlikely(err)) {
+ pr_err("wkq %d\n", err);
+ iput(args->h_child_inode);
+ iput(args->h_dir);
+ iput(args->dir);
+ kfree(args);
+ }
+
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_hnotify_reset_br(unsigned int udba, struct au_branch *br, int perm)
+{
+ int err;
+
+ AuDebugOn(!(udba & AuOptMask_UDBA));
+
+ err = 0;
+ if (au_hnotify_op.reset_br)
+ err = au_hnotify_op.reset_br(udba, br, perm);
+
+ return err;
+}
+
+int au_hnotify_init_br(struct au_branch *br, int perm)
+{
+ int err;
+
+ err = 0;
+ if (au_hnotify_op.init_br)
+ err = au_hnotify_op.init_br(br, perm);
+
+ return err;
+}
+
+void au_hnotify_fin_br(struct au_branch *br)
+{
+ if (au_hnotify_op.fin_br)
+ au_hnotify_op.fin_br(br);
+}
+
+static void au_hn_destroy_cache(void)
+{
+ kmem_cache_destroy(au_cachep[AuCache_HNOTIFY]);
+ au_cachep[AuCache_HNOTIFY] = NULL;
+}
+
+int __init au_hnotify_init(void)
+{
+ int err;
+
+ err = -ENOMEM;
+ au_cachep[AuCache_HNOTIFY] = AuCache(au_hnotify);
+ if (au_cachep[AuCache_HNOTIFY]) {
+ err = 0;
+ if (au_hnotify_op.init)
+ err = au_hnotify_op.init();
+ if (unlikely(err))
+ au_hn_destroy_cache();
+ }
+ AuTraceErr(err);
+ return err;
+}
+
+void au_hnotify_fin(void)
+{
+ if (au_hnotify_op.fin)
+ au_hnotify_op.fin();
+ /* cf. au_cache_fin() */
+ if (au_cachep[AuCache_HNOTIFY])
+ au_hn_destroy_cache();
+}
diff --git a/fs/aufs/i_op.c b/fs/aufs/i_op.c
new file mode 100644
index 000000000..a70d5b050
--- /dev/null
+++ b/fs/aufs/i_op.c
@@ -0,0 +1,1406 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * inode operations (except add/del/rename)
+ */
+
+#include <linux/device_cgroup.h>
+#include <linux/fs_stack.h>
+#include <linux/namei.h>
+#include <linux/security.h>
+#include "aufs.h"
+
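+/* test the permission @mask against the lower inode on a single branch */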
+static int h_permission(struct inode *h_inode, int mask,
+ struct vfsmount *h_mnt, int brperm)
+{
+ int err;
+ const unsigned char write_mask = !!(mask & (MAY_WRITE | MAY_APPEND));
+
+ err = -EACCES;
+ if ((write_mask && IS_IMMUTABLE(h_inode))
+ || ((mask & MAY_EXEC)
+ && S_ISREG(h_inode->i_mode)
+ && ((h_mnt->mnt_flags & MNT_NOEXEC)
+ || !(h_inode->i_mode & S_IXUGO))))
+ goto out;
+
+ /*
+ * - skip the lower fs test in the case of write to ro branch.
+ * - nfs dir permission write check is optimized, but a policy for
+ * link/rename requires a real check.
+ * - nfs always sets MS_POSIXACL regardless of its mount option 'noacl.'
+ * in this case, generic_permission() returns -EOPNOTSUPP.
+ */
+ if ((write_mask && !au_br_writable(brperm))
+ || (au_test_nfs(h_inode->i_sb) && S_ISDIR(h_inode->i_mode)
+ && write_mask && !(mask & MAY_READ))
+ || !h_inode->i_op->permission) {
+ /* AuLabel(generic_permission); */
+ /* AuDbg("get_acl %pf\n", h_inode->i_op->get_acl); */
+ err = generic_permission(h_inode, mask);
+ if (err == -EOPNOTSUPP && au_test_nfs_noacl(h_inode))
+ err = h_inode->i_op->permission(h_inode, mask);
+ AuTraceErr(err);
+ } else {
+ /* AuLabel(h_inode->permission); */
+ err = h_inode->i_op->permission(h_inode, mask);
+ AuTraceErr(err);
+ }
+
+ if (!err)
+ err = devcgroup_inode_permission(h_inode, mask);
+ if (!err)
+ err = security_inode_permission(h_inode, mask);
+
+#if 0
+ if (!err) {
+ /* todo: do we need to call ima_path_check()? */
+ struct path h_path = {
+ .dentry =
+ .mnt = h_mnt
+ };
+ err = ima_path_check(&h_path,
+ mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
+ IMA_COUNT_LEAVE);
+ }
+#endif
+
+out:
+ return err;
+}
+
+static int aufs_permission(struct inode *inode, int mask)
+{
+ int err;
+ aufs_bindex_t bindex, bend;
+ const unsigned char isdir = !!S_ISDIR(inode->i_mode),
+ write_mask = !!(mask & (MAY_WRITE | MAY_APPEND));
+ struct inode *h_inode;
+ struct super_block *sb;
+ struct au_branch *br;
+
+ /* todo: support rcu-walk? */
+ if (mask & MAY_NOT_BLOCK)
+ return -ECHILD;
+
+ sb = inode->i_sb;
+ si_read_lock(sb, AuLock_FLUSH);
+ ii_read_lock_child(inode);
+#if 0
+ err = au_iigen_test(inode, au_sigen(sb));
+ if (unlikely(err))
+ goto out;
+#endif
+
+ if (!isdir
+ || write_mask
+ || au_opt_test(au_mntflags(sb), DIRPERM1)) {
+ err = au_busy_or_stale();
+ h_inode = au_h_iptr(inode, au_ibstart(inode));
+ if (unlikely(!h_inode
+ || (h_inode->i_mode & S_IFMT)
+ != (inode->i_mode & S_IFMT)))
+ goto out;
+
+ err = 0;
+ bindex = au_ibstart(inode);
+ br = au_sbr(sb, bindex);
+ err = h_permission(h_inode, mask, au_br_mnt(br), br->br_perm);
+ if (write_mask
+ && !err
+ && !special_file(h_inode->i_mode)) {
+ /* test whether the upper writable branch exists */
+ err = -EROFS;
+ for (; bindex >= 0; bindex--)
+ if (!au_br_rdonly(au_sbr(sb, bindex))) {
+ err = 0;
+ break;
+ }
+ }
+ goto out;
+ }
+
+ /* non-write to dir */
+ err = 0;
+ bend = au_ibend(inode);
+ for (bindex = au_ibstart(inode); !err && bindex <= bend; bindex++) {
+ h_inode = au_h_iptr(inode, bindex);
+ if (h_inode) {
+ err = au_busy_or_stale();
+ if (unlikely(!S_ISDIR(h_inode->i_mode)))
+ break;
+
+ br = au_sbr(sb, bindex);
+ err = h_permission(h_inode, mask, au_br_mnt(br),
+ br->br_perm);
+ }
+ }
+
+out:
+ ii_read_unlock(inode);
+ si_read_unlock(sb);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct dentry *aufs_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
+{
+ struct dentry *ret, *parent;
+ struct inode *inode;
+ struct super_block *sb;
+ int err, npositive;
+
+ IMustLock(dir);
+
+ /* todo: support rcu-walk? */
+ ret = ERR_PTR(-ECHILD);
+ if (flags & LOOKUP_RCU)
+ goto out;
+
+ ret = ERR_PTR(-ENAMETOOLONG);
+ if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN))
+ goto out;
+
+ sb = dir->i_sb;
+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+ ret = ERR_PTR(err);
+ if (unlikely(err))
+ goto out;
+
+ err = au_di_init(dentry);
+ ret = ERR_PTR(err);
+ if (unlikely(err))
+ goto out_si;
+
+ inode = NULL;
+ npositive = 0; /* suppress a warning */
+ parent = dentry->d_parent; /* dir inode is locked */
+ di_read_lock_parent(parent, AuLock_IR);
+ err = au_alive_dir(parent);
+ if (!err)
+ err = au_digen_test(parent, au_sigen(sb));
+ if (!err) {
+ npositive = au_lkup_dentry(dentry, au_dbstart(parent),
+ /*type*/0);
+ err = npositive;
+ }
+ di_read_unlock(parent, AuLock_IR);
+ ret = ERR_PTR(err);
+ if (unlikely(err < 0))
+ goto out_unlock;
+
+ if (npositive) {
+ inode = au_new_inode(dentry, /*must_new*/0);
+ if (IS_ERR(inode)) {
+ ret = (void *)inode;
+ inode = NULL;
+ goto out_unlock;
+ }
+ }
+
+ if (inode)
+ atomic_inc(&inode->i_count);
+ ret = d_splice_alias(inode, dentry);
+#if 0
+ if (unlikely(d_need_lookup(dentry))) {
+ spin_lock(&dentry->d_lock);
+ dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
+ spin_unlock(&dentry->d_lock);
+ } else
+#endif
+ if (inode) {
+ if (!IS_ERR(ret)) {
+ iput(inode);
+ if (ret && ret != dentry)
+ ii_write_unlock(inode);
+ } else {
+ ii_write_unlock(inode);
+ iput(inode);
+ inode = NULL;
+ }
+ }
+
+out_unlock:
+ di_write_unlock(dentry);
+ if (inode) {
+ /* verbose coding for lock class name */
+ if (unlikely(S_ISLNK(inode->i_mode)))
+ au_rw_class(&au_di(dentry)->di_rwsem,
+ au_lc_key + AuLcSymlink_DIINFO);
+ else if (unlikely(S_ISDIR(inode->i_mode)))
+ au_rw_class(&au_di(dentry)->di_rwsem,
+ au_lc_key + AuLcDir_DIINFO);
+ else /* likely */
+ au_rw_class(&au_di(dentry)->di_rwsem,
+ au_lc_key + AuLcNonDir_DIINFO);
+ }
+out_si:
+ si_read_unlock(sb);
+out:
+ return ret;
+}
+
+/* ---------------------------------------------------------------------- */
+
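+/*
+ * the lower file opened in ->atomic_open() is passed to au_do_aopen()
+ * via this node on the per-sb si_aopen list.
+ */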
+struct aopen_node {
+ struct hlist_node hlist;
+ struct file *file, *h_file;
+};
+
+static int au_do_aopen(struct inode *inode, struct file *file)
+{
+ struct au_sphlhead *aopen;
+ struct aopen_node *node;
+ struct au_do_open_args args = {
+ .no_lock = 1,
+ .open = au_do_open_nondir
+ };
+
+ aopen = &au_sbi(inode->i_sb)->si_aopen;
+ spin_lock(&aopen->spin);
+ hlist_for_each_entry(node, &aopen->head, hlist)
+ if (node->file == file) {
+ args.h_file = node->h_file;
+ break;
+ }
+ spin_unlock(&aopen->spin);
+ /* AuDebugOn(!args.h_file); */
+
+ return au_do_open(file, &args);
+}
+
+static int aufs_atomic_open(struct inode *dir, struct dentry *dentry,
+ struct file *file, unsigned int open_flag,
+ umode_t create_mode, int *opened)
+{
+ int err, h_opened = *opened;
+ struct dentry *parent;
+ struct dentry *d;
+ struct au_sphlhead *aopen;
+ struct vfsub_aopen_args args = {
+ .open_flag = open_flag,
+ .create_mode = create_mode,
+ .opened = &h_opened
+ };
+ struct aopen_node aopen_node = {
+ .file = file
+ };
+
+ IMustLock(dir);
+ AuDbg("open_flag 0x%x\n", open_flag);
+ AuDbgDentry(dentry);
+
+ err = 0;
+ if (!au_di(dentry)) {
+ d = aufs_lookup(dir, dentry, /*flags*/0);
+ if (IS_ERR(d)) {
+ err = PTR_ERR(d);
+ goto out;
+ } else if (d) {
+ /*
+ * obsoleted dentry found.
+ * another error will be returned later.
+ */
+ d_drop(d);
+ dput(d);
+ AuDbgDentry(d);
+ }
+ AuDbgDentry(dentry);
+ }
+
+ if (d_is_positive(dentry)
+ || d_unhashed(dentry)
+ || d_unlinked(dentry)
+ || !(open_flag & O_CREAT))
+ goto out_no_open;
+
+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH | AuLock_GEN);
+ if (unlikely(err))
+ goto out;
+
+ parent = dentry->d_parent; /* dir is locked */
+ di_write_lock_parent(parent);
+ err = au_lkup_dentry(dentry, /*bstart*/0, /*type*/0);
+ if (unlikely(err))
+ goto out_unlock;
+
+ AuDbgDentry(dentry);
+ if (d_is_positive(dentry))
+ goto out_unlock;
+
+ args.file = get_empty_filp();
+ err = PTR_ERR(args.file);
+ if (IS_ERR(args.file))
+ goto out_unlock;
+
+ args.file->f_flags = file->f_flags;
+ err = au_aopen_or_create(dir, dentry, &args);
+ AuTraceErr(err);
+ AuDbgFile(args.file);
+ if (unlikely(err < 0)) {
+ if (h_opened & FILE_OPENED)
+ fput(args.file);
+ else
+ put_filp(args.file);
+ goto out_unlock;
+ }
+
+ /* some filesystems don't set FILE_CREATED even when they succeed? */
+ *opened |= FILE_CREATED;
+ if (h_opened & FILE_OPENED)
+ aopen_node.h_file = args.file;
+ else {
+ put_filp(args.file);
+ args.file = NULL;
+ }
+ aopen = &au_sbi(dir->i_sb)->si_aopen;
+ au_sphl_add(&aopen_node.hlist, aopen);
+ err = finish_open(file, dentry, au_do_aopen, opened);
+ au_sphl_del(&aopen_node.hlist, aopen);
+ AuTraceErr(err);
+ AuDbgFile(file);
+ if (aopen_node.h_file)
+ fput(aopen_node.h_file);
+
+out_unlock:
+ di_write_unlock(parent);
+ aufs_read_unlock(dentry, AuLock_DW);
+ AuDbgDentry(dentry);
+ if (unlikely(err))
+ goto out;
+out_no_open:
+ if (!err && !(*opened & FILE_CREATED)) {
+ AuLabel(out_no_open);
+ dget(dentry);
+ err = finish_no_open(file, dentry);
+ }
+out:
+ AuDbg("%pd%s%s\n", dentry,
+ (*opened & FILE_CREATED) ? " created" : "",
+ (*opened & FILE_OPENED) ? " opened" : "");
+ AuTraceErr(err);
+ return err;
+}
+
+
+/* ---------------------------------------------------------------------- */
+
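+/*
+ * prepare the parent dir on the branch @bcpup: copy-up (or copy-down) the
+ * ancestor dirs when they do not exist there yet, and create the negative
+ * lower dentry for @dentry when adding an entry. returns @bcpup on success.
+ */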
+static int au_wr_dir_cpup(struct dentry *dentry, struct dentry *parent,
+ const unsigned char add_entry, aufs_bindex_t bcpup,
+ aufs_bindex_t bstart)
+{
+ int err;
+ struct dentry *h_parent;
+ struct inode *h_dir;
+
+ if (add_entry)
+ IMustLock(d_inode(parent));
+ else
+ di_write_lock_parent(parent);
+
+ err = 0;
+ if (!au_h_dptr(parent, bcpup)) {
+ if (bstart > bcpup)
+ err = au_cpup_dirs(dentry, bcpup);
+ else if (bstart < bcpup)
+ err = au_cpdown_dirs(dentry, bcpup);
+ else
+ BUG();
+ }
+ if (!err && add_entry && !au_ftest_wrdir(add_entry, TMPFILE)) {
+ h_parent = au_h_dptr(parent, bcpup);
+ h_dir = d_inode(h_parent);
+ inode_lock_nested(h_dir, AuLsc_I_PARENT);
+ err = au_lkup_neg(dentry, bcpup, /*wh*/0);
+ /* todo: no unlock here */
+ inode_unlock(h_dir);
+
+ AuDbg("bcpup %d\n", bcpup);
+ if (!err) {
+ if (d_really_is_negative(dentry))
+ au_set_h_dptr(dentry, bstart, NULL);
+ au_update_dbrange(dentry, /*do_put_zero*/0);
+ }
+ }
+
+ if (!add_entry)
+ di_write_unlock(parent);
+ if (!err)
+ err = bcpup; /* success */
+
+ AuTraceErr(err);
+ return err;
+}
+
+/*
+ * decide the branch and the parent dir where we will create a new entry.
+ * returns new bindex or an error.
+ * copyup the parent dir if needed.
+ */
+int au_wr_dir(struct dentry *dentry, struct dentry *src_dentry,
+ struct au_wr_dir_args *args)
+{
+ int err;
+ unsigned int flags;
+ aufs_bindex_t bcpup, bstart, src_bstart;
+ const unsigned char add_entry
+ = au_ftest_wrdir(args->flags, ADD_ENTRY)
+ | au_ftest_wrdir(args->flags, TMPFILE);
+ struct super_block *sb;
+ struct dentry *parent;
+ struct au_sbinfo *sbinfo;
+
+ sb = dentry->d_sb;
+ sbinfo = au_sbi(sb);
+ parent = dget_parent(dentry);
+ bstart = au_dbstart(dentry);
+ bcpup = bstart;
+ if (args->force_btgt < 0) {
+ if (src_dentry) {
+ src_bstart = au_dbstart(src_dentry);
+ if (src_bstart < bstart)
+ bcpup = src_bstart;
+ } else if (add_entry) {
+ flags = 0;
+ if (au_ftest_wrdir(args->flags, ISDIR))
+ au_fset_wbr(flags, DIR);
+ err = AuWbrCreate(sbinfo, dentry, flags);
+ bcpup = err;
+ }
+
+ if (bcpup < 0 || au_test_ro(sb, bcpup, d_inode(dentry))) {
+ if (add_entry)
+ err = AuWbrCopyup(sbinfo, dentry);
+ else {
+ if (!IS_ROOT(dentry)) {
+ di_read_lock_parent(parent, !AuLock_IR);
+ err = AuWbrCopyup(sbinfo, dentry);
+ di_read_unlock(parent, !AuLock_IR);
+ } else
+ err = AuWbrCopyup(sbinfo, dentry);
+ }
+ bcpup = err;
+ if (unlikely(err < 0))
+ goto out;
+ }
+ } else {
+ bcpup = args->force_btgt;
+ AuDebugOn(au_test_ro(sb, bcpup, d_inode(dentry)));
+ }
+
+ AuDbg("bstart %d, bcpup %d\n", bstart, bcpup);
+ err = bcpup;
+ if (bcpup == bstart)
+ goto out; /* success */
+
+ /* copyup the new parent into the branch we process */
+ err = au_wr_dir_cpup(dentry, parent, add_entry, bcpup, bstart);
+ if (err >= 0) {
+ if (d_really_is_negative(dentry)) {
+ au_set_h_dptr(dentry, bstart, NULL);
+ au_set_dbstart(dentry, bcpup);
+ au_set_dbend(dentry, bcpup);
+ }
+ AuDebugOn(add_entry
+ && !au_ftest_wrdir(args->flags, TMPFILE)
+ && !au_h_dptr(dentry, bcpup));
+ }
+
+out:
+ dput(parent);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
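+/*
+ * "pin" the lower parent dir: hold references and the hi_inode mutex so that
+ * the branch and the lower parent stay stable while aufs operates on them.
+ */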
+void au_pin_hdir_unlock(struct au_pin *p)
+{
+ if (p->hdir)
+ au_hn_imtx_unlock(p->hdir);
+}
+
+int au_pin_hdir_lock(struct au_pin *p)
+{
+ int err;
+
+ err = 0;
+ if (!p->hdir)
+ goto out;
+
+ /* even if an error happens later, keep this lock */
+ au_hn_imtx_lock_nested(p->hdir, p->lsc_hi);
+
+ err = -EBUSY;
+ if (unlikely(p->hdir->hi_inode != d_inode(p->h_parent)))
+ goto out;
+
+ err = 0;
+ if (p->h_dentry)
+ err = au_h_verify(p->h_dentry, p->udba, p->hdir->hi_inode,
+ p->h_parent, p->br);
+
+out:
+ return err;
+}
+
+int au_pin_hdir_relock(struct au_pin *p)
+{
+ int err, i;
+ struct inode *h_i;
+ struct dentry *h_d[] = {
+ p->h_dentry,
+ p->h_parent
+ };
+
+ err = au_pin_hdir_lock(p);
+ if (unlikely(err))
+ goto out;
+
+ for (i = 0; !err && i < sizeof(h_d)/sizeof(*h_d); i++) {
+ if (!h_d[i])
+ continue;
+ if (d_is_positive(h_d[i])) {
+ h_i = d_inode(h_d[i]);
+ err = !h_i->i_nlink;
+ }
+ }
+
+out:
+ return err;
+}
+
+void au_pin_hdir_set_owner(struct au_pin *p, struct task_struct *task)
+{
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
+ p->hdir->hi_inode->i_mutex.owner = task;
+#endif
+}
+
+void au_pin_hdir_acquire_nest(struct au_pin *p)
+{
+ if (p->hdir) {
+ mutex_acquire_nest(&p->hdir->hi_inode->i_mutex.dep_map,
+ p->lsc_hi, 0, NULL, _RET_IP_);
+ au_pin_hdir_set_owner(p, current);
+ }
+}
+
+void au_pin_hdir_release(struct au_pin *p)
+{
+ if (p->hdir) {
+ au_pin_hdir_set_owner(p, p->task);
+ mutex_release(&p->hdir->hi_inode->i_mutex.dep_map, 1, _RET_IP_);
+ }
+}
+
+struct dentry *au_pinned_h_parent(struct au_pin *pin)
+{
+ if (pin && pin->parent)
+ return au_h_dptr(pin->parent, pin->bindex);
+ return NULL;
+}
+
+void au_unpin(struct au_pin *p)
+{
+ if (p->hdir)
+ au_pin_hdir_unlock(p);
+ if (p->h_mnt && au_ftest_pin(p->flags, MNT_WRITE))
+ vfsub_mnt_drop_write(p->h_mnt);
+ if (!p->hdir)
+ return;
+
+ if (!au_ftest_pin(p->flags, DI_LOCKED))
+ di_read_unlock(p->parent, AuLock_IR);
+ iput(p->hdir->hi_inode);
+ dput(p->parent);
+ p->parent = NULL;
+ p->hdir = NULL;
+ p->h_mnt = NULL;
+ /* do not clear p->task */
+}
+
+int au_do_pin(struct au_pin *p)
+{
+ int err;
+ struct super_block *sb;
+ struct inode *h_dir;
+
+ err = 0;
+ sb = p->dentry->d_sb;
+ p->br = au_sbr(sb, p->bindex);
+ if (IS_ROOT(p->dentry)) {
+ if (au_ftest_pin(p->flags, MNT_WRITE)) {
+ p->h_mnt = au_br_mnt(p->br);
+ err = vfsub_mnt_want_write(p->h_mnt);
+ if (unlikely(err)) {
+ au_fclr_pin(p->flags, MNT_WRITE);
+ goto out_err;
+ }
+ }
+ goto out;
+ }
+
+ p->h_dentry = NULL;
+ if (p->bindex <= au_dbend(p->dentry))
+ p->h_dentry = au_h_dptr(p->dentry, p->bindex);
+
+ p->parent = dget_parent(p->dentry);
+ if (!au_ftest_pin(p->flags, DI_LOCKED))
+ di_read_lock(p->parent, AuLock_IR, p->lsc_di);
+
+ h_dir = NULL;
+ p->h_parent = au_h_dptr(p->parent, p->bindex);
+ p->hdir = au_hi(d_inode(p->parent), p->bindex);
+ if (p->hdir)
+ h_dir = p->hdir->hi_inode;
+
+ /*
+ * in the udba case, or when DI_LOCKED is not set,
+ * p->parent may be different and h_parent can be NULL.
+ */
+ if (unlikely(!p->hdir || !h_dir || !p->h_parent)) {
+ err = -EBUSY;
+ if (!au_ftest_pin(p->flags, DI_LOCKED))
+ di_read_unlock(p->parent, AuLock_IR);
+ dput(p->parent);
+ p->parent = NULL;
+ goto out_err;
+ }
+
+ if (au_ftest_pin(p->flags, MNT_WRITE)) {
+ p->h_mnt = au_br_mnt(p->br);
+ err = vfsub_mnt_want_write(p->h_mnt);
+ if (unlikely(err)) {
+ au_fclr_pin(p->flags, MNT_WRITE);
+ if (!au_ftest_pin(p->flags, DI_LOCKED))
+ di_read_unlock(p->parent, AuLock_IR);
+ dput(p->parent);
+ p->parent = NULL;
+ goto out_err;
+ }
+ }
+
+ au_igrab(h_dir);
+ err = au_pin_hdir_lock(p);
+ if (!err)
+ goto out; /* success */
+
+ au_unpin(p);
+
+out_err:
+ pr_err("err %d\n", err);
+ err = au_busy_or_stale();
+out:
+ return err;
+}
+
+void au_pin_init(struct au_pin *p, struct dentry *dentry,
+ aufs_bindex_t bindex, int lsc_di, int lsc_hi,
+ unsigned int udba, unsigned char flags)
+{
+ p->dentry = dentry;
+ p->udba = udba;
+ p->lsc_di = lsc_di;
+ p->lsc_hi = lsc_hi;
+ p->flags = flags;
+ p->bindex = bindex;
+
+ p->parent = NULL;
+ p->hdir = NULL;
+ p->h_mnt = NULL;
+
+ p->h_dentry = NULL;
+ p->h_parent = NULL;
+ p->br = NULL;
+ p->task = current;
+}
+
+int au_pin(struct au_pin *pin, struct dentry *dentry, aufs_bindex_t bindex,
+ unsigned int udba, unsigned char flags)
+{
+ au_pin_init(pin, dentry, bindex, AuLsc_DI_PARENT, AuLsc_I_PARENT2,
+ udba, flags);
+ return au_do_pin(pin);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * ->setattr() and ->getattr() are called in various cases.
+ * chmod, stat: dentry is revalidated.
+ * fchmod, fstat: file and dentry are not revalidated, additionally they may be
+ * unhashed.
+ * for ->setattr(), ia->ia_file is passed from ftruncate only.
+ */
+/* todo: consolidate with do_refresh() and simple_reval_dpath() */
+int au_reval_for_attr(struct dentry *dentry, unsigned int sigen)
+{
+ int err;
+ struct dentry *parent;
+
+ err = 0;
+ if (au_digen_test(dentry, sigen)) {
+ parent = dget_parent(dentry);
+ di_read_lock_parent(parent, AuLock_IR);
+ err = au_refresh_dentry(dentry, parent);
+ di_read_unlock(parent, AuLock_IR);
+ dput(parent);
+ }
+
+ AuTraceErr(err);
+ return err;
+}
+
+int au_pin_and_icpup(struct dentry *dentry, struct iattr *ia,
+ struct au_icpup_args *a)
+{
+ int err;
+ loff_t sz;
+ aufs_bindex_t bstart, ibstart;
+ struct dentry *hi_wh, *parent;
+ struct inode *inode;
+ struct au_wr_dir_args wr_dir_args = {
+ .force_btgt = -1,
+ .flags = 0
+ };
+
+ if (d_is_dir(dentry))
+ au_fset_wrdir(wr_dir_args.flags, ISDIR);
+ /* plink or hi_wh() case */
+ bstart = au_dbstart(dentry);
+ inode = d_inode(dentry);
+ ibstart = au_ibstart(inode);
+ if (bstart != ibstart && !au_test_ro(inode->i_sb, ibstart, inode))
+ wr_dir_args.force_btgt = ibstart;
+ err = au_wr_dir(dentry, /*src_dentry*/NULL, &wr_dir_args);
+ if (unlikely(err < 0))
+ goto out;
+ a->btgt = err;
+ if (err != bstart)
+ au_fset_icpup(a->flags, DID_CPUP);
+
+ err = 0;
+ a->pin_flags = AuPin_MNT_WRITE;
+ parent = NULL;
+ if (!IS_ROOT(dentry)) {
+ au_fset_pin(a->pin_flags, DI_LOCKED);
+ parent = dget_parent(dentry);
+ di_write_lock_parent(parent);
+ }
+
+ err = au_pin(&a->pin, dentry, a->btgt, a->udba, a->pin_flags);
+ if (unlikely(err))
+ goto out_parent;
+
+ a->h_path.dentry = au_h_dptr(dentry, bstart);
+ sz = -1;
+ a->h_inode = d_inode(a->h_path.dentry);
+ if (ia && (ia->ia_valid & ATTR_SIZE)) {
+ inode_lock_nested(a->h_inode, AuLsc_I_CHILD);
+ if (ia->ia_size < i_size_read(a->h_inode))
+ sz = ia->ia_size;
+ inode_unlock(a->h_inode);
+ }
+
+ hi_wh = NULL;
+ if (au_ftest_icpup(a->flags, DID_CPUP) && d_unlinked(dentry)) {
+ hi_wh = au_hi_wh(inode, a->btgt);
+ if (!hi_wh) {
+ struct au_cp_generic cpg = {
+ .dentry = dentry,
+ .bdst = a->btgt,
+ .bsrc = -1,
+ .len = sz,
+ .pin = &a->pin
+ };
+ err = au_sio_cpup_wh(&cpg, /*file*/NULL);
+ if (unlikely(err))
+ goto out_unlock;
+ hi_wh = au_hi_wh(inode, a->btgt);
+ /* todo: revalidate hi_wh? */
+ }
+ }
+
+ if (parent) {
+ au_pin_set_parent_lflag(&a->pin, /*lflag*/0);
+ di_downgrade_lock(parent, AuLock_IR);
+ dput(parent);
+ parent = NULL;
+ }
+ if (!au_ftest_icpup(a->flags, DID_CPUP))
+ goto out; /* success */
+
+ if (!d_unhashed(dentry)) {
+ struct au_cp_generic cpg = {
+ .dentry = dentry,
+ .bdst = a->btgt,
+ .bsrc = bstart,
+ .len = sz,
+ .pin = &a->pin,
+ .flags = AuCpup_DTIME | AuCpup_HOPEN
+ };
+ err = au_sio_cpup_simple(&cpg);
+ if (!err)
+ a->h_path.dentry = au_h_dptr(dentry, a->btgt);
+ } else if (!hi_wh)
+ a->h_path.dentry = au_h_dptr(dentry, a->btgt);
+ else
+ a->h_path.dentry = hi_wh; /* do not dget here */
+
+out_unlock:
+ a->h_inode = d_inode(a->h_path.dentry);
+ if (!err)
+ goto out; /* success */
+ au_unpin(&a->pin);
+out_parent:
+ if (parent) {
+ di_write_unlock(parent);
+ dput(parent);
+ }
+out:
+ if (!err)
+ inode_lock_nested(a->h_inode, AuLsc_I_CHILD);
+ return err;
+}
+
+static int aufs_setattr(struct dentry *dentry, struct iattr *ia)
+{
+ int err;
+ struct inode *inode, *delegated;
+ struct super_block *sb;
+ struct file *file;
+ struct au_icpup_args *a;
+
+ inode = d_inode(dentry);
+ IMustLock(inode);
+
+ err = -ENOMEM;
+ a = kzalloc(sizeof(*a), GFP_NOFS);
+ if (unlikely(!a))
+ goto out;
+
+ if (ia->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
+ ia->ia_valid &= ~ATTR_MODE;
+
+ file = NULL;
+ sb = dentry->d_sb;
+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+ if (unlikely(err))
+ goto out_kfree;
+
+ if (ia->ia_valid & ATTR_FILE) {
+ /* currently ftruncate(2) only */
+ AuDebugOn(!d_is_reg(dentry));
+ file = ia->ia_file;
+ err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1);
+ if (unlikely(err))
+ goto out_si;
+ ia->ia_file = au_hf_top(file);
+ a->udba = AuOpt_UDBA_NONE;
+ } else {
+ /* fchmod() doesn't pass ia_file */
+ a->udba = au_opt_udba(sb);
+ di_write_lock_child(dentry);
+ /* no d_unlinked(), to set UDBA_NONE for root */
+ if (d_unhashed(dentry))
+ a->udba = AuOpt_UDBA_NONE;
+ if (a->udba != AuOpt_UDBA_NONE) {
+ AuDebugOn(IS_ROOT(dentry));
+ err = au_reval_for_attr(dentry, au_sigen(sb));
+ if (unlikely(err))
+ goto out_dentry;
+ }
+ }
+
+ err = au_pin_and_icpup(dentry, ia, a);
+ if (unlikely(err < 0))
+ goto out_dentry;
+ if (au_ftest_icpup(a->flags, DID_CPUP)) {
+ ia->ia_file = NULL;
+ ia->ia_valid &= ~ATTR_FILE;
+ }
+
+ a->h_path.mnt = au_sbr_mnt(sb, a->btgt);
+ if ((ia->ia_valid & (ATTR_MODE | ATTR_CTIME))
+ == (ATTR_MODE | ATTR_CTIME)) {
+ err = security_path_chmod(&a->h_path, ia->ia_mode);
+ if (unlikely(err))
+ goto out_unlock;
+ } else if ((ia->ia_valid & (ATTR_UID | ATTR_GID))
+ && (ia->ia_valid & ATTR_CTIME)) {
+ err = security_path_chown(&a->h_path, ia->ia_uid, ia->ia_gid);
+ if (unlikely(err))
+ goto out_unlock;
+ }
+
+ if (ia->ia_valid & ATTR_SIZE) {
+ struct file *f;
+
+ if (ia->ia_size < i_size_read(inode))
+ /* unmap only */
+ truncate_setsize(inode, ia->ia_size);
+
+ f = NULL;
+ if (ia->ia_valid & ATTR_FILE)
+ f = ia->ia_file;
+ inode_unlock(a->h_inode);
+ err = vfsub_trunc(&a->h_path, ia->ia_size, ia->ia_valid, f);
+ inode_lock_nested(a->h_inode, AuLsc_I_CHILD);
+ } else {
+ delegated = NULL;
+ while (1) {
+ err = vfsub_notify_change(&a->h_path, ia, &delegated);
+ if (delegated) {
+ err = break_deleg_wait(&delegated);
+ if (!err)
+ continue;
+ }
+ break;
+ }
+ }
+ /*
+ * regardless of the aufs 'acl' option setting.
+ * why don't all acl-aware fs call this func from their ->setattr()?
+ */
+ if (!err && (ia->ia_valid & ATTR_MODE))
+ err = vfsub_acl_chmod(a->h_inode, ia->ia_mode);
+ if (!err)
+ au_cpup_attr_changeable(inode);
+
+out_unlock:
+ inode_unlock(a->h_inode);
+ au_unpin(&a->pin);
+ if (unlikely(err))
+ au_update_dbstart(dentry);
+out_dentry:
+ di_write_unlock(dentry);
+ if (file) {
+ fi_write_unlock(file);
+ ia->ia_file = file;
+ ia->ia_valid |= ATTR_FILE;
+ }
+out_si:
+ si_read_unlock(sb);
+out_kfree:
+ kfree(a);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+#if IS_ENABLED(CONFIG_AUFS_XATTR) || IS_ENABLED(CONFIG_FS_POSIX_ACL)
+static int au_h_path_to_set_attr(struct dentry *dentry,
+ struct au_icpup_args *a, struct path *h_path)
+{
+ int err;
+ struct super_block *sb;
+
+ sb = dentry->d_sb;
+ a->udba = au_opt_udba(sb);
+ /* no d_unlinked(), to set UDBA_NONE for root */
+ if (d_unhashed(dentry))
+ a->udba = AuOpt_UDBA_NONE;
+ if (a->udba != AuOpt_UDBA_NONE) {
+ AuDebugOn(IS_ROOT(dentry));
+ err = au_reval_for_attr(dentry, au_sigen(sb));
+ if (unlikely(err))
+ goto out;
+ }
+ err = au_pin_and_icpup(dentry, /*ia*/NULL, a);
+ if (unlikely(err < 0))
+ goto out;
+
+ h_path->dentry = a->h_path.dentry;
+ h_path->mnt = au_sbr_mnt(sb, a->btgt);
+
+out:
+ return err;
+}
+
+ssize_t au_srxattr(struct dentry *dentry, struct au_srxattr *arg)
+{
+ int err;
+ struct path h_path;
+ struct super_block *sb;
+ struct au_icpup_args *a;
+ struct inode *inode, *h_inode;
+
+ inode = d_inode(dentry);
+ IMustLock(inode);
+
+ err = -ENOMEM;
+ a = kzalloc(sizeof(*a), GFP_NOFS);
+ if (unlikely(!a))
+ goto out;
+
+ sb = dentry->d_sb;
+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+ if (unlikely(err))
+ goto out_kfree;
+
+ h_path.dentry = NULL; /* silence gcc */
+ di_write_lock_child(dentry);
+ err = au_h_path_to_set_attr(dentry, a, &h_path);
+ if (unlikely(err))
+ goto out_di;
+
+ inode_unlock(a->h_inode);
+ switch (arg->type) {
+ case AU_XATTR_SET:
+ err = vfsub_setxattr(h_path.dentry,
+ arg->u.set.name, arg->u.set.value,
+ arg->u.set.size, arg->u.set.flags);
+ break;
+ case AU_XATTR_REMOVE:
+ err = vfsub_removexattr(h_path.dentry, arg->u.remove.name);
+ break;
+ case AU_ACL_SET:
+ err = -EOPNOTSUPP;
+ h_inode = d_inode(h_path.dentry);
+ if (h_inode->i_op->set_acl)
+ err = h_inode->i_op->set_acl(h_inode,
+ arg->u.acl_set.acl,
+ arg->u.acl_set.type);
+ break;
+ }
+ if (!err)
+ au_cpup_attr_timesizes(inode);
+
+ au_unpin(&a->pin);
+ if (unlikely(err))
+ au_update_dbstart(dentry);
+
+out_di:
+ di_write_unlock(dentry);
+ si_read_unlock(sb);
+out_kfree:
+ kfree(a);
+out:
+ AuTraceErr(err);
+ return err;
+}
+#endif
+
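+/* copy the attributes just obtained from the lower inode into the aufs inode */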
+static void au_refresh_iattr(struct inode *inode, struct kstat *st,
+ unsigned int nlink)
+{
+ unsigned int n;
+
+ inode->i_mode = st->mode;
+ /* don't i_[ug]id_write() here */
+ inode->i_uid = st->uid;
+ inode->i_gid = st->gid;
+ inode->i_atime = st->atime;
+ inode->i_mtime = st->mtime;
+ inode->i_ctime = st->ctime;
+
+ au_cpup_attr_nlink(inode, /*force*/0);
+ if (S_ISDIR(inode->i_mode)) {
+ n = inode->i_nlink;
+ n -= nlink;
+ n += st->nlink;
+ smp_mb(); /* for i_nlink */
+ /* 0 can happen */
+ set_nlink(inode, n);
+ }
+
+ spin_lock(&inode->i_lock);
+ inode->i_blocks = st->blocks;
+ i_size_write(inode, st->size);
+ spin_unlock(&inode->i_lock);
+}
+
+/*
+ * common routine for aufs_getattr() and aufs_getxattr().
+ * returns zero or negative (an error).
+ * @dentry will be read-locked on success.
+ */
+int au_h_path_getattr(struct dentry *dentry, int force, struct path *h_path)
+{
+ int err;
+ unsigned int mnt_flags, sigen;
+ unsigned char udba_none;
+ aufs_bindex_t bindex;
+ struct super_block *sb, *h_sb;
+ struct inode *inode;
+
+ h_path->mnt = NULL;
+ h_path->dentry = NULL;
+
+ err = 0;
+ sb = dentry->d_sb;
+ mnt_flags = au_mntflags(sb);
+ udba_none = !!au_opt_test(mnt_flags, UDBA_NONE);
+
+ /* support fstat(2) */
+ if (!d_unlinked(dentry) && !udba_none) {
+ sigen = au_sigen(sb);
+ err = au_digen_test(dentry, sigen);
+ if (!err) {
+ di_read_lock_child(dentry, AuLock_IR);
+ err = au_dbrange_test(dentry);
+ if (unlikely(err)) {
+ di_read_unlock(dentry, AuLock_IR);
+ goto out;
+ }
+ } else {
+ AuDebugOn(IS_ROOT(dentry));
+ di_write_lock_child(dentry);
+ err = au_dbrange_test(dentry);
+ if (!err)
+ err = au_reval_for_attr(dentry, sigen);
+ if (!err)
+ di_downgrade_lock(dentry, AuLock_IR);
+ else {
+ di_write_unlock(dentry);
+ goto out;
+ }
+ }
+ } else
+ di_read_lock_child(dentry, AuLock_IR);
+
+ inode = d_inode(dentry);
+ bindex = au_ibstart(inode);
+ h_path->mnt = au_sbr_mnt(sb, bindex);
+ h_sb = h_path->mnt->mnt_sb;
+ if (!force
+ && !au_test_fs_bad_iattr(h_sb)
+ && udba_none)
+ goto out; /* success */
+
+ if (au_dbstart(dentry) == bindex)
+ h_path->dentry = au_h_dptr(dentry, bindex);
+ else if (au_opt_test(mnt_flags, PLINK) && au_plink_test(inode)) {
+ h_path->dentry = au_plink_lkup(inode, bindex);
+ if (IS_ERR(h_path->dentry))
+ /* pretending success */
+ h_path->dentry = NULL;
+ else
+ dput(h_path->dentry);
+ }
+
+out:
+ return err;
+}
+
+static int aufs_getattr(struct vfsmount *mnt __maybe_unused,
+ struct dentry *dentry, struct kstat *st)
+{
+ int err;
+ unsigned char positive;
+ struct path h_path;
+ struct inode *inode;
+ struct super_block *sb;
+
+ inode = d_inode(dentry);
+ sb = dentry->d_sb;
+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+ if (unlikely(err))
+ goto out;
+ err = au_h_path_getattr(dentry, /*force*/0, &h_path);
+ if (unlikely(err))
+ goto out_si;
+ if (unlikely(!h_path.dentry))
+ /* illegally overlapped or something */
+ goto out_fill; /* pretending success */
+
+ positive = d_is_positive(h_path.dentry);
+ if (positive)
+ err = vfs_getattr(&h_path, st);
+ if (!err) {
+ if (positive)
+ au_refresh_iattr(inode, st,
+ d_inode(h_path.dentry)->i_nlink);
+ goto out_fill; /* success */
+ }
+ AuTraceErr(err);
+ goto out_di;
+
+out_fill:
+ generic_fillattr(inode, st);
+out_di:
+ di_read_unlock(dentry, AuLock_IR);
+out_si:
+ si_read_unlock(sb);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static const char *aufs_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *done)
+{
+ const char *ret;
+ struct dentry *h_dentry;
+ struct inode *h_inode;
+ int err;
+ aufs_bindex_t bindex;
+
+ ret = NULL; /* suppress a warning */
+ err = -ECHILD;
+ if (!dentry)
+ goto out;
+
+ err = aufs_read_lock(dentry, AuLock_IR | AuLock_GEN);
+ if (unlikely(err))
+ goto out;
+
+ err = au_d_hashed_positive(dentry);
+ if (unlikely(err))
+ goto out_unlock;
+
+ err = -EINVAL;
+ inode = d_inode(dentry);
+ bindex = au_ibstart(inode);
+ h_inode = au_h_iptr(inode, bindex);
+ if (unlikely(!h_inode->i_op->get_link))
+ goto out_unlock;
+
+ err = -EBUSY;
+ h_dentry = NULL;
+ if (au_dbstart(dentry) <= bindex) {
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (h_dentry)
+ dget(h_dentry);
+ }
+ if (!h_dentry) {
+ h_dentry = d_find_any_alias(h_inode);
+ if (IS_ERR(h_dentry)) {
+ err = PTR_ERR(h_dentry);
+ goto out_unlock;
+ }
+ }
+ if (unlikely(!h_dentry))
+ goto out_unlock;
+
+ err = 0;
+ AuDbg("%pf\n", h_inode->i_op->get_link);
+ AuDbgDentry(h_dentry);
+ ret = h_inode->i_op->get_link(h_dentry, h_inode, done);
+ dput(h_dentry);
+ if (IS_ERR(ret))
+ err = PTR_ERR(ret);
+
+out_unlock:
+ aufs_read_unlock(dentry, AuLock_IR);
+out:
+ if (unlikely(err))
+ ret = ERR_PTR(err);
+ AuTraceErrPtr(ret);
+ return ret;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int aufs_update_time(struct inode *inode, struct timespec *ts, int flags)
+{
+ int err;
+ struct super_block *sb;
+ struct inode *h_inode;
+
+ sb = inode->i_sb;
+ /* mmap_sem might be acquired already, cf. aufs_mmap() */
+ lockdep_off();
+ si_read_lock(sb, AuLock_FLUSH);
+ ii_write_lock_child(inode);
+ lockdep_on();
+ h_inode = au_h_iptr(inode, au_ibstart(inode));
+ err = vfsub_update_time(h_inode, ts, flags);
+ lockdep_off();
+ if (!err)
+ au_cpup_attr_timesizes(inode);
+ ii_write_unlock(inode);
+ si_read_unlock(sb);
+ lockdep_on();
+
+ if (!err && (flags & S_VERSION))
+ inode_inc_iversion(inode);
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* no getattr version will be set by module.c:aufs_init() */
+struct inode_operations aufs_iop_nogetattr[AuIop_Last],
+ aufs_iop[] = {
+ [AuIop_SYMLINK] = {
+ .permission = aufs_permission,
+#ifdef CONFIG_FS_POSIX_ACL
+ .get_acl = aufs_get_acl,
+		.set_acl	= aufs_set_acl, /* unsupported for symlinks? */
+#endif
+
+ .setattr = aufs_setattr,
+ .getattr = aufs_getattr,
+
+#ifdef CONFIG_AUFS_XATTR
+ .setxattr = aufs_setxattr,
+ .getxattr = aufs_getxattr,
+ .listxattr = aufs_listxattr,
+ .removexattr = aufs_removexattr,
+#endif
+
+ .readlink = generic_readlink,
+ .get_link = aufs_get_link,
+
+ /* .update_time = aufs_update_time */
+ },
+ [AuIop_DIR] = {
+ .create = aufs_create,
+ .lookup = aufs_lookup,
+ .link = aufs_link,
+ .unlink = aufs_unlink,
+ .symlink = aufs_symlink,
+ .mkdir = aufs_mkdir,
+ .rmdir = aufs_rmdir,
+ .mknod = aufs_mknod,
+ .rename = aufs_rename,
+
+ .permission = aufs_permission,
+#ifdef CONFIG_FS_POSIX_ACL
+ .get_acl = aufs_get_acl,
+ .set_acl = aufs_set_acl,
+#endif
+
+ .setattr = aufs_setattr,
+ .getattr = aufs_getattr,
+
+#ifdef CONFIG_AUFS_XATTR
+ .setxattr = aufs_setxattr,
+ .getxattr = aufs_getxattr,
+ .listxattr = aufs_listxattr,
+ .removexattr = aufs_removexattr,
+#endif
+
+ .update_time = aufs_update_time,
+ .atomic_open = aufs_atomic_open,
+ .tmpfile = aufs_tmpfile
+ },
+ [AuIop_OTHER] = {
+ .permission = aufs_permission,
+#ifdef CONFIG_FS_POSIX_ACL
+ .get_acl = aufs_get_acl,
+ .set_acl = aufs_set_acl,
+#endif
+
+ .setattr = aufs_setattr,
+ .getattr = aufs_getattr,
+
+#ifdef CONFIG_AUFS_XATTR
+ .setxattr = aufs_setxattr,
+ .getxattr = aufs_getxattr,
+ .listxattr = aufs_listxattr,
+ .removexattr = aufs_removexattr,
+#endif
+
+ .update_time = aufs_update_time
+ }
+};
diff --git a/fs/aufs/i_op_add.c b/fs/aufs/i_op_add.c
new file mode 100644
index 000000000..533b36a2a
--- /dev/null
+++ b/fs/aufs/i_op_add.c
@@ -0,0 +1,919 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * inode operations (add entry)
+ */
+
+#include "aufs.h"
+
+/*
+ * final procedure of adding a new entry, except for link(2).
+ * remove the whiteout, instantiate the dentry, copy up the parent dir's times
+ * and size, and update its version.
+ * if it fails, re-create the removed whiteout.
+ */
+static int epilog(struct inode *dir, aufs_bindex_t bindex,
+ struct dentry *wh_dentry, struct dentry *dentry)
+{
+ int err, rerr;
+ aufs_bindex_t bwh;
+ struct path h_path;
+ struct super_block *sb;
+ struct inode *inode, *h_dir;
+ struct dentry *wh;
+
+ bwh = -1;
+ sb = dir->i_sb;
+ if (wh_dentry) {
+ h_dir = d_inode(wh_dentry->d_parent); /* dir inode is locked */
+ IMustLock(h_dir);
+ AuDebugOn(au_h_iptr(dir, bindex) != h_dir);
+ bwh = au_dbwh(dentry);
+ h_path.dentry = wh_dentry;
+ h_path.mnt = au_sbr_mnt(sb, bindex);
+ err = au_wh_unlink_dentry(au_h_iptr(dir, bindex), &h_path,
+ dentry);
+ if (unlikely(err))
+ goto out;
+ }
+
+ inode = au_new_inode(dentry, /*must_new*/1);
+ if (!IS_ERR(inode)) {
+ d_instantiate(dentry, inode);
+ dir = d_inode(dentry->d_parent); /* dir inode is locked */
+ IMustLock(dir);
+ au_dir_ts(dir, bindex);
+ dir->i_version++;
+ au_fhsm_wrote(sb, bindex, /*force*/0);
+ return 0; /* success */
+ }
+
+ err = PTR_ERR(inode);
+ if (!wh_dentry)
+ goto out;
+
+ /* revert */
+ /* dir inode is locked */
+ wh = au_wh_create(dentry, bwh, wh_dentry->d_parent);
+ rerr = PTR_ERR(wh);
+ if (IS_ERR(wh)) {
+ AuIOErr("%pd reverting whiteout failed(%d, %d)\n",
+ dentry, err, rerr);
+ err = -EIO;
+ } else
+ dput(wh);
+
+out:
+ return err;
+}
+
+static int au_d_may_add(struct dentry *dentry)
+{
+ int err;
+
+ err = 0;
+ if (unlikely(d_unhashed(dentry)))
+ err = -ENOENT;
+ if (unlikely(d_really_is_positive(dentry)))
+ err = -EEXIST;
+ return err;
+}
+
+/*
+ * simple tests for the adding inode operations.
+ * following the checks in vfs, plus the parent-child relationship.
+ */
+int au_may_add(struct dentry *dentry, aufs_bindex_t bindex,
+ struct dentry *h_parent, int isdir)
+{
+ int err;
+ umode_t h_mode;
+ struct dentry *h_dentry;
+ struct inode *h_inode;
+
+ err = -ENAMETOOLONG;
+ if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN))
+ goto out;
+
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (d_really_is_negative(dentry)) {
+ err = -EEXIST;
+ if (unlikely(d_is_positive(h_dentry)))
+ goto out;
+ } else {
+ /* rename(2) case */
+ err = -EIO;
+ if (unlikely(d_is_negative(h_dentry)))
+ goto out;
+ h_inode = d_inode(h_dentry);
+ if (unlikely(!h_inode->i_nlink))
+ goto out;
+
+ h_mode = h_inode->i_mode;
+ if (!isdir) {
+ err = -EISDIR;
+ if (unlikely(S_ISDIR(h_mode)))
+ goto out;
+ } else if (unlikely(!S_ISDIR(h_mode))) {
+ err = -ENOTDIR;
+ goto out;
+ }
+ }
+
+ err = 0;
+ /* expected parent dir is locked */
+ if (unlikely(h_parent != h_dentry->d_parent))
+ err = -EIO;
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/*
+ * initial procedure of adding a new entry.
+ * prepare a writable branch and the parent dir, lock it,
+ * and look up a whiteout for the new entry.
+ */
+static struct dentry*
+lock_hdir_lkup_wh(struct dentry *dentry, struct au_dtime *dt,
+ struct dentry *src_dentry, struct au_pin *pin,
+ struct au_wr_dir_args *wr_dir_args)
+{
+ struct dentry *wh_dentry, *h_parent;
+ struct super_block *sb;
+ struct au_branch *br;
+ int err;
+ unsigned int udba;
+ aufs_bindex_t bcpup;
+
+ AuDbg("%pd\n", dentry);
+
+ err = au_wr_dir(dentry, src_dentry, wr_dir_args);
+ bcpup = err;
+ wh_dentry = ERR_PTR(err);
+ if (unlikely(err < 0))
+ goto out;
+
+ sb = dentry->d_sb;
+ udba = au_opt_udba(sb);
+ err = au_pin(pin, dentry, bcpup, udba,
+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+ wh_dentry = ERR_PTR(err);
+ if (unlikely(err))
+ goto out;
+
+ h_parent = au_pinned_h_parent(pin);
+ if (udba != AuOpt_UDBA_NONE
+ && au_dbstart(dentry) == bcpup)
+ err = au_may_add(dentry, bcpup, h_parent,
+ au_ftest_wrdir(wr_dir_args->flags, ISDIR));
+ else if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN))
+ err = -ENAMETOOLONG;
+ wh_dentry = ERR_PTR(err);
+ if (unlikely(err))
+ goto out_unpin;
+
+ br = au_sbr(sb, bcpup);
+ if (dt) {
+ struct path tmp = {
+ .dentry = h_parent,
+ .mnt = au_br_mnt(br)
+ };
+ au_dtime_store(dt, au_pinned_parent(pin), &tmp);
+ }
+
+ wh_dentry = NULL;
+ if (bcpup != au_dbwh(dentry))
+ goto out; /* success */
+
+ /*
+	 * ENAMETOOLONG here means that if we allowed creating such a name, it
+	 * could not be removed in the future. So we don't allow such a name
+	 * here, and we don't handle ENAMETOOLONG differently here.
+ */
+ wh_dentry = au_wh_lkup(h_parent, &dentry->d_name, br);
+
+out_unpin:
+ if (IS_ERR(wh_dentry))
+ au_unpin(pin);
+out:
+ return wh_dentry;
+}
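+
+/*
+ * note: an aufs whiteout is a ".wh.<name>" entry on the writable branch.
+ * au_wh_lkup() above looks it up so that the caller can unlink it once the
+ * real entry has been created; cf. epilog() above.
+ */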
+
+/* ---------------------------------------------------------------------- */
+
+enum { Mknod, Symlink, Creat };
+struct simple_arg {
+ int type;
+ union {
+ struct {
+ umode_t mode;
+ bool want_excl;
+ bool try_aopen;
+ struct vfsub_aopen_args *aopen;
+ } c;
+ struct {
+ const char *symname;
+ } s;
+ struct {
+ umode_t mode;
+ dev_t dev;
+ } m;
+ } u;
+};
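+
+/*
+ * add_simple() below dispatches on arg->type: Creat uses u.c, Symlink uses
+ * u.s, and Mknod uses u.m; see aufs_create(), aufs_symlink() and aufs_mknod()
+ * for how each caller fills in the union.
+ */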
+
+static int add_simple(struct inode *dir, struct dentry *dentry,
+ struct simple_arg *arg)
+{
+ int err, rerr;
+ aufs_bindex_t bstart;
+ unsigned char created;
+ const unsigned char try_aopen
+ = (arg->type == Creat && arg->u.c.try_aopen);
+ struct dentry *wh_dentry, *parent;
+ struct inode *h_dir;
+ struct super_block *sb;
+ struct au_branch *br;
+	/* to reduce stack size */
+ struct {
+ struct au_dtime dt;
+ struct au_pin pin;
+ struct path h_path;
+ struct au_wr_dir_args wr_dir_args;
+ } *a;
+
+ AuDbg("%pd\n", dentry);
+ IMustLock(dir);
+
+ err = -ENOMEM;
+ a = kmalloc(sizeof(*a), GFP_NOFS);
+ if (unlikely(!a))
+ goto out;
+ a->wr_dir_args.force_btgt = -1;
+ a->wr_dir_args.flags = AuWrDir_ADD_ENTRY;
+
+ parent = dentry->d_parent; /* dir inode is locked */
+ if (!try_aopen) {
+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN);
+ if (unlikely(err))
+ goto out_free;
+ }
+ err = au_d_may_add(dentry);
+ if (unlikely(err))
+ goto out_unlock;
+ if (!try_aopen)
+ di_write_lock_parent(parent);
+ wh_dentry = lock_hdir_lkup_wh(dentry, &a->dt, /*src_dentry*/NULL,
+ &a->pin, &a->wr_dir_args);
+ err = PTR_ERR(wh_dentry);
+ if (IS_ERR(wh_dentry))
+ goto out_parent;
+
+ bstart = au_dbstart(dentry);
+ sb = dentry->d_sb;
+ br = au_sbr(sb, bstart);
+ a->h_path.dentry = au_h_dptr(dentry, bstart);
+ a->h_path.mnt = au_br_mnt(br);
+ h_dir = au_pinned_h_dir(&a->pin);
+ switch (arg->type) {
+ case Creat:
+ err = 0;
+ if (!try_aopen || !h_dir->i_op->atomic_open)
+ err = vfsub_create(h_dir, &a->h_path, arg->u.c.mode,
+ arg->u.c.want_excl);
+ else
+ err = vfsub_atomic_open(h_dir, a->h_path.dentry,
+ arg->u.c.aopen, br);
+ break;
+ case Symlink:
+ err = vfsub_symlink(h_dir, &a->h_path, arg->u.s.symname);
+ break;
+ case Mknod:
+ err = vfsub_mknod(h_dir, &a->h_path, arg->u.m.mode,
+ arg->u.m.dev);
+ break;
+ default:
+ BUG();
+ }
+ created = !err;
+ if (!err)
+ err = epilog(dir, bstart, wh_dentry, dentry);
+
+ /* revert */
+ if (unlikely(created && err && d_is_positive(a->h_path.dentry))) {
+ /* no delegation since it is just created */
+ rerr = vfsub_unlink(h_dir, &a->h_path, /*delegated*/NULL,
+ /*force*/0);
+ if (rerr) {
+ AuIOErr("%pd revert failure(%d, %d)\n",
+ dentry, err, rerr);
+ err = -EIO;
+ }
+ au_dtime_revert(&a->dt);
+ }
+
+ if (!err && try_aopen && !h_dir->i_op->atomic_open)
+ *arg->u.c.aopen->opened |= FILE_CREATED;
+
+ au_unpin(&a->pin);
+ dput(wh_dentry);
+
+out_parent:
+ if (!try_aopen)
+ di_write_unlock(parent);
+out_unlock:
+ if (unlikely(err)) {
+ au_update_dbstart(dentry);
+ d_drop(dentry);
+ }
+ if (!try_aopen)
+ aufs_read_unlock(dentry, AuLock_DW);
+out_free:
+ kfree(a);
+out:
+ return err;
+}
+
+int aufs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+ dev_t dev)
+{
+ struct simple_arg arg = {
+ .type = Mknod,
+ .u.m = {
+ .mode = mode,
+ .dev = dev
+ }
+ };
+ return add_simple(dir, dentry, &arg);
+}
+
+int aufs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
+{
+ struct simple_arg arg = {
+ .type = Symlink,
+ .u.s.symname = symname
+ };
+ return add_simple(dir, dentry, &arg);
+}
+
+int aufs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ bool want_excl)
+{
+ struct simple_arg arg = {
+ .type = Creat,
+ .u.c = {
+ .mode = mode,
+ .want_excl = want_excl
+ }
+ };
+ return add_simple(dir, dentry, &arg);
+}
+
+int au_aopen_or_create(struct inode *dir, struct dentry *dentry,
+ struct vfsub_aopen_args *aopen_args)
+{
+ struct simple_arg arg = {
+ .type = Creat,
+ .u.c = {
+ .mode = aopen_args->create_mode,
+ .want_excl = aopen_args->open_flag & O_EXCL,
+ .try_aopen = true,
+ .aopen = aopen_args
+ }
+ };
+ return add_simple(dir, dentry, &arg);
+}
+
+int aufs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+ int err;
+ aufs_bindex_t bindex;
+ struct super_block *sb;
+ struct dentry *parent, *h_parent, *h_dentry;
+ struct inode *h_dir, *inode;
+ struct vfsmount *h_mnt;
+ struct au_wr_dir_args wr_dir_args = {
+ .force_btgt = -1,
+ .flags = AuWrDir_TMPFILE
+ };
+
+ /* copy-up may happen */
+ inode_lock(dir);
+
+ sb = dir->i_sb;
+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+ if (unlikely(err))
+ goto out;
+
+ err = au_di_init(dentry);
+ if (unlikely(err))
+ goto out_si;
+
+ err = -EBUSY;
+ parent = d_find_any_alias(dir);
+ AuDebugOn(!parent);
+ di_write_lock_parent(parent);
+ if (unlikely(d_inode(parent) != dir))
+ goto out_parent;
+
+ err = au_digen_test(parent, au_sigen(sb));
+ if (unlikely(err))
+ goto out_parent;
+
+ bindex = au_dbstart(parent);
+ au_set_dbstart(dentry, bindex);
+ au_set_dbend(dentry, bindex);
+ err = au_wr_dir(dentry, /*src_dentry*/NULL, &wr_dir_args);
+ bindex = err;
+ if (unlikely(err < 0))
+ goto out_parent;
+
+ err = -EOPNOTSUPP;
+ h_dir = au_h_iptr(dir, bindex);
+ if (unlikely(!h_dir->i_op->tmpfile))
+ goto out_parent;
+
+ h_mnt = au_sbr_mnt(sb, bindex);
+ err = vfsub_mnt_want_write(h_mnt);
+ if (unlikely(err))
+ goto out_parent;
+
+ h_parent = au_h_dptr(parent, bindex);
+ err = inode_permission(d_inode(h_parent), MAY_WRITE | MAY_EXEC);
+ if (unlikely(err))
+ goto out_mnt;
+
+ err = -ENOMEM;
+ h_dentry = d_alloc(h_parent, &dentry->d_name);
+ if (unlikely(!h_dentry))
+ goto out_mnt;
+
+ err = h_dir->i_op->tmpfile(h_dir, h_dentry, mode);
+ if (unlikely(err))
+ goto out_dentry;
+
+ au_set_dbstart(dentry, bindex);
+ au_set_dbend(dentry, bindex);
+ au_set_h_dptr(dentry, bindex, dget(h_dentry));
+ inode = au_new_inode(dentry, /*must_new*/1);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ au_set_h_dptr(dentry, bindex, NULL);
+ au_set_dbstart(dentry, -1);
+ au_set_dbend(dentry, -1);
+ } else {
+ if (!inode->i_nlink)
+ set_nlink(inode, 1);
+ d_tmpfile(dentry, inode);
+ au_di(dentry)->di_tmpfile = 1;
+
+ /* update without i_mutex */
+ if (au_ibstart(dir) == au_dbstart(dentry))
+ au_cpup_attr_timesizes(dir);
+ }
+
+out_dentry:
+ dput(h_dentry);
+out_mnt:
+ vfsub_mnt_drop_write(h_mnt);
+out_parent:
+ di_write_unlock(parent);
+ dput(parent);
+ di_write_unlock(dentry);
+ if (!err)
+#if 0
+ /* verbose coding for lock class name */
+ au_rw_class(&au_di(dentry)->di_rwsem,
+ au_lc_key + AuLcNonDir_DIINFO);
+#else
+ ;
+#endif
+ else {
+ au_di_fin(dentry);
+ dentry->d_fsdata = NULL;
+ }
+out_si:
+ si_read_unlock(sb);
+out:
+ inode_unlock(dir);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_link_args {
+ aufs_bindex_t bdst, bsrc;
+ struct au_pin pin;
+ struct path h_path;
+ struct dentry *src_parent, *parent;
+};
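+
+/*
+ * in the args above, bsrc is the branch which already holds the source inode
+ * and bdst is the branch where the new name will be created; cf. aufs_link().
+ */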
+
+static int au_cpup_before_link(struct dentry *src_dentry,
+ struct au_link_args *a)
+{
+ int err;
+ struct dentry *h_src_dentry;
+ struct au_cp_generic cpg = {
+ .dentry = src_dentry,
+ .bdst = a->bdst,
+ .bsrc = a->bsrc,
+ .len = -1,
+ .pin = &a->pin,
+ .flags = AuCpup_DTIME | AuCpup_HOPEN /* | AuCpup_KEEPLINO */
+ };
+
+ di_read_lock_parent(a->src_parent, AuLock_IR);
+ err = au_test_and_cpup_dirs(src_dentry, a->bdst);
+ if (unlikely(err))
+ goto out;
+
+ h_src_dentry = au_h_dptr(src_dentry, a->bsrc);
+ err = au_pin(&a->pin, src_dentry, a->bdst,
+ au_opt_udba(src_dentry->d_sb),
+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+ if (unlikely(err))
+ goto out;
+
+ err = au_sio_cpup_simple(&cpg);
+ au_unpin(&a->pin);
+
+out:
+ di_read_unlock(a->src_parent, AuLock_IR);
+ return err;
+}
+
+static int au_cpup_or_link(struct dentry *src_dentry, struct dentry *dentry,
+ struct au_link_args *a)
+{
+ int err;
+ unsigned char plink;
+ aufs_bindex_t bend;
+ struct dentry *h_src_dentry;
+ struct inode *h_inode, *inode, *delegated;
+ struct super_block *sb;
+ struct file *h_file;
+
+ plink = 0;
+ h_inode = NULL;
+ sb = src_dentry->d_sb;
+ inode = d_inode(src_dentry);
+ if (au_ibstart(inode) <= a->bdst)
+ h_inode = au_h_iptr(inode, a->bdst);
+ if (!h_inode || !h_inode->i_nlink) {
+		/* copy up src_dentry under the name of dentry. */
+ bend = au_dbend(dentry);
+ if (bend < a->bsrc)
+ au_set_dbend(dentry, a->bsrc);
+ au_set_h_dptr(dentry, a->bsrc,
+ dget(au_h_dptr(src_dentry, a->bsrc)));
+ dget(a->h_path.dentry);
+ au_set_h_dptr(dentry, a->bdst, NULL);
+ AuDbg("temporary d_inode...\n");
+ spin_lock(&dentry->d_lock);
+ dentry->d_inode = d_inode(src_dentry); /* tmp */
+ spin_unlock(&dentry->d_lock);
+ h_file = au_h_open_pre(dentry, a->bsrc, /*force_wr*/0);
+ if (IS_ERR(h_file))
+ err = PTR_ERR(h_file);
+ else {
+ struct au_cp_generic cpg = {
+ .dentry = dentry,
+ .bdst = a->bdst,
+ .bsrc = -1,
+ .len = -1,
+ .pin = &a->pin,
+ .flags = AuCpup_KEEPLINO
+ };
+ err = au_sio_cpup_simple(&cpg);
+ au_h_open_post(dentry, a->bsrc, h_file);
+ if (!err) {
+ dput(a->h_path.dentry);
+ a->h_path.dentry = au_h_dptr(dentry, a->bdst);
+ } else
+ au_set_h_dptr(dentry, a->bdst,
+ a->h_path.dentry);
+ }
+ spin_lock(&dentry->d_lock);
+ dentry->d_inode = NULL; /* restore */
+ spin_unlock(&dentry->d_lock);
+ AuDbg("temporary d_inode...done\n");
+ au_set_h_dptr(dentry, a->bsrc, NULL);
+ au_set_dbend(dentry, bend);
+ } else {
+		/* the inode of src_dentry already exists on the a->bdst branch */
+ h_src_dentry = d_find_alias(h_inode);
+ if (!h_src_dentry && au_plink_test(inode)) {
+ plink = 1;
+ h_src_dentry = au_plink_lkup(inode, a->bdst);
+ err = PTR_ERR(h_src_dentry);
+ if (IS_ERR(h_src_dentry))
+ goto out;
+
+ if (unlikely(d_is_negative(h_src_dentry))) {
+ dput(h_src_dentry);
+ h_src_dentry = NULL;
+ }
+
+ }
+ if (h_src_dentry) {
+ delegated = NULL;
+ err = vfsub_link(h_src_dentry, au_pinned_h_dir(&a->pin),
+ &a->h_path, &delegated);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal link\n");
+ iput(delegated);
+ }
+ dput(h_src_dentry);
+ } else {
+ AuIOErr("no dentry found for hi%lu on b%d\n",
+ h_inode->i_ino, a->bdst);
+ err = -EIO;
+ }
+ }
+
+ if (!err && !plink)
+ au_plink_append(inode, a->bdst, a->h_path.dentry);
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+int aufs_link(struct dentry *src_dentry, struct inode *dir,
+ struct dentry *dentry)
+{
+ int err, rerr;
+ struct au_dtime dt;
+ struct au_link_args *a;
+ struct dentry *wh_dentry, *h_src_dentry;
+ struct inode *inode, *delegated;
+ struct super_block *sb;
+ struct au_wr_dir_args wr_dir_args = {
+ /* .force_btgt = -1, */
+ .flags = AuWrDir_ADD_ENTRY
+ };
+
+ IMustLock(dir);
+ inode = d_inode(src_dentry);
+ IMustLock(inode);
+
+ err = -ENOMEM;
+ a = kzalloc(sizeof(*a), GFP_NOFS);
+ if (unlikely(!a))
+ goto out;
+
+ a->parent = dentry->d_parent; /* dir inode is locked */
+ err = aufs_read_and_write_lock2(dentry, src_dentry,
+ AuLock_NOPLM | AuLock_GEN);
+ if (unlikely(err))
+ goto out_kfree;
+ err = au_d_linkable(src_dentry);
+ if (unlikely(err))
+ goto out_unlock;
+ err = au_d_may_add(dentry);
+ if (unlikely(err))
+ goto out_unlock;
+
+ a->src_parent = dget_parent(src_dentry);
+ wr_dir_args.force_btgt = au_ibstart(inode);
+
+ di_write_lock_parent(a->parent);
+ wr_dir_args.force_btgt = au_wbr(dentry, wr_dir_args.force_btgt);
+ wh_dentry = lock_hdir_lkup_wh(dentry, &dt, src_dentry, &a->pin,
+ &wr_dir_args);
+ err = PTR_ERR(wh_dentry);
+ if (IS_ERR(wh_dentry))
+ goto out_parent;
+
+ err = 0;
+ sb = dentry->d_sb;
+ a->bdst = au_dbstart(dentry);
+ a->h_path.dentry = au_h_dptr(dentry, a->bdst);
+ a->h_path.mnt = au_sbr_mnt(sb, a->bdst);
+ a->bsrc = au_ibstart(inode);
+ h_src_dentry = au_h_d_alias(src_dentry, a->bsrc);
+ if (!h_src_dentry && au_di(src_dentry)->di_tmpfile)
+ h_src_dentry = dget(au_hi_wh(inode, a->bsrc));
+ if (!h_src_dentry) {
+ a->bsrc = au_dbstart(src_dentry);
+ h_src_dentry = au_h_d_alias(src_dentry, a->bsrc);
+ AuDebugOn(!h_src_dentry);
+ } else if (IS_ERR(h_src_dentry)) {
+ err = PTR_ERR(h_src_dentry);
+ goto out_parent;
+ }
+
+ if (au_opt_test(au_mntflags(sb), PLINK)) {
+ if (a->bdst < a->bsrc
+ /* && h_src_dentry->d_sb != a->h_path.dentry->d_sb */)
+ err = au_cpup_or_link(src_dentry, dentry, a);
+ else {
+ delegated = NULL;
+ err = vfsub_link(h_src_dentry, au_pinned_h_dir(&a->pin),
+ &a->h_path, &delegated);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal link\n");
+ iput(delegated);
+ }
+ }
+ dput(h_src_dentry);
+ } else {
+ /*
+ * copyup src_dentry to the branch we process,
+ * and then link(2) to it.
+ */
+ dput(h_src_dentry);
+ if (a->bdst < a->bsrc
+ /* && h_src_dentry->d_sb != a->h_path.dentry->d_sb */) {
+ au_unpin(&a->pin);
+ di_write_unlock(a->parent);
+ err = au_cpup_before_link(src_dentry, a);
+ di_write_lock_parent(a->parent);
+ if (!err)
+ err = au_pin(&a->pin, dentry, a->bdst,
+ au_opt_udba(sb),
+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+ if (unlikely(err))
+ goto out_wh;
+ }
+ if (!err) {
+ h_src_dentry = au_h_dptr(src_dentry, a->bdst);
+ err = -ENOENT;
+ if (h_src_dentry && d_is_positive(h_src_dentry)) {
+ delegated = NULL;
+ err = vfsub_link(h_src_dentry,
+ au_pinned_h_dir(&a->pin),
+ &a->h_path, &delegated);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry"
+ " for NFSv4 delegation"
+ " for an internal link\n");
+ iput(delegated);
+ }
+ }
+ }
+ }
+ if (unlikely(err))
+ goto out_unpin;
+
+ if (wh_dentry) {
+ a->h_path.dentry = wh_dentry;
+ err = au_wh_unlink_dentry(au_pinned_h_dir(&a->pin), &a->h_path,
+ dentry);
+ if (unlikely(err))
+ goto out_revert;
+ }
+
+ au_dir_ts(dir, a->bdst);
+ dir->i_version++;
+ inc_nlink(inode);
+ inode->i_ctime = dir->i_ctime;
+ d_instantiate(dentry, au_igrab(inode));
+ if (d_unhashed(a->h_path.dentry))
+		/* some filesystems call d_drop() */
+ d_drop(dentry);
+	/* some filesystems consume an inode even for a hardlink */
+ au_fhsm_wrote(sb, a->bdst, /*force*/0);
+ goto out_unpin; /* success */
+
+out_revert:
+ /* no delegation since it is just created */
+ rerr = vfsub_unlink(au_pinned_h_dir(&a->pin), &a->h_path,
+ /*delegated*/NULL, /*force*/0);
+ if (unlikely(rerr)) {
+ AuIOErr("%pd reverting failed(%d, %d)\n", dentry, err, rerr);
+ err = -EIO;
+ }
+ au_dtime_revert(&dt);
+out_unpin:
+ au_unpin(&a->pin);
+out_wh:
+ dput(wh_dentry);
+out_parent:
+ di_write_unlock(a->parent);
+ dput(a->src_parent);
+out_unlock:
+ if (unlikely(err)) {
+ au_update_dbstart(dentry);
+ d_drop(dentry);
+ }
+ aufs_read_and_write_unlock2(dentry, src_dentry);
+out_kfree:
+ kfree(a);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+int aufs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+ int err, rerr;
+ aufs_bindex_t bindex;
+ unsigned char diropq;
+ struct path h_path;
+ struct dentry *wh_dentry, *parent, *opq_dentry;
+ struct inode *h_inode;
+ struct super_block *sb;
+ struct {
+ struct au_pin pin;
+ struct au_dtime dt;
+ } *a; /* reduce the stack usage */
+ struct au_wr_dir_args wr_dir_args = {
+ .force_btgt = -1,
+ .flags = AuWrDir_ADD_ENTRY | AuWrDir_ISDIR
+ };
+
+ IMustLock(dir);
+
+ err = -ENOMEM;
+ a = kmalloc(sizeof(*a), GFP_NOFS);
+ if (unlikely(!a))
+ goto out;
+
+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN);
+ if (unlikely(err))
+ goto out_free;
+ err = au_d_may_add(dentry);
+ if (unlikely(err))
+ goto out_unlock;
+
+ parent = dentry->d_parent; /* dir inode is locked */
+ di_write_lock_parent(parent);
+ wh_dentry = lock_hdir_lkup_wh(dentry, &a->dt, /*src_dentry*/NULL,
+ &a->pin, &wr_dir_args);
+ err = PTR_ERR(wh_dentry);
+ if (IS_ERR(wh_dentry))
+ goto out_parent;
+
+ sb = dentry->d_sb;
+ bindex = au_dbstart(dentry);
+ h_path.dentry = au_h_dptr(dentry, bindex);
+ h_path.mnt = au_sbr_mnt(sb, bindex);
+ err = vfsub_mkdir(au_pinned_h_dir(&a->pin), &h_path, mode);
+ if (unlikely(err))
+ goto out_unpin;
+
+ /* make the dir opaque */
+ diropq = 0;
+ h_inode = d_inode(h_path.dentry);
+ if (wh_dentry
+ || au_opt_test(au_mntflags(sb), ALWAYS_DIROPQ)) {
+ inode_lock_nested(h_inode, AuLsc_I_CHILD);
+ opq_dentry = au_diropq_create(dentry, bindex);
+ inode_unlock(h_inode);
+ err = PTR_ERR(opq_dentry);
+ if (IS_ERR(opq_dentry))
+ goto out_dir;
+ dput(opq_dentry);
+ diropq = 1;
+ }
+
+ err = epilog(dir, bindex, wh_dentry, dentry);
+ if (!err) {
+ inc_nlink(dir);
+ goto out_unpin; /* success */
+ }
+
+ /* revert */
+ if (diropq) {
+ AuLabel(revert opq);
+ inode_lock_nested(h_inode, AuLsc_I_CHILD);
+ rerr = au_diropq_remove(dentry, bindex);
+ inode_unlock(h_inode);
+ if (rerr) {
+ AuIOErr("%pd reverting diropq failed(%d, %d)\n",
+ dentry, err, rerr);
+ err = -EIO;
+ }
+ }
+
+out_dir:
+ AuLabel(revert dir);
+ rerr = vfsub_rmdir(au_pinned_h_dir(&a->pin), &h_path);
+ if (rerr) {
+ AuIOErr("%pd reverting dir failed(%d, %d)\n",
+ dentry, err, rerr);
+ err = -EIO;
+ }
+ au_dtime_revert(&a->dt);
+out_unpin:
+ au_unpin(&a->pin);
+ dput(wh_dentry);
+out_parent:
+ di_write_unlock(parent);
+out_unlock:
+ if (unlikely(err)) {
+ au_update_dbstart(dentry);
+ d_drop(dentry);
+ }
+ aufs_read_unlock(dentry, AuLock_DW);
+out_free:
+ kfree(a);
+out:
+ return err;
+}
diff --git a/fs/aufs/i_op_del.c b/fs/aufs/i_op_del.c
new file mode 100644
index 000000000..68741aadb
--- /dev/null
+++ b/fs/aufs/i_op_del.c
@@ -0,0 +1,497 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * inode operations (del entry)
+ */
+
+#include "aufs.h"
+
+/*
+ * decide if a new whiteout for @dentry is necessary or not.
+ * when it is necessary, prepare the parent dir on the branch whose index is
+ * @bcpup for the creation. the actual creation of the whiteout will be done
+ * by the caller.
+ * return value:
+ * 0: wh is unnecessary
+ * plus: wh is necessary
+ * minus: error
+ */
+int au_wr_dir_need_wh(struct dentry *dentry, int isdir, aufs_bindex_t *bcpup)
+{
+ int need_wh, err;
+ aufs_bindex_t bstart;
+ struct super_block *sb;
+
+ sb = dentry->d_sb;
+ bstart = au_dbstart(dentry);
+ if (*bcpup < 0) {
+ *bcpup = bstart;
+ if (au_test_ro(sb, bstart, d_inode(dentry))) {
+ err = AuWbrCopyup(au_sbi(sb), dentry);
+ *bcpup = err;
+ if (unlikely(err < 0))
+ goto out;
+ }
+ } else
+ AuDebugOn(bstart < *bcpup
+ || au_test_ro(sb, *bcpup, d_inode(dentry)));
+ AuDbg("bcpup %d, bstart %d\n", *bcpup, bstart);
+
+ if (*bcpup != bstart) {
+ err = au_cpup_dirs(dentry, *bcpup);
+ if (unlikely(err))
+ goto out;
+ need_wh = 1;
+ } else {
+ struct au_dinfo *dinfo, *tmp;
+
+ need_wh = -ENOMEM;
+ dinfo = au_di(dentry);
+ tmp = au_di_alloc(sb, AuLsc_DI_TMP);
+ if (tmp) {
+ au_di_cp(tmp, dinfo);
+ au_di_swap(tmp, dinfo);
+ /* returns the number of positive dentries */
+ need_wh = au_lkup_dentry(dentry, bstart + 1, /*type*/0);
+ au_di_swap(tmp, dinfo);
+ au_rw_write_unlock(&tmp->di_rwsem);
+ au_di_free(tmp);
+ }
+ }
+ AuDbg("need_wh %d\n", need_wh);
+ err = need_wh;
+
+out:
+ return err;
+}
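+
+/*
+ * a typical caller handles the return value as follows:
+ *	need_wh = au_wr_dir_need_wh(dentry, isdir, &bcpup);
+ *	if (need_wh < 0)	--> an error
+ *	else if (need_wh)	--> create a whiteout on the branch @bcpup
+ *	else			--> no whiteout is necessary
+ * cf. lock_hdir_create_wh() below.
+ */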
+
+/*
+ * simple tests for the del-entry operations.
+ * following the checks in vfs, plus the parent-child relationship.
+ */
+int au_may_del(struct dentry *dentry, aufs_bindex_t bindex,
+ struct dentry *h_parent, int isdir)
+{
+ int err;
+ umode_t h_mode;
+ struct dentry *h_dentry, *h_latest;
+ struct inode *h_inode;
+
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (d_really_is_positive(dentry)) {
+ err = -ENOENT;
+ if (unlikely(d_is_negative(h_dentry)))
+ goto out;
+ h_inode = d_inode(h_dentry);
+ if (unlikely(!h_inode->i_nlink))
+ goto out;
+
+ h_mode = h_inode->i_mode;
+ if (!isdir) {
+ err = -EISDIR;
+ if (unlikely(S_ISDIR(h_mode)))
+ goto out;
+ } else if (unlikely(!S_ISDIR(h_mode))) {
+ err = -ENOTDIR;
+ goto out;
+ }
+ } else {
+ /* rename(2) case */
+ err = -EIO;
+ if (unlikely(d_is_positive(h_dentry)))
+ goto out;
+ }
+
+ err = -ENOENT;
+ /* expected parent dir is locked */
+ if (unlikely(h_parent != h_dentry->d_parent))
+ goto out;
+ err = 0;
+
+ /*
+	 * rmdir'ing a dir may break the consistency on some filesystems.
+	 * let's try a heavy test.
+ */
+ err = -EACCES;
+ if (unlikely(!au_opt_test(au_mntflags(dentry->d_sb), DIRPERM1)
+ && au_test_h_perm(d_inode(h_parent),
+ MAY_EXEC | MAY_WRITE)))
+ goto out;
+
+ h_latest = au_sio_lkup_one(&dentry->d_name, h_parent);
+ err = -EIO;
+ if (IS_ERR(h_latest))
+ goto out;
+ if (h_latest == h_dentry)
+ err = 0;
+ dput(h_latest);
+
+out:
+ return err;
+}
+
+/*
+ * decide the branch where we operate for @dentry. the branch index will be set
+ * in @rbcpup. after deciding it, 'pin' it and store the timestamps of the parent
+ * dir for reverting.
+ * when a new whiteout is necessary, create it.
+ */
+static struct dentry*
+lock_hdir_create_wh(struct dentry *dentry, int isdir, aufs_bindex_t *rbcpup,
+ struct au_dtime *dt, struct au_pin *pin)
+{
+ struct dentry *wh_dentry;
+ struct super_block *sb;
+ struct path h_path;
+ int err, need_wh;
+ unsigned int udba;
+ aufs_bindex_t bcpup;
+
+ need_wh = au_wr_dir_need_wh(dentry, isdir, rbcpup);
+ wh_dentry = ERR_PTR(need_wh);
+ if (unlikely(need_wh < 0))
+ goto out;
+
+ sb = dentry->d_sb;
+ udba = au_opt_udba(sb);
+ bcpup = *rbcpup;
+ err = au_pin(pin, dentry, bcpup, udba,
+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+ wh_dentry = ERR_PTR(err);
+ if (unlikely(err))
+ goto out;
+
+ h_path.dentry = au_pinned_h_parent(pin);
+ if (udba != AuOpt_UDBA_NONE
+ && au_dbstart(dentry) == bcpup) {
+ err = au_may_del(dentry, bcpup, h_path.dentry, isdir);
+ wh_dentry = ERR_PTR(err);
+ if (unlikely(err))
+ goto out_unpin;
+ }
+
+ h_path.mnt = au_sbr_mnt(sb, bcpup);
+ au_dtime_store(dt, au_pinned_parent(pin), &h_path);
+ wh_dentry = NULL;
+ if (!need_wh)
+ goto out; /* success, no need to create whiteout */
+
+ wh_dentry = au_wh_create(dentry, bcpup, h_path.dentry);
+ if (IS_ERR(wh_dentry))
+ goto out_unpin;
+
+	/* returns with the parent locked and wh_dentry dget-ed */
+ goto out; /* success */
+
+out_unpin:
+ au_unpin(pin);
+out:
+ return wh_dentry;
+}
+
+/*
+ * when removing a dir, rename it to a unique temporary whiteout-ed name first
+ * in order to be revertible and save time for removing many child whiteouts
+ * under the dir.
+ * returns 1 when there are too many child whiteouts and the caller should
+ * remove them asynchronously. returns 0 when the number of children is small
+ * enough to remove now, or when the branch fs is a remote fs.
+ * otherwise return an error.
+ */
+static int renwh_and_rmdir(struct dentry *dentry, aufs_bindex_t bindex,
+ struct au_nhash *whlist, struct inode *dir)
+{
+ int rmdir_later, err, dirwh;
+ struct dentry *h_dentry;
+ struct super_block *sb;
+ struct inode *inode;
+
+ sb = dentry->d_sb;
+ SiMustAnyLock(sb);
+ h_dentry = au_h_dptr(dentry, bindex);
+ err = au_whtmp_ren(h_dentry, au_sbr(sb, bindex));
+ if (unlikely(err))
+ goto out;
+
+ /* stop monitoring */
+ inode = d_inode(dentry);
+ au_hn_free(au_hi(inode, bindex));
+
+ if (!au_test_fs_remote(h_dentry->d_sb)) {
+ dirwh = au_sbi(sb)->si_dirwh;
+ rmdir_later = (dirwh <= 1);
+ if (!rmdir_later)
+ rmdir_later = au_nhash_test_longer_wh(whlist, bindex,
+ dirwh);
+ if (rmdir_later)
+ return rmdir_later;
+ }
+
+ err = au_whtmp_rmdir(dir, bindex, h_dentry, whlist);
+ if (unlikely(err)) {
+ AuIOErr("rmdir %pd, b%d failed, %d. ignored\n",
+ h_dentry, bindex, err);
+ err = 0;
+ }
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/*
+ * final procedure for deleting an entry.
+ * maintain dentry and iattr.
+ */
+static void epilog(struct inode *dir, struct dentry *dentry,
+ aufs_bindex_t bindex)
+{
+ struct inode *inode;
+
+ inode = d_inode(dentry);
+ d_drop(dentry);
+ inode->i_ctime = dir->i_ctime;
+
+ au_dir_ts(dir, bindex);
+ dir->i_version++;
+}
+
+/*
+ * when an error happened, remove the created whiteout and revert everything.
+ */
+static int do_revert(int err, struct inode *dir, aufs_bindex_t bindex,
+ aufs_bindex_t bwh, struct dentry *wh_dentry,
+ struct dentry *dentry, struct au_dtime *dt)
+{
+ int rerr;
+ struct path h_path = {
+ .dentry = wh_dentry,
+ .mnt = au_sbr_mnt(dir->i_sb, bindex)
+ };
+
+ rerr = au_wh_unlink_dentry(au_h_iptr(dir, bindex), &h_path, dentry);
+ if (!rerr) {
+ au_set_dbwh(dentry, bwh);
+ au_dtime_revert(dt);
+ return 0;
+ }
+
+ AuIOErr("%pd reverting whiteout failed(%d, %d)\n", dentry, err, rerr);
+ return -EIO;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int aufs_unlink(struct inode *dir, struct dentry *dentry)
+{
+ int err;
+ aufs_bindex_t bwh, bindex, bstart;
+ struct inode *inode, *h_dir, *delegated;
+ struct dentry *parent, *wh_dentry;
+	/* to reduce stack size */
+ struct {
+ struct au_dtime dt;
+ struct au_pin pin;
+ struct path h_path;
+ } *a;
+
+ IMustLock(dir);
+
+ err = -ENOMEM;
+ a = kmalloc(sizeof(*a), GFP_NOFS);
+ if (unlikely(!a))
+ goto out;
+
+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN);
+ if (unlikely(err))
+ goto out_free;
+ err = au_d_hashed_positive(dentry);
+ if (unlikely(err))
+ goto out_unlock;
+ inode = d_inode(dentry);
+ IMustLock(inode);
+ err = -EISDIR;
+ if (unlikely(d_is_dir(dentry)))
+ goto out_unlock; /* possible? */
+
+ bstart = au_dbstart(dentry);
+ bwh = au_dbwh(dentry);
+ bindex = -1;
+ parent = dentry->d_parent; /* dir inode is locked */
+ di_write_lock_parent(parent);
+ wh_dentry = lock_hdir_create_wh(dentry, /*isdir*/0, &bindex, &a->dt,
+ &a->pin);
+ err = PTR_ERR(wh_dentry);
+ if (IS_ERR(wh_dentry))
+ goto out_parent;
+
+ a->h_path.mnt = au_sbr_mnt(dentry->d_sb, bstart);
+ a->h_path.dentry = au_h_dptr(dentry, bstart);
+ dget(a->h_path.dentry);
+ if (bindex == bstart) {
+ h_dir = au_pinned_h_dir(&a->pin);
+ delegated = NULL;
+ err = vfsub_unlink(h_dir, &a->h_path, &delegated, /*force*/0);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal unlink\n");
+ iput(delegated);
+ }
+ } else {
+ /* dir inode is locked */
+ h_dir = d_inode(wh_dentry->d_parent);
+ IMustLock(h_dir);
+ err = 0;
+ }
+
+ if (!err) {
+ vfsub_drop_nlink(inode);
+ epilog(dir, dentry, bindex);
+
+ /* update target timestamps */
+ if (bindex == bstart) {
+ vfsub_update_h_iattr(&a->h_path, /*did*/NULL);
+ /*ignore*/
+ inode->i_ctime = d_inode(a->h_path.dentry)->i_ctime;
+ } else
+ /* todo: this timestamp may be reverted later */
+ inode->i_ctime = h_dir->i_ctime;
+ goto out_unpin; /* success */
+ }
+
+ /* revert */
+ if (wh_dentry) {
+ int rerr;
+
+ rerr = do_revert(err, dir, bindex, bwh, wh_dentry, dentry,
+ &a->dt);
+ if (rerr)
+ err = rerr;
+ }
+
+out_unpin:
+ au_unpin(&a->pin);
+ dput(wh_dentry);
+ dput(a->h_path.dentry);
+out_parent:
+ di_write_unlock(parent);
+out_unlock:
+ aufs_read_unlock(dentry, AuLock_DW);
+out_free:
+ kfree(a);
+out:
+ return err;
+}
+
+int aufs_rmdir(struct inode *dir, struct dentry *dentry)
+{
+ int err, rmdir_later;
+ aufs_bindex_t bwh, bindex, bstart;
+ struct inode *inode;
+ struct dentry *parent, *wh_dentry, *h_dentry;
+ struct au_whtmp_rmdir *args;
+	/* to reduce stack size */
+ struct {
+ struct au_dtime dt;
+ struct au_pin pin;
+ } *a;
+
+ IMustLock(dir);
+
+ err = -ENOMEM;
+ a = kmalloc(sizeof(*a), GFP_NOFS);
+ if (unlikely(!a))
+ goto out;
+
+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH | AuLock_GEN);
+ if (unlikely(err))
+ goto out_free;
+ err = au_alive_dir(dentry);
+ if (unlikely(err))
+ goto out_unlock;
+ inode = d_inode(dentry);
+ IMustLock(inode);
+ err = -ENOTDIR;
+ if (unlikely(!d_is_dir(dentry)))
+ goto out_unlock; /* possible? */
+
+ err = -ENOMEM;
+ args = au_whtmp_rmdir_alloc(dir->i_sb, GFP_NOFS);
+ if (unlikely(!args))
+ goto out_unlock;
+
+ parent = dentry->d_parent; /* dir inode is locked */
+ di_write_lock_parent(parent);
+ err = au_test_empty(dentry, &args->whlist);
+ if (unlikely(err))
+ goto out_parent;
+
+ bstart = au_dbstart(dentry);
+ bwh = au_dbwh(dentry);
+ bindex = -1;
+ wh_dentry = lock_hdir_create_wh(dentry, /*isdir*/1, &bindex, &a->dt,
+ &a->pin);
+ err = PTR_ERR(wh_dentry);
+ if (IS_ERR(wh_dentry))
+ goto out_parent;
+
+ h_dentry = au_h_dptr(dentry, bstart);
+ dget(h_dentry);
+ rmdir_later = 0;
+ if (bindex == bstart) {
+ err = renwh_and_rmdir(dentry, bstart, &args->whlist, dir);
+ if (err > 0) {
+ rmdir_later = err;
+ err = 0;
+ }
+ } else {
+ /* stop monitoring */
+ au_hn_free(au_hi(inode, bstart));
+
+ /* dir inode is locked */
+ IMustLock(d_inode(wh_dentry->d_parent));
+ err = 0;
+ }
+
+ if (!err) {
+ vfsub_dead_dir(inode);
+ au_set_dbdiropq(dentry, -1);
+ epilog(dir, dentry, bindex);
+
+ if (rmdir_later) {
+ au_whtmp_kick_rmdir(dir, bstart, h_dentry, args);
+ args = NULL;
+ }
+
+ goto out_unpin; /* success */
+ }
+
+ /* revert */
+ AuLabel(revert);
+ if (wh_dentry) {
+ int rerr;
+
+ rerr = do_revert(err, dir, bindex, bwh, wh_dentry, dentry,
+ &a->dt);
+ if (rerr)
+ err = rerr;
+ }
+
+out_unpin:
+ au_unpin(&a->pin);
+ dput(wh_dentry);
+ dput(h_dentry);
+out_parent:
+ di_write_unlock(parent);
+ if (args)
+ au_whtmp_rmdir_free(args);
+out_unlock:
+ aufs_read_unlock(dentry, AuLock_DW);
+out_free:
+ kfree(a);
+out:
+ AuTraceErr(err);
+ return err;
+}
diff --git a/fs/aufs/i_op_ren.c b/fs/aufs/i_op_ren.c
new file mode 100644
index 000000000..de4e03bf0
--- /dev/null
+++ b/fs/aufs/i_op_ren.c
@@ -0,0 +1,1002 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * inode operation (rename entry)
+ * todo: this is a crazy monster
+ */
+
+#include "aufs.h"
+
+enum { AuSRC, AuDST, AuSrcDst };
+enum { AuPARENT, AuCHILD, AuParentChild };
+
+#define AuRen_ISDIR 1
+#define AuRen_ISSAMEDIR (1 << 1)
+#define AuRen_WHSRC (1 << 2)
+#define AuRen_WHDST (1 << 3)
+#define AuRen_MNT_WRITE (1 << 4)
+#define AuRen_DT_DSTDIR (1 << 5)
+#define AuRen_DIROPQ (1 << 6)
+#define au_ftest_ren(flags, name) ((flags) & AuRen_##name)
+#define au_fset_ren(flags, name) \
+ do { (flags) |= AuRen_##name; } while (0)
+#define au_fclr_ren(flags, name) \
+ do { (flags) &= ~AuRen_##name; } while (0)
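+/*
+ * for example, au_fset_ren(a->flags, ISDIR) sets the AuRen_ISDIR bit in
+ * a->flags and au_ftest_ren(a->flags, ISDIR) tests it; every AuRen_* value
+ * above is a single bit, so the flags may be combined freely.
+ */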
+
+struct au_ren_args {
+ struct {
+ struct dentry *dentry, *h_dentry, *parent, *h_parent,
+ *wh_dentry;
+ struct inode *dir, *inode;
+ struct au_hinode *hdir;
+ struct au_dtime dt[AuParentChild];
+ aufs_bindex_t bstart;
+ } sd[AuSrcDst];
+
+#define src_dentry sd[AuSRC].dentry
+#define src_dir sd[AuSRC].dir
+#define src_inode sd[AuSRC].inode
+#define src_h_dentry sd[AuSRC].h_dentry
+#define src_parent sd[AuSRC].parent
+#define src_h_parent sd[AuSRC].h_parent
+#define src_wh_dentry sd[AuSRC].wh_dentry
+#define src_hdir sd[AuSRC].hdir
+#define src_h_dir sd[AuSRC].hdir->hi_inode
+#define src_dt sd[AuSRC].dt
+#define src_bstart sd[AuSRC].bstart
+
+#define dst_dentry sd[AuDST].dentry
+#define dst_dir sd[AuDST].dir
+#define dst_inode sd[AuDST].inode
+#define dst_h_dentry sd[AuDST].h_dentry
+#define dst_parent sd[AuDST].parent
+#define dst_h_parent sd[AuDST].h_parent
+#define dst_wh_dentry sd[AuDST].wh_dentry
+#define dst_hdir sd[AuDST].hdir
+#define dst_h_dir sd[AuDST].hdir->hi_inode
+#define dst_dt sd[AuDST].dt
+#define dst_bstart sd[AuDST].bstart
+
+ struct dentry *h_trap;
+ struct au_branch *br;
+ struct au_hinode *src_hinode;
+ struct path h_path;
+ struct au_nhash whlist;
+ aufs_bindex_t btgt, src_bwh, src_bdiropq;
+
+ unsigned int flags;
+
+ struct au_whtmp_rmdir *thargs;
+ struct dentry *h_dst;
+};
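+
+/*
+ * the #define-s inside the struct above are field aliases; e.g. a->src_dentry
+ * expands to a->sd[AuSRC].dentry and a->dst_h_dir to
+ * a->sd[AuDST].hdir->hi_inode. they remain in effect for the rest of this
+ * file.
+ */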
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * functions for reverting.
+ * when an error happens in a single rename system call, we should revert
+ * everything as if nothing had happened.
+ * we don't need to revert the copy-up/down of the parent dirs since they are
+ * harmless.
+ */
+
+#define RevertFailure(fmt, ...) do { \
+ AuIOErr("revert failure: " fmt " (%d, %d)\n", \
+ ##__VA_ARGS__, err, rerr); \
+ err = -EIO; \
+} while (0)
+
+static void au_ren_rev_diropq(int err, struct au_ren_args *a)
+{
+ int rerr;
+
+ au_hn_imtx_lock_nested(a->src_hinode, AuLsc_I_CHILD);
+ rerr = au_diropq_remove(a->src_dentry, a->btgt);
+ au_hn_imtx_unlock(a->src_hinode);
+ au_set_dbdiropq(a->src_dentry, a->src_bdiropq);
+ if (rerr)
+ RevertFailure("remove diropq %pd", a->src_dentry);
+}
+
+static void au_ren_rev_rename(int err, struct au_ren_args *a)
+{
+ int rerr;
+ struct inode *delegated;
+
+ a->h_path.dentry = vfsub_lkup_one(&a->src_dentry->d_name,
+ a->src_h_parent);
+ rerr = PTR_ERR(a->h_path.dentry);
+ if (IS_ERR(a->h_path.dentry)) {
+ RevertFailure("lkup one %pd", a->src_dentry);
+ return;
+ }
+
+ delegated = NULL;
+ rerr = vfsub_rename(a->dst_h_dir,
+ au_h_dptr(a->src_dentry, a->btgt),
+ a->src_h_dir, &a->h_path, &delegated);
+ if (unlikely(rerr == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal rename\n");
+ iput(delegated);
+ }
+ d_drop(a->h_path.dentry);
+ dput(a->h_path.dentry);
+ /* au_set_h_dptr(a->src_dentry, a->btgt, NULL); */
+ if (rerr)
+ RevertFailure("rename %pd", a->src_dentry);
+}
+
+static void au_ren_rev_whtmp(int err, struct au_ren_args *a)
+{
+ int rerr;
+ struct inode *delegated;
+
+ a->h_path.dentry = vfsub_lkup_one(&a->dst_dentry->d_name,
+ a->dst_h_parent);
+ rerr = PTR_ERR(a->h_path.dentry);
+ if (IS_ERR(a->h_path.dentry)) {
+ RevertFailure("lkup one %pd", a->dst_dentry);
+ return;
+ }
+ if (d_is_positive(a->h_path.dentry)) {
+ d_drop(a->h_path.dentry);
+ dput(a->h_path.dentry);
+ return;
+ }
+
+ delegated = NULL;
+ rerr = vfsub_rename(a->dst_h_dir, a->h_dst, a->dst_h_dir, &a->h_path,
+ &delegated);
+ if (unlikely(rerr == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal rename\n");
+ iput(delegated);
+ }
+ d_drop(a->h_path.dentry);
+ dput(a->h_path.dentry);
+ if (!rerr)
+ au_set_h_dptr(a->dst_dentry, a->btgt, dget(a->h_dst));
+ else
+ RevertFailure("rename %pd", a->h_dst);
+}
+
+static void au_ren_rev_whsrc(int err, struct au_ren_args *a)
+{
+ int rerr;
+
+ a->h_path.dentry = a->src_wh_dentry;
+ rerr = au_wh_unlink_dentry(a->src_h_dir, &a->h_path, a->src_dentry);
+ au_set_dbwh(a->src_dentry, a->src_bwh);
+ if (rerr)
+ RevertFailure("unlink %pd", a->src_wh_dentry);
+}
+#undef RevertFailure
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * when we have to copy up the entry being renamed, do it with the
+ * rename-target name in order to minimize the cost (the later actual rename
+ * becomes unnecessary). otherwise rename it on the target branch.
+ */
+static int au_ren_or_cpup(struct au_ren_args *a)
+{
+ int err;
+ struct dentry *d;
+ struct inode *delegated;
+
+ d = a->src_dentry;
+ if (au_dbstart(d) == a->btgt) {
+ a->h_path.dentry = a->dst_h_dentry;
+ if (au_ftest_ren(a->flags, DIROPQ)
+ && au_dbdiropq(d) == a->btgt)
+ au_fclr_ren(a->flags, DIROPQ);
+ AuDebugOn(au_dbstart(d) != a->btgt);
+ delegated = NULL;
+ err = vfsub_rename(a->src_h_dir, au_h_dptr(d, a->btgt),
+ a->dst_h_dir, &a->h_path, &delegated);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal rename\n");
+ iput(delegated);
+ }
+ } else
+ BUG();
+
+ if (!err && a->h_dst)
+ /* it will be set to dinfo later */
+ dget(a->h_dst);
+
+ return err;
+}
+
+/* cf. aufs_rmdir() */
+static int au_ren_del_whtmp(struct au_ren_args *a)
+{
+ int err;
+ struct inode *dir;
+
+ dir = a->dst_dir;
+ SiMustAnyLock(dir->i_sb);
+ if (!au_nhash_test_longer_wh(&a->whlist, a->btgt,
+ au_sbi(dir->i_sb)->si_dirwh)
+ || au_test_fs_remote(a->h_dst->d_sb)) {
+ err = au_whtmp_rmdir(dir, a->btgt, a->h_dst, &a->whlist);
+ if (unlikely(err))
+ pr_warn("failed removing whtmp dir %pd (%d), "
+ "ignored.\n", a->h_dst, err);
+ } else {
+ au_nhash_wh_free(&a->thargs->whlist);
+ a->thargs->whlist = a->whlist;
+ a->whlist.nh_num = 0;
+ au_whtmp_kick_rmdir(dir, a->btgt, a->h_dst, a->thargs);
+ dput(a->h_dst);
+ a->thargs = NULL;
+ }
+
+ return 0;
+}
+
+/* make it an 'opaque' dir. */
+static int au_ren_diropq(struct au_ren_args *a)
+{
+ int err;
+ struct dentry *diropq;
+
+ err = 0;
+ a->src_bdiropq = au_dbdiropq(a->src_dentry);
+ a->src_hinode = au_hi(a->src_inode, a->btgt);
+ au_hn_imtx_lock_nested(a->src_hinode, AuLsc_I_CHILD);
+ diropq = au_diropq_create(a->src_dentry, a->btgt);
+ au_hn_imtx_unlock(a->src_hinode);
+ if (IS_ERR(diropq))
+ err = PTR_ERR(diropq);
+ else
+ dput(diropq);
+
+ return err;
+}
+
+static int do_rename(struct au_ren_args *a)
+{
+ int err;
+ struct dentry *d, *h_d;
+
+ /* prepare workqueue args for asynchronous rmdir */
+ h_d = a->dst_h_dentry;
+ if (au_ftest_ren(a->flags, ISDIR) && d_is_positive(h_d)) {
+ err = -ENOMEM;
+ a->thargs = au_whtmp_rmdir_alloc(a->src_dentry->d_sb, GFP_NOFS);
+ if (unlikely(!a->thargs))
+ goto out;
+ a->h_dst = dget(h_d);
+ }
+
+ /* create whiteout for src_dentry */
+ if (au_ftest_ren(a->flags, WHSRC)) {
+ a->src_bwh = au_dbwh(a->src_dentry);
+ AuDebugOn(a->src_bwh >= 0);
+ a->src_wh_dentry
+ = au_wh_create(a->src_dentry, a->btgt, a->src_h_parent);
+ err = PTR_ERR(a->src_wh_dentry);
+ if (IS_ERR(a->src_wh_dentry))
+ goto out_thargs;
+ }
+
+ /* lookup whiteout for dentry */
+ if (au_ftest_ren(a->flags, WHDST)) {
+ h_d = au_wh_lkup(a->dst_h_parent, &a->dst_dentry->d_name,
+ a->br);
+ err = PTR_ERR(h_d);
+ if (IS_ERR(h_d))
+ goto out_whsrc;
+ if (d_is_negative(h_d))
+ dput(h_d);
+ else
+ a->dst_wh_dentry = h_d;
+ }
+
+ /* rename dentry to tmpwh */
+ if (a->thargs) {
+ err = au_whtmp_ren(a->dst_h_dentry, a->br);
+ if (unlikely(err))
+ goto out_whdst;
+
+ d = a->dst_dentry;
+ au_set_h_dptr(d, a->btgt, NULL);
+ err = au_lkup_neg(d, a->btgt, /*wh*/0);
+ if (unlikely(err))
+ goto out_whtmp;
+ a->dst_h_dentry = au_h_dptr(d, a->btgt);
+ }
+
+ BUG_ON(d_is_positive(a->dst_h_dentry) && a->src_bstart != a->btgt);
+
+ /* rename by vfs_rename or cpup */
+ d = a->dst_dentry;
+ if (au_ftest_ren(a->flags, ISDIR)
+ && (a->dst_wh_dentry
+ || au_dbdiropq(d) == a->btgt
+ /* hide the lower to keep xino */
+ || a->btgt < au_dbend(d)
+ || au_opt_test(au_mntflags(d->d_sb), ALWAYS_DIROPQ)))
+ au_fset_ren(a->flags, DIROPQ);
+ err = au_ren_or_cpup(a);
+ if (unlikely(err))
+ /* leave the copied-up one */
+ goto out_whtmp;
+
+ /* make dir opaque */
+ if (au_ftest_ren(a->flags, DIROPQ)) {
+ err = au_ren_diropq(a);
+ if (unlikely(err))
+ goto out_rename;
+ }
+
+ /* update target timestamps */
+ AuDebugOn(au_dbstart(a->src_dentry) != a->btgt);
+ a->h_path.dentry = au_h_dptr(a->src_dentry, a->btgt);
+ vfsub_update_h_iattr(&a->h_path, /*did*/NULL); /*ignore*/
+ a->src_inode->i_ctime = d_inode(a->h_path.dentry)->i_ctime;
+
+ /* remove whiteout for dentry */
+ if (a->dst_wh_dentry) {
+ a->h_path.dentry = a->dst_wh_dentry;
+ err = au_wh_unlink_dentry(a->dst_h_dir, &a->h_path,
+ a->dst_dentry);
+ if (unlikely(err))
+ goto out_diropq;
+ }
+
+ /* remove whtmp */
+ if (a->thargs)
+ au_ren_del_whtmp(a); /* ignore this error */
+
+ au_fhsm_wrote(a->src_dentry->d_sb, a->btgt, /*force*/0);
+ err = 0;
+ goto out_success;
+
+out_diropq:
+ if (au_ftest_ren(a->flags, DIROPQ))
+ au_ren_rev_diropq(err, a);
+out_rename:
+ au_ren_rev_rename(err, a);
+ dput(a->h_dst);
+out_whtmp:
+ if (a->thargs)
+ au_ren_rev_whtmp(err, a);
+out_whdst:
+ dput(a->dst_wh_dentry);
+ a->dst_wh_dentry = NULL;
+out_whsrc:
+ if (a->src_wh_dentry)
+ au_ren_rev_whsrc(err, a);
+out_success:
+ dput(a->src_wh_dentry);
+ dput(a->dst_wh_dentry);
+out_thargs:
+ if (a->thargs) {
+ dput(a->h_dst);
+ au_whtmp_rmdir_free(a->thargs);
+ a->thargs = NULL;
+ }
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * test if @dentry dir can be a rename destination or not.
+ * success means it is a logically empty dir.
+ */
+static int may_rename_dstdir(struct dentry *dentry, struct au_nhash *whlist)
+{
+ return au_test_empty(dentry, whlist);
+}
+
+/*
+ * test if @dentry dir can be a rename source or not.
+ * if it can, return 0 and @children is filled.
+ * success means either
+ * - it is a logically empty dir, or
+ * - it exists on the writable branch and has no children, including whiteouts,
+ *   on the lower branches.
+ */
+static int may_rename_srcdir(struct dentry *dentry, aufs_bindex_t btgt)
+{
+ int err;
+ unsigned int rdhash;
+ aufs_bindex_t bstart;
+
+ bstart = au_dbstart(dentry);
+ if (bstart != btgt) {
+ struct au_nhash whlist;
+
+ SiMustAnyLock(dentry->d_sb);
+ rdhash = au_sbi(dentry->d_sb)->si_rdhash;
+ if (!rdhash)
+ rdhash = au_rdhash_est(au_dir_size(/*file*/NULL,
+ dentry));
+ err = au_nhash_alloc(&whlist, rdhash, GFP_NOFS);
+ if (unlikely(err))
+ goto out;
+ err = au_test_empty(dentry, &whlist);
+ au_nhash_wh_free(&whlist);
+ goto out;
+ }
+
+ if (bstart == au_dbtaildir(dentry))
+ return 0; /* success */
+
+ err = au_test_empty_lower(dentry);
+
+out:
+ if (err == -ENOTEMPTY) {
+		AuWarn1("renaming a dir which has child(ren) on multiple"
+			" branches is not supported\n");
+ err = -EXDEV;
+ }
+ return err;
+}
+
+/* side effect: sets whlist and h_dentry */
+static int au_ren_may_dir(struct au_ren_args *a)
+{
+ int err;
+ unsigned int rdhash;
+ struct dentry *d;
+
+ d = a->dst_dentry;
+ SiMustAnyLock(d->d_sb);
+
+ err = 0;
+ if (au_ftest_ren(a->flags, ISDIR) && a->dst_inode) {
+ rdhash = au_sbi(d->d_sb)->si_rdhash;
+ if (!rdhash)
+ rdhash = au_rdhash_est(au_dir_size(/*file*/NULL, d));
+ err = au_nhash_alloc(&a->whlist, rdhash, GFP_NOFS);
+ if (unlikely(err))
+ goto out;
+
+ au_set_dbstart(d, a->dst_bstart);
+ err = may_rename_dstdir(d, &a->whlist);
+ au_set_dbstart(d, a->btgt);
+ }
+ a->dst_h_dentry = au_h_dptr(d, au_dbstart(d));
+ if (unlikely(err))
+ goto out;
+
+ d = a->src_dentry;
+ a->src_h_dentry = au_h_dptr(d, au_dbstart(d));
+ if (au_ftest_ren(a->flags, ISDIR)) {
+ err = may_rename_srcdir(d, a->btgt);
+ if (unlikely(err)) {
+ au_nhash_wh_free(&a->whlist);
+ a->whlist.nh_num = 0;
+ }
+ }
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * simple tests for rename.
+ * following the checks in vfs, plus the parent-child relationship.
+ */
+static int au_may_ren(struct au_ren_args *a)
+{
+ int err, isdir;
+ struct inode *h_inode;
+
+ if (a->src_bstart == a->btgt) {
+ err = au_may_del(a->src_dentry, a->btgt, a->src_h_parent,
+ au_ftest_ren(a->flags, ISDIR));
+ if (unlikely(err))
+ goto out;
+ err = -EINVAL;
+ if (unlikely(a->src_h_dentry == a->h_trap))
+ goto out;
+ }
+
+ err = 0;
+ if (a->dst_bstart != a->btgt)
+ goto out;
+
+ err = -ENOTEMPTY;
+ if (unlikely(a->dst_h_dentry == a->h_trap))
+ goto out;
+
+ err = -EIO;
+ isdir = !!au_ftest_ren(a->flags, ISDIR);
+ if (d_really_is_negative(a->dst_dentry)) {
+ if (d_is_negative(a->dst_h_dentry))
+ err = au_may_add(a->dst_dentry, a->btgt,
+ a->dst_h_parent, isdir);
+ } else {
+ if (unlikely(d_is_negative(a->dst_h_dentry)))
+ goto out;
+ h_inode = d_inode(a->dst_h_dentry);
+ if (h_inode->i_nlink)
+ err = au_may_del(a->dst_dentry, a->btgt,
+ a->dst_h_parent, isdir);
+ }
+
+out:
+ if (unlikely(err == -ENOENT || err == -EEXIST))
+ err = -EIO;
+ AuTraceErr(err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * locking order
+ * (VFS)
+ * - src_dir and dir by lock_rename()
+ * - inode if it exists
+ * (aufs)
+ * - lock all
+ * + src_dentry and dentry by aufs_read_and_write_lock2() which calls,
+ * + si_read_lock
+ * + di_write_lock2_child()
+ * + di_write_lock_child()
+ * + ii_write_lock_child()
+ * + di_write_lock_child2()
+ * + ii_write_lock_child2()
+ * + src_parent and parent
+ * + di_write_lock_parent()
+ * + ii_write_lock_parent()
+ * + di_write_lock_parent2()
+ * + ii_write_lock_parent2()
+ * + lower src_dir and dir by vfsub_lock_rename()
+ *   + verify every relationship between child and parent. if any
+ *     of them fails, unlock all and return -EBUSY.
+ */
+static void au_ren_unlock(struct au_ren_args *a)
+{
+ vfsub_unlock_rename(a->src_h_parent, a->src_hdir,
+ a->dst_h_parent, a->dst_hdir);
+ if (au_ftest_ren(a->flags, MNT_WRITE))
+ vfsub_mnt_drop_write(au_br_mnt(a->br));
+}
+
+static int au_ren_lock(struct au_ren_args *a)
+{
+ int err;
+ unsigned int udba;
+
+ err = 0;
+ a->src_h_parent = au_h_dptr(a->src_parent, a->btgt);
+ a->src_hdir = au_hi(a->src_dir, a->btgt);
+ a->dst_h_parent = au_h_dptr(a->dst_parent, a->btgt);
+ a->dst_hdir = au_hi(a->dst_dir, a->btgt);
+
+ err = vfsub_mnt_want_write(au_br_mnt(a->br));
+ if (unlikely(err))
+ goto out;
+ au_fset_ren(a->flags, MNT_WRITE);
+ a->h_trap = vfsub_lock_rename(a->src_h_parent, a->src_hdir,
+ a->dst_h_parent, a->dst_hdir);
+ udba = au_opt_udba(a->src_dentry->d_sb);
+ if (unlikely(a->src_hdir->hi_inode != d_inode(a->src_h_parent)
+ || a->dst_hdir->hi_inode != d_inode(a->dst_h_parent)))
+ err = au_busy_or_stale();
+ if (!err && au_dbstart(a->src_dentry) == a->btgt)
+ err = au_h_verify(a->src_h_dentry, udba,
+ d_inode(a->src_h_parent), a->src_h_parent,
+ a->br);
+ if (!err && au_dbstart(a->dst_dentry) == a->btgt)
+ err = au_h_verify(a->dst_h_dentry, udba,
+ d_inode(a->dst_h_parent), a->dst_h_parent,
+ a->br);
+ if (!err)
+ goto out; /* success */
+
+ err = au_busy_or_stale();
+ au_ren_unlock(a);
+
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void au_ren_refresh_dir(struct au_ren_args *a)
+{
+ struct inode *dir;
+
+ dir = a->dst_dir;
+ dir->i_version++;
+ if (au_ftest_ren(a->flags, ISDIR)) {
+		/* is this update defined in POSIX? */
+ au_cpup_attr_timesizes(a->src_inode);
+ au_cpup_attr_nlink(dir, /*force*/1);
+ }
+
+ au_dir_ts(dir, a->btgt);
+
+ if (au_ftest_ren(a->flags, ISSAMEDIR))
+ return;
+
+ dir = a->src_dir;
+ dir->i_version++;
+ if (au_ftest_ren(a->flags, ISDIR))
+ au_cpup_attr_nlink(dir, /*force*/1);
+ au_dir_ts(dir, a->btgt);
+}
+
+static void au_ren_refresh(struct au_ren_args *a)
+{
+ aufs_bindex_t bend, bindex;
+ struct dentry *d, *h_d;
+ struct inode *i, *h_i;
+ struct super_block *sb;
+
+ d = a->dst_dentry;
+ d_drop(d);
+ if (a->h_dst)
+ /* already dget-ed by au_ren_or_cpup() */
+ au_set_h_dptr(d, a->btgt, a->h_dst);
+
+ i = a->dst_inode;
+ if (i) {
+ if (!au_ftest_ren(a->flags, ISDIR))
+ vfsub_drop_nlink(i);
+ else {
+ vfsub_dead_dir(i);
+ au_cpup_attr_timesizes(i);
+ }
+ au_update_dbrange(d, /*do_put_zero*/1);
+ } else {
+ bend = a->btgt;
+ for (bindex = au_dbstart(d); bindex < bend; bindex++)
+ au_set_h_dptr(d, bindex, NULL);
+ bend = au_dbend(d);
+ for (bindex = a->btgt + 1; bindex <= bend; bindex++)
+ au_set_h_dptr(d, bindex, NULL);
+ au_update_dbrange(d, /*do_put_zero*/0);
+ }
+
+ d = a->src_dentry;
+ au_set_dbwh(d, -1);
+ bend = au_dbend(d);
+ for (bindex = a->btgt + 1; bindex <= bend; bindex++) {
+ h_d = au_h_dptr(d, bindex);
+ if (h_d)
+ au_set_h_dptr(d, bindex, NULL);
+ }
+ au_set_dbend(d, a->btgt);
+
+ sb = d->d_sb;
+ i = a->src_inode;
+ if (au_opt_test(au_mntflags(sb), PLINK) && au_plink_test(i))
+ return; /* success */
+
+ bend = au_ibend(i);
+ for (bindex = a->btgt + 1; bindex <= bend; bindex++) {
+ h_i = au_h_iptr(i, bindex);
+ if (h_i) {
+ au_xino_write(sb, bindex, h_i->i_ino, /*ino*/0);
+ /* ignore this error */
+ au_set_h_iptr(i, bindex, NULL, 0);
+ }
+ }
+ au_set_ibend(i, a->btgt);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* mainly for link(2) and rename(2) */
+int au_wbr(struct dentry *dentry, aufs_bindex_t btgt)
+{
+ aufs_bindex_t bdiropq, bwh;
+ struct dentry *parent;
+ struct au_branch *br;
+
+ parent = dentry->d_parent;
+ IMustLock(d_inode(parent)); /* dir is locked */
+
+ bdiropq = au_dbdiropq(parent);
+ bwh = au_dbwh(dentry);
+ br = au_sbr(dentry->d_sb, btgt);
+ if (au_br_rdonly(br)
+ || (0 <= bdiropq && bdiropq < btgt)
+ || (0 <= bwh && bwh < btgt))
+ btgt = -1;
+
+ AuDbg("btgt %d\n", btgt);
+ return btgt;
+}
+
+/* sets src_bstart, dst_bstart and btgt */
+static int au_ren_wbr(struct au_ren_args *a)
+{
+ int err;
+ struct au_wr_dir_args wr_dir_args = {
+ /* .force_btgt = -1, */
+ .flags = AuWrDir_ADD_ENTRY
+ };
+
+ a->src_bstart = au_dbstart(a->src_dentry);
+ a->dst_bstart = au_dbstart(a->dst_dentry);
+ if (au_ftest_ren(a->flags, ISDIR))
+ au_fset_wrdir(wr_dir_args.flags, ISDIR);
+ wr_dir_args.force_btgt = a->src_bstart;
+ if (a->dst_inode && a->dst_bstart < a->src_bstart)
+ wr_dir_args.force_btgt = a->dst_bstart;
+ wr_dir_args.force_btgt = au_wbr(a->dst_dentry, wr_dir_args.force_btgt);
+ err = au_wr_dir(a->dst_dentry, a->src_dentry, &wr_dir_args);
+ a->btgt = err;
+
+ return err;
+}
+
+static void au_ren_dt(struct au_ren_args *a)
+{
+ a->h_path.dentry = a->src_h_parent;
+ au_dtime_store(a->src_dt + AuPARENT, a->src_parent, &a->h_path);
+ if (!au_ftest_ren(a->flags, ISSAMEDIR)) {
+ a->h_path.dentry = a->dst_h_parent;
+ au_dtime_store(a->dst_dt + AuPARENT, a->dst_parent, &a->h_path);
+ }
+
+ au_fclr_ren(a->flags, DT_DSTDIR);
+ if (!au_ftest_ren(a->flags, ISDIR))
+ return;
+
+ a->h_path.dentry = a->src_h_dentry;
+ au_dtime_store(a->src_dt + AuCHILD, a->src_dentry, &a->h_path);
+ if (d_is_positive(a->dst_h_dentry)) {
+ au_fset_ren(a->flags, DT_DSTDIR);
+ a->h_path.dentry = a->dst_h_dentry;
+ au_dtime_store(a->dst_dt + AuCHILD, a->dst_dentry, &a->h_path);
+ }
+}
+
+static void au_ren_rev_dt(int err, struct au_ren_args *a)
+{
+ struct dentry *h_d;
+ struct inode *h_inode;
+
+ au_dtime_revert(a->src_dt + AuPARENT);
+ if (!au_ftest_ren(a->flags, ISSAMEDIR))
+ au_dtime_revert(a->dst_dt + AuPARENT);
+
+ if (au_ftest_ren(a->flags, ISDIR) && err != -EIO) {
+ h_d = a->src_dt[AuCHILD].dt_h_path.dentry;
+ h_inode = d_inode(h_d);
+ inode_lock_nested(h_inode, AuLsc_I_CHILD);
+ au_dtime_revert(a->src_dt + AuCHILD);
+ inode_unlock(h_inode);
+
+ if (au_ftest_ren(a->flags, DT_DSTDIR)) {
+ h_d = a->dst_dt[AuCHILD].dt_h_path.dentry;
+ h_inode = d_inode(h_d);
+ inode_lock_nested(h_inode, AuLsc_I_CHILD);
+ au_dtime_revert(a->dst_dt + AuCHILD);
+ inode_unlock(h_inode);
+ }
+ }
+}
+
+/* ---------------------------------------------------------------------- */
+
+int aufs_rename(struct inode *_src_dir, struct dentry *_src_dentry,
+ struct inode *_dst_dir, struct dentry *_dst_dentry)
+{
+ int err, flags;
+ /* reduce stack space */
+ struct au_ren_args *a;
+
+ AuDbg("%pd, %pd\n", _src_dentry, _dst_dentry);
+ IMustLock(_src_dir);
+ IMustLock(_dst_dir);
+
+ err = -ENOMEM;
+ BUILD_BUG_ON(sizeof(*a) > PAGE_SIZE);
+ a = kzalloc(sizeof(*a), GFP_NOFS);
+ if (unlikely(!a))
+ goto out;
+
+ a->src_dir = _src_dir;
+ a->src_dentry = _src_dentry;
+ a->src_inode = NULL;
+ if (d_really_is_positive(a->src_dentry))
+ a->src_inode = d_inode(a->src_dentry);
+ a->src_parent = a->src_dentry->d_parent; /* dir inode is locked */
+ a->dst_dir = _dst_dir;
+ a->dst_dentry = _dst_dentry;
+ a->dst_inode = NULL;
+ if (d_really_is_positive(a->dst_dentry))
+ a->dst_inode = d_inode(a->dst_dentry);
+ a->dst_parent = a->dst_dentry->d_parent; /* dir inode is locked */
+ if (a->dst_inode) {
+ IMustLock(a->dst_inode);
+ au_igrab(a->dst_inode);
+ }
+
+ err = -ENOTDIR;
+ flags = AuLock_FLUSH | AuLock_NOPLM | AuLock_GEN;
+ if (d_is_dir(a->src_dentry)) {
+ au_fset_ren(a->flags, ISDIR);
+ if (unlikely(d_really_is_positive(a->dst_dentry)
+ && !d_is_dir(a->dst_dentry)))
+ goto out_free;
+ flags |= AuLock_DIRS;
+ }
+ err = aufs_read_and_write_lock2(a->dst_dentry, a->src_dentry, flags);
+ if (unlikely(err))
+ goto out_free;
+
+ err = au_d_hashed_positive(a->src_dentry);
+ if (unlikely(err))
+ goto out_unlock;
+ err = -ENOENT;
+ if (a->dst_inode) {
+ /*
+ * If it is a dir, the VFS unhashes dst_dentry before calling this
+ * function, so we cannot rely upon d_unhashed().
+ */
+ if (unlikely(!a->dst_inode->i_nlink))
+ goto out_unlock;
+ if (!S_ISDIR(a->dst_inode->i_mode)) {
+ err = au_d_hashed_positive(a->dst_dentry);
+ if (unlikely(err))
+ goto out_unlock;
+ } else if (unlikely(IS_DEADDIR(a->dst_inode)))
+ goto out_unlock;
+ } else if (unlikely(d_unhashed(a->dst_dentry)))
+ goto out_unlock;
+
+ /*
+ * Is this even possible?
+ * Yes, it happened (in linux-3.3-rcN), though the reason is unknown;
+ * the real problem may lie somewhere else.
+ */
+ err = -EINVAL;
+ if (unlikely(d_inode(a->dst_parent) == d_inode(a->src_dentry)))
+ goto out_unlock;
+
+ au_fset_ren(a->flags, ISSAMEDIR); /* temporary */
+ di_write_lock_parent(a->dst_parent);
+
+ /* which branch we process */
+ err = au_ren_wbr(a);
+ if (unlikely(err < 0))
+ goto out_parent;
+ a->br = au_sbr(a->dst_dentry->d_sb, a->btgt);
+ a->h_path.mnt = au_br_mnt(a->br);
+
+ /* are they available to be renamed */
+ err = au_ren_may_dir(a);
+ if (unlikely(err))
+ goto out_children;
+
+ /* prepare the writable parent dir on the same branch */
+ if (a->dst_bstart == a->btgt) {
+ au_fset_ren(a->flags, WHDST);
+ } else {
+ err = au_cpup_dirs(a->dst_dentry, a->btgt);
+ if (unlikely(err))
+ goto out_children;
+ }
+
+ if (a->src_dir != a->dst_dir) {
+ /*
+ * this temporary unlock is safe,
+ * because the i_mutex of both dirs is held.
+ */
+ di_write_unlock(a->dst_parent);
+ di_write_lock_parent(a->src_parent);
+ err = au_wr_dir_need_wh(a->src_dentry,
+ au_ftest_ren(a->flags, ISDIR),
+ &a->btgt);
+ di_write_unlock(a->src_parent);
+ di_write_lock2_parent(a->src_parent, a->dst_parent, /*isdir*/1);
+ au_fclr_ren(a->flags, ISSAMEDIR);
+ } else
+ err = au_wr_dir_need_wh(a->src_dentry,
+ au_ftest_ren(a->flags, ISDIR),
+ &a->btgt);
+ if (unlikely(err < 0))
+ goto out_children;
+ if (err)
+ au_fset_ren(a->flags, WHSRC);
+
+ /* cpup src */
+ if (a->src_bstart != a->btgt) {
+ struct au_pin pin;
+
+ err = au_pin(&pin, a->src_dentry, a->btgt,
+ au_opt_udba(a->src_dentry->d_sb),
+ AuPin_DI_LOCKED | AuPin_MNT_WRITE);
+ if (!err) {
+ struct au_cp_generic cpg = {
+ .dentry = a->src_dentry,
+ .bdst = a->btgt,
+ .bsrc = a->src_bstart,
+ .len = -1,
+ .pin = &pin,
+ .flags = AuCpup_DTIME | AuCpup_HOPEN
+ };
+ AuDebugOn(au_dbstart(a->src_dentry) != a->src_bstart);
+ err = au_sio_cpup_simple(&cpg);
+ au_unpin(&pin);
+ }
+ if (unlikely(err))
+ goto out_children;
+ a->src_bstart = a->btgt;
+ a->src_h_dentry = au_h_dptr(a->src_dentry, a->btgt);
+ au_fset_ren(a->flags, WHSRC);
+ }
+
+ /* lock them all */
+ err = au_ren_lock(a);
+ if (unlikely(err))
+ /* leave the copied-up one */
+ goto out_children;
+
+ if (!au_opt_test(au_mntflags(a->dst_dir->i_sb), UDBA_NONE))
+ err = au_may_ren(a);
+ else if (unlikely(a->dst_dentry->d_name.len > AUFS_MAX_NAMELEN))
+ err = -ENAMETOOLONG;
+ if (unlikely(err))
+ goto out_hdir;
+
+ /* store timestamps to be revertible */
+ au_ren_dt(a);
+
+ /* here we go */
+ err = do_rename(a);
+ if (unlikely(err))
+ goto out_dt;
+
+ /* update dir attributes */
+ au_ren_refresh_dir(a);
+
+ /* dput/iput all lower dentries */
+ au_ren_refresh(a);
+
+ goto out_hdir; /* success */
+
+out_dt:
+ au_ren_rev_dt(err, a);
+out_hdir:
+ au_ren_unlock(a);
+out_children:
+ au_nhash_wh_free(&a->whlist);
+ if (err && a->dst_inode && a->dst_bstart != a->btgt) {
+ AuDbg("bstart %d, btgt %d\n", a->dst_bstart, a->btgt);
+ au_set_h_dptr(a->dst_dentry, a->btgt, NULL);
+ au_set_dbstart(a->dst_dentry, a->dst_bstart);
+ }
+out_parent:
+ if (!err)
+ d_move(a->src_dentry, a->dst_dentry);
+ else {
+ au_update_dbstart(a->dst_dentry);
+ if (!a->dst_inode)
+ d_drop(a->dst_dentry);
+ }
+ if (au_ftest_ren(a->flags, ISSAMEDIR))
+ di_write_unlock(a->dst_parent);
+ else
+ di_write_unlock2(a->src_parent, a->dst_parent);
+out_unlock:
+ aufs_read_and_write_unlock2(a->dst_dentry, a->src_dentry);
+out_free:
+ iput(a->dst_inode);
+ if (a->thargs)
+ au_whtmp_rmdir_free(a->thargs);
+ kfree(a);
+out:
+ AuTraceErr(err);
+ return err;
+}
diff --git a/fs/aufs/iinfo.c b/fs/aufs/iinfo.c
new file mode 100644
index 000000000..67ef672a0
--- /dev/null
+++ b/fs/aufs/iinfo.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * inode private data
+ */
+
+#include "aufs.h"
+
+struct inode *au_h_iptr(struct inode *inode, aufs_bindex_t bindex)
+{
+ struct inode *h_inode;
+
+ IiMustAnyLock(inode);
+
+ h_inode = au_ii(inode)->ii_hinode[0 + bindex].hi_inode;
+ AuDebugOn(h_inode && atomic_read(&h_inode->i_count) <= 0);
+ return h_inode;
+}
+
+/* todo: hard/soft set? */
+void au_hiput(struct au_hinode *hinode)
+{
+ au_hn_free(hinode);
+ dput(hinode->hi_whdentry);
+ iput(hinode->hi_inode);
+}
+
+unsigned int au_hi_flags(struct inode *inode, int isdir)
+{
+ unsigned int flags;
+ const unsigned int mnt_flags = au_mntflags(inode->i_sb);
+
+ flags = 0;
+ if (au_opt_test(mnt_flags, XINO))
+ au_fset_hi(flags, XINO);
+ if (isdir && au_opt_test(mnt_flags, UDBA_HNOTIFY))
+ au_fset_hi(flags, HNOTIFY);
+ return flags;
+}
+
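+/*
+ * replace the lower inode at bindex: put the old one (if any), record
+ * the owning branch id, update the xino translation map when the XINO
+ * flag is set, and attach hnotify when requested and supported by the
+ * branch.
+ */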
+void au_set_h_iptr(struct inode *inode, aufs_bindex_t bindex,
+ struct inode *h_inode, unsigned int flags)
+{
+ struct au_hinode *hinode;
+ struct inode *hi;
+ struct au_iinfo *iinfo = au_ii(inode);
+
+ IiMustWriteLock(inode);
+
+ hinode = iinfo->ii_hinode + bindex;
+ hi = hinode->hi_inode;
+ AuDebugOn(h_inode && atomic_read(&h_inode->i_count) <= 0);
+
+ if (hi)
+ au_hiput(hinode);
+ hinode->hi_inode = h_inode;
+ if (h_inode) {
+ int err;
+ struct super_block *sb = inode->i_sb;
+ struct au_branch *br;
+
+ AuDebugOn(inode->i_mode
+ && (h_inode->i_mode & S_IFMT)
+ != (inode->i_mode & S_IFMT));
+ if (bindex == iinfo->ii_bstart)
+ au_cpup_igen(inode, h_inode);
+ br = au_sbr(sb, bindex);
+ hinode->hi_id = br->br_id;
+ if (au_ftest_hi(flags, XINO)) {
+ err = au_xino_write(sb, bindex, h_inode->i_ino,
+ inode->i_ino);
+ if (unlikely(err))
+ AuIOErr1("failed au_xino_write() %d\n", err);
+ }
+
+ if (au_ftest_hi(flags, HNOTIFY)
+ && au_br_hnotifyable(br->br_perm)) {
+ err = au_hn_alloc(hinode, inode);
+ if (unlikely(err))
+ AuIOErr1("au_hn_alloc() %d\n", err);
+ }
+ }
+}
+
+void au_set_hi_wh(struct inode *inode, aufs_bindex_t bindex,
+ struct dentry *h_wh)
+{
+ struct au_hinode *hinode;
+
+ IiMustWriteLock(inode);
+
+ hinode = au_ii(inode)->ii_hinode + bindex;
+ AuDebugOn(hinode->hi_whdentry);
+ hinode->hi_whdentry = h_wh;
+}
+
+void au_update_iigen(struct inode *inode, int half)
+{
+ struct au_iinfo *iinfo;
+ struct au_iigen *iigen;
+ unsigned int sigen;
+
+ sigen = au_sigen(inode->i_sb);
+ iinfo = au_ii(inode);
+ iigen = &iinfo->ii_generation;
+ spin_lock(&iigen->ig_spin);
+ iigen->ig_generation = sigen;
+ if (half)
+ au_ig_fset(iigen->ig_flags, HALF_REFRESHED);
+ else
+ au_ig_fclr(iigen->ig_flags, HALF_REFRESHED);
+ spin_unlock(&iigen->ig_spin);
+}
+
+/* it may be called at remount time, too */
+void au_update_ibrange(struct inode *inode, int do_put_zero)
+{
+ struct au_iinfo *iinfo;
+ aufs_bindex_t bindex, bend;
+
+ iinfo = au_ii(inode);
+ if (!iinfo)
+ return;
+
+ IiMustWriteLock(inode);
+
+ if (do_put_zero && iinfo->ii_bstart >= 0) {
+ for (bindex = iinfo->ii_bstart; bindex <= iinfo->ii_bend;
+ bindex++) {
+ struct inode *h_i;
+
+ h_i = iinfo->ii_hinode[0 + bindex].hi_inode;
+ if (h_i
+ && !h_i->i_nlink
+ && !(h_i->i_state & I_LINKABLE))
+ au_set_h_iptr(inode, bindex, NULL, 0);
+ }
+ }
+
+ iinfo->ii_bstart = -1;
+ iinfo->ii_bend = -1;
+ bend = au_sbend(inode->i_sb);
+ for (bindex = 0; bindex <= bend; bindex++)
+ if (iinfo->ii_hinode[0 + bindex].hi_inode) {
+ iinfo->ii_bstart = bindex;
+ break;
+ }
+ if (iinfo->ii_bstart >= 0)
+ for (bindex = bend; bindex >= iinfo->ii_bstart; bindex--)
+ if (iinfo->ii_hinode[0 + bindex].hi_inode) {
+ iinfo->ii_bend = bindex;
+ break;
+ }
+ AuDebugOn(iinfo->ii_bstart > iinfo->ii_bend);
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_icntnr_init_once(void *_c)
+{
+ struct au_icntnr *c = _c;
+ struct au_iinfo *iinfo = &c->iinfo;
+ static struct lock_class_key aufs_ii;
+
+ spin_lock_init(&iinfo->ii_generation.ig_spin);
+ au_rw_init(&iinfo->ii_rwsem);
+ au_rw_class(&iinfo->ii_rwsem, &aufs_ii);
+ inode_init_once(&c->vfs_inode);
+}
+
+int au_iinfo_init(struct inode *inode)
+{
+ struct au_iinfo *iinfo;
+ struct super_block *sb;
+ int nbr, i;
+
+ sb = inode->i_sb;
+ iinfo = &(container_of(inode, struct au_icntnr, vfs_inode)->iinfo);
+ nbr = au_sbend(sb) + 1;
+ if (unlikely(nbr <= 0))
+ nbr = 1;
+ iinfo->ii_hinode = kcalloc(nbr, sizeof(*iinfo->ii_hinode), GFP_NOFS);
+ if (iinfo->ii_hinode) {
+ au_ninodes_inc(sb);
+ for (i = 0; i < nbr; i++)
+ iinfo->ii_hinode[i].hi_id = -1;
+
+ iinfo->ii_generation.ig_generation = au_sigen(sb);
+ iinfo->ii_bstart = -1;
+ iinfo->ii_bend = -1;
+ iinfo->ii_vdir = NULL;
+ return 0;
+ }
+ return -ENOMEM;
+}
+
+int au_ii_realloc(struct au_iinfo *iinfo, int nbr)
+{
+ int err, sz;
+ struct au_hinode *hip;
+
+ AuRwMustWriteLock(&iinfo->ii_rwsem);
+
+ err = -ENOMEM;
+ sz = sizeof(*hip) * (iinfo->ii_bend + 1);
+ if (!sz)
+ sz = sizeof(*hip);
+ hip = au_kzrealloc(iinfo->ii_hinode, sz, sizeof(*hip) * nbr, GFP_NOFS);
+ if (hip) {
+ iinfo->ii_hinode = hip;
+ err = 0;
+ }
+
+ return err;
+}
+
+void au_iinfo_fin(struct inode *inode)
+{
+ struct au_iinfo *iinfo;
+ struct au_hinode *hi;
+ struct super_block *sb;
+ aufs_bindex_t bindex, bend;
+ const unsigned char unlinked = !inode->i_nlink;
+
+ iinfo = au_ii(inode);
+ /* bad_inode case */
+ if (!iinfo)
+ return;
+
+ sb = inode->i_sb;
+ au_ninodes_dec(sb);
+ if (si_pid_test(sb))
+ au_xino_delete_inode(inode, unlinked);
+ else {
+ /*
+ * it is safe to hide the dependency between sbinfo and
+ * sb->s_umount.
+ */
+ lockdep_off();
+ si_noflush_read_lock(sb);
+ au_xino_delete_inode(inode, unlinked);
+ si_read_unlock(sb);
+ lockdep_on();
+ }
+
+ if (iinfo->ii_vdir)
+ au_vdir_free(iinfo->ii_vdir);
+
+ bindex = iinfo->ii_bstart;
+ if (bindex >= 0) {
+ hi = iinfo->ii_hinode + bindex;
+ bend = iinfo->ii_bend;
+ while (bindex++ <= bend) {
+ if (hi->hi_inode)
+ au_hiput(hi);
+ hi++;
+ }
+ }
+ kfree(iinfo->ii_hinode);
+ iinfo->ii_hinode = NULL;
+ AuRwDestroy(&iinfo->ii_rwsem);
+}
diff --git a/fs/aufs/inode.c b/fs/aufs/inode.c
new file mode 100644
index 000000000..5a87727ba
--- /dev/null
+++ b/fs/aufs/inode.c
@@ -0,0 +1,514 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * inode functions
+ */
+
+#include "aufs.h"
+
+struct inode *au_igrab(struct inode *inode)
+{
+ if (inode) {
+ AuDebugOn(!atomic_read(&inode->i_count));
+ ihold(inode);
+ }
+ return inode;
+}
+
+static void au_refresh_hinode_attr(struct inode *inode, int do_version)
+{
+ au_cpup_attr_all(inode, /*force*/0);
+ au_update_iigen(inode, /*half*/1);
+ if (do_version)
+ inode->i_version++;
+}
+
+static int au_ii_refresh(struct inode *inode, int *update)
+{
+ int err, e;
+ umode_t type;
+ aufs_bindex_t bindex, new_bindex;
+ struct super_block *sb;
+ struct au_iinfo *iinfo;
+ struct au_hinode *p, *q, tmp;
+
+ IiMustWriteLock(inode);
+
+ *update = 0;
+ sb = inode->i_sb;
+ type = inode->i_mode & S_IFMT;
+ iinfo = au_ii(inode);
+ err = au_ii_realloc(iinfo, au_sbend(sb) + 1);
+ if (unlikely(err))
+ goto out;
+
+ AuDebugOn(iinfo->ii_bstart < 0);
+ p = iinfo->ii_hinode + iinfo->ii_bstart;
+ for (bindex = iinfo->ii_bstart; bindex <= iinfo->ii_bend;
+ bindex++, p++) {
+ if (!p->hi_inode)
+ continue;
+
+ AuDebugOn(type != (p->hi_inode->i_mode & S_IFMT));
+ new_bindex = au_br_index(sb, p->hi_id);
+ if (new_bindex == bindex)
+ continue;
+
+ if (new_bindex < 0) {
+ *update = 1;
+ au_hiput(p);
+ p->hi_inode = NULL;
+ continue;
+ }
+
+ if (new_bindex < iinfo->ii_bstart)
+ iinfo->ii_bstart = new_bindex;
+ if (iinfo->ii_bend < new_bindex)
+ iinfo->ii_bend = new_bindex;
+ /* swap the two lower inodes, and examine this index again */
+ q = iinfo->ii_hinode + new_bindex;
+ tmp = *q;
+ *q = *p;
+ *p = tmp;
+ if (tmp.hi_inode) {
+ bindex--;
+ p--;
+ }
+ }
+ au_update_ibrange(inode, /*do_put_zero*/0);
+ e = au_dy_irefresh(inode);
+ if (unlikely(e && !err))
+ err = e;
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+void au_refresh_iop(struct inode *inode, int force_getattr)
+{
+ int type;
+ struct au_sbinfo *sbi = au_sbi(inode->i_sb);
+ const struct inode_operations *iop
+ = force_getattr ? aufs_iop : sbi->si_iop_array;
+
+ if (inode->i_op == iop)
+ return;
+
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFDIR:
+ type = AuIop_DIR;
+ break;
+ case S_IFLNK:
+ type = AuIop_SYMLINK;
+ break;
+ default:
+ type = AuIop_OTHER;
+ break;
+ }
+
+ inode->i_op = iop + type;
+ /* unnecessary smp_wmb() */
+}
+
+int au_refresh_hinode_self(struct inode *inode)
+{
+ int err, update;
+
+ err = au_ii_refresh(inode, &update);
+ if (!err)
+ au_refresh_hinode_attr(inode, update && S_ISDIR(inode->i_mode));
+
+ AuTraceErr(err);
+ return err;
+}
+
+int au_refresh_hinode(struct inode *inode, struct dentry *dentry)
+{
+ int err, e, update;
+ unsigned int flags;
+ umode_t mode;
+ aufs_bindex_t bindex, bend;
+ unsigned char isdir;
+ struct au_hinode *p;
+ struct au_iinfo *iinfo;
+
+ err = au_ii_refresh(inode, &update);
+ if (unlikely(err))
+ goto out;
+
+ update = 0;
+ iinfo = au_ii(inode);
+ p = iinfo->ii_hinode + iinfo->ii_bstart;
+ mode = (inode->i_mode & S_IFMT);
+ isdir = S_ISDIR(mode);
+ flags = au_hi_flags(inode, isdir);
+ bend = au_dbend(dentry);
+ for (bindex = au_dbstart(dentry); bindex <= bend; bindex++) {
+ struct inode *h_i, *h_inode;
+ struct dentry *h_d;
+
+ h_d = au_h_dptr(dentry, bindex);
+ if (!h_d || d_is_negative(h_d))
+ continue;
+
+ h_inode = d_inode(h_d);
+ AuDebugOn(mode != (h_inode->i_mode & S_IFMT));
+ if (iinfo->ii_bstart <= bindex && bindex <= iinfo->ii_bend) {
+ h_i = au_h_iptr(inode, bindex);
+ if (h_i) {
+ if (h_i == h_inode)
+ continue;
+ err = -EIO;
+ break;
+ }
+ }
+ if (bindex < iinfo->ii_bstart)
+ iinfo->ii_bstart = bindex;
+ if (iinfo->ii_bend < bindex)
+ iinfo->ii_bend = bindex;
+ au_set_h_iptr(inode, bindex, au_igrab(h_inode), flags);
+ update = 1;
+ }
+ au_update_ibrange(inode, /*do_put_zero*/0);
+ e = au_dy_irefresh(inode);
+ if (unlikely(e && !err))
+ err = e;
+ if (!err)
+ au_refresh_hinode_attr(inode, update && isdir);
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
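+/*
+ * fill a freshly allocated aufs inode from its positive lower dentries:
+ * choose i_op/i_fop by the file type of the topmost lower inode, then
+ * grab and attach every lower inode between dbstart and btail.
+ */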
+static int set_inode(struct inode *inode, struct dentry *dentry)
+{
+ int err;
+ unsigned int flags;
+ umode_t mode;
+ aufs_bindex_t bindex, bstart, btail;
+ unsigned char isdir;
+ struct dentry *h_dentry;
+ struct inode *h_inode;
+ struct au_iinfo *iinfo;
+ struct inode_operations *iop;
+
+ IiMustWriteLock(inode);
+
+ err = 0;
+ isdir = 0;
+ iop = au_sbi(inode->i_sb)->si_iop_array;
+ bstart = au_dbstart(dentry);
+ h_dentry = au_h_dptr(dentry, bstart);
+ h_inode = d_inode(h_dentry);
+ mode = h_inode->i_mode;
+ switch (mode & S_IFMT) {
+ case S_IFREG:
+ btail = au_dbtail(dentry);
+ inode->i_op = iop + AuIop_OTHER;
+ inode->i_fop = &aufs_file_fop;
+ err = au_dy_iaop(inode, bstart, h_inode);
+ if (unlikely(err))
+ goto out;
+ break;
+ case S_IFDIR:
+ isdir = 1;
+ btail = au_dbtaildir(dentry);
+ inode->i_op = iop + AuIop_DIR;
+ inode->i_fop = &aufs_dir_fop;
+ break;
+ case S_IFLNK:
+ btail = au_dbtail(dentry);
+ inode->i_op = iop + AuIop_SYMLINK;
+ break;
+ case S_IFBLK:
+ case S_IFCHR:
+ case S_IFIFO:
+ case S_IFSOCK:
+ btail = au_dbtail(dentry);
+ inode->i_op = iop + AuIop_OTHER;
+ init_special_inode(inode, mode, h_inode->i_rdev);
+ break;
+ default:
+ AuIOErr("Unknown file type 0%o\n", mode);
+ err = -EIO;
+ goto out;
+ }
+
+ /* do not set hnotify for whiteouted dirs (SHWH mode) */
+ flags = au_hi_flags(inode, isdir);
+ if (au_opt_test(au_mntflags(dentry->d_sb), SHWH)
+ && au_ftest_hi(flags, HNOTIFY)
+ && dentry->d_name.len > AUFS_WH_PFX_LEN
+ && !memcmp(dentry->d_name.name, AUFS_WH_PFX, AUFS_WH_PFX_LEN))
+ au_fclr_hi(flags, HNOTIFY);
+ iinfo = au_ii(inode);
+ iinfo->ii_bstart = bstart;
+ iinfo->ii_bend = btail;
+ for (bindex = bstart; bindex <= btail; bindex++) {
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (h_dentry)
+ au_set_h_iptr(inode, bindex,
+ au_igrab(d_inode(h_dentry)), flags);
+ }
+ au_cpup_attr_all(inode, /*force*/1);
+ /*
+ * to force calling aufs_get_acl() every time,
+ * do not call cache_no_acl() for aufs inode.
+ */
+
+out:
+ return err;
+}
+
+/*
+ * a successful return leaves iinfo write-locked
+ * minus: errno
+ * zero: success, matched
+ * plus: no error, but unmatched
+ */
+static int reval_inode(struct inode *inode, struct dentry *dentry)
+{
+ int err;
+ unsigned int gen, igflags;
+ aufs_bindex_t bindex, bend;
+ struct inode *h_inode, *h_dinode;
+ struct dentry *h_dentry;
+
+ /*
+ * before this function is called, if aufs already holds any iinfo
+ * lock, it must be only one: the parent dir's.
+ * this can happen with UDBA and an obsoleted inode number.
+ */
+ err = -EIO;
+ if (unlikely(inode->i_ino == parent_ino(dentry)))
+ goto out;
+
+ err = 1;
+ ii_write_lock_new_child(inode);
+ h_dentry = au_h_dptr(dentry, au_dbstart(dentry));
+ h_dinode = d_inode(h_dentry);
+ bend = au_ibend(inode);
+ for (bindex = au_ibstart(inode); bindex <= bend; bindex++) {
+ h_inode = au_h_iptr(inode, bindex);
+ if (!h_inode || h_inode != h_dinode)
+ continue;
+
+ err = 0;
+ gen = au_iigen(inode, &igflags);
+ if (gen == au_digen(dentry)
+ && !au_ig_ftest(igflags, HALF_REFRESHED))
+ break;
+
+ /* fully refresh inode using dentry */
+ err = au_refresh_hinode(inode, dentry);
+ if (!err)
+ au_update_iigen(inode, /*half*/0);
+ break;
+ }
+
+ if (unlikely(err))
+ ii_write_unlock(inode);
+out:
+ return err;
+}
+
+int au_ino(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+ unsigned int d_type, ino_t *ino)
+{
+ int err;
+ struct mutex *mtx;
+
+ /* prevent hardlinked inode number from race condition */
+ mtx = NULL;
+ if (d_type != DT_DIR) {
+ mtx = &au_sbr(sb, bindex)->br_xino.xi_nondir_mtx;
+ mutex_lock(mtx);
+ }
+ err = au_xino_read(sb, bindex, h_ino, ino);
+ if (unlikely(err))
+ goto out;
+
+ if (!*ino) {
+ err = -EIO;
+ *ino = au_xino_new_ino(sb);
+ if (unlikely(!*ino))
+ goto out;
+ err = au_xino_write(sb, bindex, h_ino, *ino);
+ if (unlikely(err))
+ goto out;
+ }
+
+out:
+ if (mtx)
+ mutex_unlock(mtx);
+ return err;
+}
+
+/* a successful return leaves iinfo write-locked */
+/* todo: return with unlocked? */
+struct inode *au_new_inode(struct dentry *dentry, int must_new)
+{
+ struct inode *inode, *h_inode;
+ struct dentry *h_dentry;
+ struct super_block *sb;
+ struct mutex *mtx;
+ ino_t h_ino, ino;
+ int err;
+ aufs_bindex_t bstart;
+
+ sb = dentry->d_sb;
+ bstart = au_dbstart(dentry);
+ h_dentry = au_h_dptr(dentry, bstart);
+ h_inode = d_inode(h_dentry);
+ h_ino = h_inode->i_ino;
+
+ /*
+ * prevent racing between hardlinks under different
+ * parents.
+ */
+ mtx = NULL;
+ if (!d_is_dir(h_dentry))
+ mtx = &au_sbr(sb, bstart)->br_xino.xi_nondir_mtx;
+
+new_ino:
+ if (mtx)
+ mutex_lock(mtx);
+ err = au_xino_read(sb, bstart, h_ino, &ino);
+ inode = ERR_PTR(err);
+ if (unlikely(err))
+ goto out;
+
+ if (!ino) {
+ ino = au_xino_new_ino(sb);
+ if (unlikely(!ino)) {
+ inode = ERR_PTR(-EIO);
+ goto out;
+ }
+ }
+
+ AuDbg("i%lu\n", (unsigned long)ino);
+ inode = au_iget_locked(sb, ino);
+ err = PTR_ERR(inode);
+ if (IS_ERR(inode))
+ goto out;
+
+ AuDbg("%lx, new %d\n", inode->i_state, !!(inode->i_state & I_NEW));
+ if (inode->i_state & I_NEW) {
+ /* verbose coding for lock class name */
+ if (unlikely(d_is_symlink(h_dentry)))
+ au_rw_class(&au_ii(inode)->ii_rwsem,
+ au_lc_key + AuLcSymlink_IIINFO);
+ else if (unlikely(d_is_dir(h_dentry)))
+ au_rw_class(&au_ii(inode)->ii_rwsem,
+ au_lc_key + AuLcDir_IIINFO);
+ else /* likely */
+ au_rw_class(&au_ii(inode)->ii_rwsem,
+ au_lc_key + AuLcNonDir_IIINFO);
+
+ ii_write_lock_new_child(inode);
+ err = set_inode(inode, dentry);
+ if (!err) {
+ unlock_new_inode(inode);
+ goto out; /* success */
+ }
+
+ /*
+ * iget_failed() calls iput(), but we still need to call
+ * ii_write_unlock() after iget_failed(); hence this dirty hack
+ * on i_count.
+ */
+ atomic_inc(&inode->i_count);
+ iget_failed(inode);
+ ii_write_unlock(inode);
+ au_xino_write(sb, bstart, h_ino, /*ino*/0);
+ /* ignore this error */
+ goto out_iput;
+ } else if (!must_new && !IS_DEADDIR(inode) && inode->i_nlink) {
+ /*
+ * horrible race condition between lookup, readdir and copyup
+ * (or something).
+ */
+ if (mtx)
+ mutex_unlock(mtx);
+ err = reval_inode(inode, dentry);
+ if (unlikely(err < 0)) {
+ mtx = NULL;
+ goto out_iput;
+ }
+
+ if (!err) {
+ mtx = NULL;
+ goto out; /* success */
+ } else if (mtx)
+ mutex_lock(mtx);
+ }
+
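+ /*
+ * the aufs inode found via xino cannot be reused (it does not match
+ * this lower inode, is dead, or a brand-new inode was required);
+ * treat the xino entry as stale, clear it and retry with a fresh
+ * inode number.
+ */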
+ if (unlikely(au_test_fs_unique_ino(h_inode)))
+ AuWarn1("Warning: Un-notified UDBA or repeatedly renamed dir,"
+ " b%d, %s, %pd, hi%lu, i%lu.\n",
+ bstart, au_sbtype(h_dentry->d_sb), dentry,
+ (unsigned long)h_ino, (unsigned long)ino);
+ ino = 0;
+ err = au_xino_write(sb, bstart, h_ino, /*ino*/0);
+ if (!err) {
+ iput(inode);
+ if (mtx)
+ mutex_unlock(mtx);
+ goto new_ino;
+ }
+
+out_iput:
+ iput(inode);
+ inode = ERR_PTR(err);
+out:
+ if (mtx)
+ mutex_unlock(mtx);
+ return inode;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_test_ro(struct super_block *sb, aufs_bindex_t bindex,
+ struct inode *inode)
+{
+ int err;
+ struct inode *hi;
+
+ err = au_br_rdonly(au_sbr(sb, bindex));
+
+ /* a pseudo-link after a flush may fall out of the inode's branch range */
+ if (!err
+ && inode
+ && au_ibstart(inode) <= bindex
+ && bindex <= au_ibend(inode)) {
+ /*
+ * permission check is unnecessary since vfsub routine
+ * will be called later
+ */
+ hi = au_h_iptr(inode, bindex);
+ if (hi)
+ err = IS_IMMUTABLE(hi) ? -EROFS : 0;
+ }
+
+ return err;
+}
+
+int au_test_h_perm(struct inode *h_inode, int mask)
+{
+ if (uid_eq(current_fsuid(), GLOBAL_ROOT_UID))
+ return 0;
+ return inode_permission(h_inode, mask);
+}
+
+int au_test_h_perm_sio(struct inode *h_inode, int mask)
+{
+ if (au_test_nfs(h_inode->i_sb)
+ && (mask & MAY_WRITE)
+ && S_ISDIR(h_inode->i_mode))
+ mask |= MAY_READ; /* force permission check */
+ return au_test_h_perm(h_inode, mask);
+}
diff --git a/fs/aufs/inode.h b/fs/aufs/inode.h
new file mode 100644
index 000000000..eeb43d220
--- /dev/null
+++ b/fs/aufs/inode.h
@@ -0,0 +1,672 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * inode operations
+ */
+
+#ifndef __AUFS_INODE_H__
+#define __AUFS_INODE_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fsnotify.h>
+#include "rwsem.h"
+
+struct vfsmount;
+
+struct au_hnotify {
+#ifdef CONFIG_AUFS_HNOTIFY
+#ifdef CONFIG_AUFS_HFSNOTIFY
+ /* never use fsnotify_add_vfsmount_mark() */
+ struct fsnotify_mark hn_mark;
+#endif
+ struct inode *hn_aufs_inode; /* no get/put */
+#endif
+} ____cacheline_aligned_in_smp;
+
+struct au_hinode {
+ struct inode *hi_inode;
+ aufs_bindex_t hi_id;
+#ifdef CONFIG_AUFS_HNOTIFY
+ struct au_hnotify *hi_notify;
+#endif
+
+ /* reference to the copied-up whiteout with get/put */
+ struct dentry *hi_whdentry;
+};
+
+/* ig_flags */
+#define AuIG_HALF_REFRESHED 1
+#define au_ig_ftest(flags, name) ((flags) & AuIG_##name)
+#define au_ig_fset(flags, name) \
+ do { (flags) |= AuIG_##name; } while (0)
+#define au_ig_fclr(flags, name) \
+ do { (flags) &= ~AuIG_##name; } while (0)
+
+struct au_iigen {
+ spinlock_t ig_spin;
+ __u32 ig_generation, ig_flags;
+};
+
+struct au_vdir;
+struct au_iinfo {
+ struct au_iigen ii_generation;
+ struct super_block *ii_hsb1; /* no get/put */
+
+ struct au_rwsem ii_rwsem;
+ aufs_bindex_t ii_bstart, ii_bend;
+ __u32 ii_higen;
+ struct au_hinode *ii_hinode;
+ struct au_vdir *ii_vdir;
+};
+
+struct au_icntnr {
+ struct au_iinfo iinfo;
+ struct inode vfs_inode;
+} ____cacheline_aligned_in_smp;
+
+/* au_pin flags */
+#define AuPin_DI_LOCKED 1
+#define AuPin_MNT_WRITE (1 << 1)
+#define au_ftest_pin(flags, name) ((flags) & AuPin_##name)
+#define au_fset_pin(flags, name) \
+ do { (flags) |= AuPin_##name; } while (0)
+#define au_fclr_pin(flags, name) \
+ do { (flags) &= ~AuPin_##name; } while (0)
+
+struct au_pin {
+ /* input */
+ struct dentry *dentry;
+ unsigned int udba;
+ unsigned char lsc_di, lsc_hi, flags;
+ aufs_bindex_t bindex;
+
+ /* output */
+ struct dentry *parent;
+ struct au_hinode *hdir;
+ struct vfsmount *h_mnt;
+
+ /* temporary unlock/relock for copyup */
+ struct dentry *h_dentry, *h_parent;
+ struct au_branch *br;
+ struct task_struct *task;
+};
+
+void au_pin_hdir_unlock(struct au_pin *p);
+int au_pin_hdir_lock(struct au_pin *p);
+int au_pin_hdir_relock(struct au_pin *p);
+void au_pin_hdir_set_owner(struct au_pin *p, struct task_struct *task);
+void au_pin_hdir_acquire_nest(struct au_pin *p);
+void au_pin_hdir_release(struct au_pin *p);
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct au_iinfo *au_ii(struct inode *inode)
+{
+ struct au_iinfo *iinfo;
+
+ iinfo = &(container_of(inode, struct au_icntnr, vfs_inode)->iinfo);
+ if (iinfo->ii_hinode)
+ return iinfo;
+ return NULL; /* debugging bad_inode case */
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* inode.c */
+struct inode *au_igrab(struct inode *inode);
+void au_refresh_iop(struct inode *inode, int force_getattr);
+int au_refresh_hinode_self(struct inode *inode);
+int au_refresh_hinode(struct inode *inode, struct dentry *dentry);
+int au_ino(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+ unsigned int d_type, ino_t *ino);
+struct inode *au_new_inode(struct dentry *dentry, int must_new);
+int au_test_ro(struct super_block *sb, aufs_bindex_t bindex,
+ struct inode *inode);
+int au_test_h_perm(struct inode *h_inode, int mask);
+int au_test_h_perm_sio(struct inode *h_inode, int mask);
+
+static inline int au_wh_ino(struct super_block *sb, aufs_bindex_t bindex,
+ ino_t h_ino, unsigned int d_type, ino_t *ino)
+{
+#ifdef CONFIG_AUFS_SHWH
+ return au_ino(sb, bindex, h_ino, d_type, ino);
+#else
+ return 0;
+#endif
+}
+
+/* i_op.c */
+enum {
+ AuIop_SYMLINK,
+ AuIop_DIR,
+ AuIop_OTHER,
+ AuIop_Last
+};
+extern struct inode_operations aufs_iop[AuIop_Last],
+ aufs_iop_nogetattr[AuIop_Last];
+
+/* au_wr_dir flags */
+#define AuWrDir_ADD_ENTRY 1
+#define AuWrDir_ISDIR (1 << 1)
+#define AuWrDir_TMPFILE (1 << 2)
+#define au_ftest_wrdir(flags, name) ((flags) & AuWrDir_##name)
+#define au_fset_wrdir(flags, name) \
+ do { (flags) |= AuWrDir_##name; } while (0)
+#define au_fclr_wrdir(flags, name) \
+ do { (flags) &= ~AuWrDir_##name; } while (0)
+
+struct au_wr_dir_args {
+ aufs_bindex_t force_btgt;
+ unsigned char flags;
+};
+int au_wr_dir(struct dentry *dentry, struct dentry *src_dentry,
+ struct au_wr_dir_args *args);
+
+struct dentry *au_pinned_h_parent(struct au_pin *pin);
+void au_pin_init(struct au_pin *pin, struct dentry *dentry,
+ aufs_bindex_t bindex, int lsc_di, int lsc_hi,
+ unsigned int udba, unsigned char flags);
+int au_pin(struct au_pin *pin, struct dentry *dentry, aufs_bindex_t bindex,
+ unsigned int udba, unsigned char flags) __must_check;
+int au_do_pin(struct au_pin *pin) __must_check;
+void au_unpin(struct au_pin *pin);
+int au_reval_for_attr(struct dentry *dentry, unsigned int sigen);
+
+#define AuIcpup_DID_CPUP 1
+#define au_ftest_icpup(flags, name) ((flags) & AuIcpup_##name)
+#define au_fset_icpup(flags, name) \
+ do { (flags) |= AuIcpup_##name; } while (0)
+#define au_fclr_icpup(flags, name) \
+ do { (flags) &= ~AuIcpup_##name; } while (0)
+
+struct au_icpup_args {
+ unsigned char flags;
+ unsigned char pin_flags;
+ aufs_bindex_t btgt;
+ unsigned int udba;
+ struct au_pin pin;
+ struct path h_path;
+ struct inode *h_inode;
+};
+
+int au_pin_and_icpup(struct dentry *dentry, struct iattr *ia,
+ struct au_icpup_args *a);
+
+int au_h_path_getattr(struct dentry *dentry, int force, struct path *h_path);
+
+/* i_op_add.c */
+int au_may_add(struct dentry *dentry, aufs_bindex_t bindex,
+ struct dentry *h_parent, int isdir);
+int aufs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+ dev_t dev);
+int aufs_symlink(struct inode *dir, struct dentry *dentry, const char *symname);
+int aufs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ bool want_excl);
+struct vfsub_aopen_args;
+int au_aopen_or_create(struct inode *dir, struct dentry *dentry,
+ struct vfsub_aopen_args *args);
+int aufs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode);
+int aufs_link(struct dentry *src_dentry, struct inode *dir,
+ struct dentry *dentry);
+int aufs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
+
+/* i_op_del.c */
+int au_wr_dir_need_wh(struct dentry *dentry, int isdir, aufs_bindex_t *bcpup);
+int au_may_del(struct dentry *dentry, aufs_bindex_t bindex,
+ struct dentry *h_parent, int isdir);
+int aufs_unlink(struct inode *dir, struct dentry *dentry);
+int aufs_rmdir(struct inode *dir, struct dentry *dentry);
+
+/* i_op_ren.c */
+int au_wbr(struct dentry *dentry, aufs_bindex_t btgt);
+int aufs_rename(struct inode *src_dir, struct dentry *src_dentry,
+ struct inode *dir, struct dentry *dentry);
+
+/* iinfo.c */
+struct inode *au_h_iptr(struct inode *inode, aufs_bindex_t bindex);
+void au_hiput(struct au_hinode *hinode);
+void au_set_hi_wh(struct inode *inode, aufs_bindex_t bindex,
+ struct dentry *h_wh);
+unsigned int au_hi_flags(struct inode *inode, int isdir);
+
+/* hinode flags */
+#define AuHi_XINO 1
+#define AuHi_HNOTIFY (1 << 1)
+#define au_ftest_hi(flags, name) ((flags) & AuHi_##name)
+#define au_fset_hi(flags, name) \
+ do { (flags) |= AuHi_##name; } while (0)
+#define au_fclr_hi(flags, name) \
+ do { (flags) &= ~AuHi_##name; } while (0)
+
+#ifndef CONFIG_AUFS_HNOTIFY
+#undef AuHi_HNOTIFY
+#define AuHi_HNOTIFY 0
+#endif
+
+void au_set_h_iptr(struct inode *inode, aufs_bindex_t bindex,
+ struct inode *h_inode, unsigned int flags);
+
+void au_update_iigen(struct inode *inode, int half);
+void au_update_ibrange(struct inode *inode, int do_put_zero);
+
+void au_icntnr_init_once(void *_c);
+int au_iinfo_init(struct inode *inode);
+void au_iinfo_fin(struct inode *inode);
+int au_ii_realloc(struct au_iinfo *iinfo, int nbr);
+
+#ifdef CONFIG_PROC_FS
+/* plink.c */
+int au_plink_maint(struct super_block *sb, int flags);
+struct au_sbinfo;
+void au_plink_maint_leave(struct au_sbinfo *sbinfo);
+int au_plink_maint_enter(struct super_block *sb);
+#ifdef CONFIG_AUFS_DEBUG
+void au_plink_list(struct super_block *sb);
+#else
+AuStubVoid(au_plink_list, struct super_block *sb)
+#endif
+int au_plink_test(struct inode *inode);
+struct dentry *au_plink_lkup(struct inode *inode, aufs_bindex_t bindex);
+void au_plink_append(struct inode *inode, aufs_bindex_t bindex,
+ struct dentry *h_dentry);
+void au_plink_put(struct super_block *sb, int verbose);
+void au_plink_clean(struct super_block *sb, int verbose);
+void au_plink_half_refresh(struct super_block *sb, aufs_bindex_t br_id);
+#else
+AuStubInt0(au_plink_maint, struct super_block *sb, int flags);
+AuStubVoid(au_plink_maint_leave, struct au_sbinfo *sbinfo);
+AuStubInt0(au_plink_maint_enter, struct super_block *sb);
+AuStubVoid(au_plink_list, struct super_block *sb);
+AuStubInt0(au_plink_test, struct inode *inode);
+AuStub(struct dentry *, au_plink_lkup, return NULL,
+ struct inode *inode, aufs_bindex_t bindex);
+AuStubVoid(au_plink_append, struct inode *inode, aufs_bindex_t bindex,
+ struct dentry *h_dentry);
+AuStubVoid(au_plink_put, struct super_block *sb, int verbose);
+AuStubVoid(au_plink_clean, struct super_block *sb, int verbose);
+AuStubVoid(au_plink_half_refresh, struct super_block *sb, aufs_bindex_t br_id);
+#endif /* CONFIG_PROC_FS */
+
+#ifdef CONFIG_AUFS_XATTR
+/* xattr.c */
+int au_cpup_xattr(struct dentry *h_dst, struct dentry *h_src, int ignore_flags,
+ unsigned int verbose);
+ssize_t aufs_listxattr(struct dentry *dentry, char *list, size_t size);
+ssize_t aufs_getxattr(struct dentry *dentry, const char *name, void *value,
+ size_t size);
+int aufs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags);
+int aufs_removexattr(struct dentry *dentry, const char *name);
+
+/* void au_xattr_init(struct super_block *sb); */
+#else
+AuStubInt0(au_cpup_xattr, struct dentry *h_dst, struct dentry *h_src,
+ int ignore_flags, unsigned int verbose);
+/* AuStubVoid(au_xattr_init, struct super_block *sb); */
+#endif
+
+#ifdef CONFIG_FS_POSIX_ACL
+struct posix_acl *aufs_get_acl(struct inode *inode, int type);
+int aufs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+#endif
+
+#if IS_ENABLED(CONFIG_AUFS_XATTR) || IS_ENABLED(CONFIG_FS_POSIX_ACL)
+enum {
+ AU_XATTR_SET,
+ AU_XATTR_REMOVE,
+ AU_ACL_SET
+};
+
+struct au_srxattr {
+ int type;
+ union {
+ struct {
+ const char *name;
+ const void *value;
+ size_t size;
+ int flags;
+ } set;
+ struct {
+ const char *name;
+ } remove;
+ struct {
+ struct posix_acl *acl;
+ int type;
+ } acl_set;
+ } u;
+};
+ssize_t au_srxattr(struct dentry *dentry, struct au_srxattr *arg);
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+/* lock subclass for iinfo */
+enum {
+ AuLsc_II_CHILD, /* child first */
+ AuLsc_II_CHILD2, /* rename(2), link(2), and cpup at hnotify */
+ AuLsc_II_CHILD3, /* copyup dirs */
+ AuLsc_II_PARENT, /* see AuLsc_I_PARENT in vfsub.h */
+ AuLsc_II_PARENT2,
+ AuLsc_II_PARENT3, /* copyup dirs */
+ AuLsc_II_NEW_CHILD
+};
+
+/*
+ * ii_read_lock_child, ii_write_lock_child,
+ * ii_read_lock_child2, ii_write_lock_child2,
+ * ii_read_lock_child3, ii_write_lock_child3,
+ * ii_read_lock_parent, ii_write_lock_parent,
+ * ii_read_lock_parent2, ii_write_lock_parent2,
+ * ii_read_lock_parent3, ii_write_lock_parent3,
+ * ii_read_lock_new_child, ii_write_lock_new_child,
+ */
+#define AuReadLockFunc(name, lsc) \
+static inline void ii_read_lock_##name(struct inode *i) \
+{ \
+ au_rw_read_lock_nested(&au_ii(i)->ii_rwsem, AuLsc_II_##lsc); \
+}
+
+#define AuWriteLockFunc(name, lsc) \
+static inline void ii_write_lock_##name(struct inode *i) \
+{ \
+ au_rw_write_lock_nested(&au_ii(i)->ii_rwsem, AuLsc_II_##lsc); \
+}
+
+#define AuRWLockFuncs(name, lsc) \
+ AuReadLockFunc(name, lsc) \
+ AuWriteLockFunc(name, lsc)
+
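+/*
+ * e.g. AuRWLockFuncs(child, CHILD) defines ii_read_lock_child() and
+ * ii_write_lock_child(), which take ii_rwsem with the AuLsc_II_CHILD
+ * lockdep subclass.
+ */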
+AuRWLockFuncs(child, CHILD);
+AuRWLockFuncs(child2, CHILD2);
+AuRWLockFuncs(child3, CHILD3);
+AuRWLockFuncs(parent, PARENT);
+AuRWLockFuncs(parent2, PARENT2);
+AuRWLockFuncs(parent3, PARENT3);
+AuRWLockFuncs(new_child, NEW_CHILD);
+
+#undef AuReadLockFunc
+#undef AuWriteLockFunc
+#undef AuRWLockFuncs
+
+/*
+ * ii_read_unlock, ii_write_unlock, ii_downgrade_lock
+ */
+AuSimpleUnlockRwsemFuncs(ii, struct inode *i, &au_ii(i)->ii_rwsem);
+
+#define IiMustNoWaiters(i) AuRwMustNoWaiters(&au_ii(i)->ii_rwsem)
+#define IiMustAnyLock(i) AuRwMustAnyLock(&au_ii(i)->ii_rwsem)
+#define IiMustWriteLock(i) AuRwMustWriteLock(&au_ii(i)->ii_rwsem)
+
+/* ---------------------------------------------------------------------- */
+
+static inline void au_icntnr_init(struct au_icntnr *c)
+{
+#ifdef CONFIG_AUFS_DEBUG
+ c->vfs_inode.i_mode = 0;
+#endif
+}
+
+static inline unsigned int au_iigen(struct inode *inode, unsigned int *igflags)
+{
+ unsigned int gen;
+ struct au_iinfo *iinfo;
+ struct au_iigen *iigen;
+
+ iinfo = au_ii(inode);
+ iigen = &iinfo->ii_generation;
+ spin_lock(&iigen->ig_spin);
+ if (igflags)
+ *igflags = iigen->ig_flags;
+ gen = iigen->ig_generation;
+ spin_unlock(&iigen->ig_spin);
+
+ return gen;
+}
+
+/* tiny test for inode number */
+/* tmpfs generation is too rough */
+static inline int au_test_higen(struct inode *inode, struct inode *h_inode)
+{
+ struct au_iinfo *iinfo;
+
+ iinfo = au_ii(inode);
+ AuRwMustAnyLock(&iinfo->ii_rwsem);
+ return !(iinfo->ii_hsb1 == h_inode->i_sb
+ && iinfo->ii_higen == h_inode->i_generation);
+}
+
+static inline void au_iigen_dec(struct inode *inode)
+{
+ struct au_iinfo *iinfo;
+ struct au_iigen *iigen;
+
+ iinfo = au_ii(inode);
+ iigen = &iinfo->ii_generation;
+ spin_lock(&iigen->ig_spin);
+ iigen->ig_generation--;
+ spin_unlock(&iigen->ig_spin);
+}
+
+static inline int au_iigen_test(struct inode *inode, unsigned int sigen)
+{
+ int err;
+
+ err = 0;
+ if (unlikely(inode && au_iigen(inode, NULL) != sigen))
+ err = -EIO;
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static inline aufs_bindex_t au_ii_br_id(struct inode *inode,
+ aufs_bindex_t bindex)
+{
+ IiMustAnyLock(inode);
+ return au_ii(inode)->ii_hinode[0 + bindex].hi_id;
+}
+
+static inline aufs_bindex_t au_ibstart(struct inode *inode)
+{
+ IiMustAnyLock(inode);
+ return au_ii(inode)->ii_bstart;
+}
+
+static inline aufs_bindex_t au_ibend(struct inode *inode)
+{
+ IiMustAnyLock(inode);
+ return au_ii(inode)->ii_bend;
+}
+
+static inline struct au_vdir *au_ivdir(struct inode *inode)
+{
+ IiMustAnyLock(inode);
+ return au_ii(inode)->ii_vdir;
+}
+
+static inline struct dentry *au_hi_wh(struct inode *inode, aufs_bindex_t bindex)
+{
+ IiMustAnyLock(inode);
+ return au_ii(inode)->ii_hinode[0 + bindex].hi_whdentry;
+}
+
+static inline void au_set_ibstart(struct inode *inode, aufs_bindex_t bindex)
+{
+ IiMustWriteLock(inode);
+ au_ii(inode)->ii_bstart = bindex;
+}
+
+static inline void au_set_ibend(struct inode *inode, aufs_bindex_t bindex)
+{
+ IiMustWriteLock(inode);
+ au_ii(inode)->ii_bend = bindex;
+}
+
+static inline void au_set_ivdir(struct inode *inode, struct au_vdir *vdir)
+{
+ IiMustWriteLock(inode);
+ au_ii(inode)->ii_vdir = vdir;
+}
+
+static inline struct au_hinode *au_hi(struct inode *inode, aufs_bindex_t bindex)
+{
+ IiMustAnyLock(inode);
+ return au_ii(inode)->ii_hinode + bindex;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct dentry *au_pinned_parent(struct au_pin *pin)
+{
+ if (pin)
+ return pin->parent;
+ return NULL;
+}
+
+static inline struct inode *au_pinned_h_dir(struct au_pin *pin)
+{
+ if (pin && pin->hdir)
+ return pin->hdir->hi_inode;
+ return NULL;
+}
+
+static inline struct au_hinode *au_pinned_hdir(struct au_pin *pin)
+{
+ if (pin)
+ return pin->hdir;
+ return NULL;
+}
+
+static inline void au_pin_set_dentry(struct au_pin *pin, struct dentry *dentry)
+{
+ if (pin)
+ pin->dentry = dentry;
+}
+
+static inline void au_pin_set_parent_lflag(struct au_pin *pin,
+ unsigned char lflag)
+{
+ if (pin) {
+ if (lflag)
+ au_fset_pin(pin->flags, DI_LOCKED);
+ else
+ au_fclr_pin(pin->flags, DI_LOCKED);
+ }
+}
+
+#if 0 /* reserved */
+static inline void au_pin_set_parent(struct au_pin *pin, struct dentry *parent)
+{
+ if (pin) {
+ dput(pin->parent);
+ pin->parent = dget(parent);
+ }
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+struct au_branch;
+#ifdef CONFIG_AUFS_HNOTIFY
+struct au_hnotify_op {
+ void (*ctl)(struct au_hinode *hinode, int do_set);
+ int (*alloc)(struct au_hinode *hinode);
+
+ /*
+ * if it returns true, the caller should free hinode->hi_notify;
+ * otherwise ->free() frees it.
+ */
+ int (*free)(struct au_hinode *hinode,
+ struct au_hnotify *hn) __must_check;
+
+ void (*fin)(void);
+ int (*init)(void);
+
+ int (*reset_br)(unsigned int udba, struct au_branch *br, int perm);
+ void (*fin_br)(struct au_branch *br);
+ int (*init_br)(struct au_branch *br, int perm);
+};
+
+/* hnotify.c */
+int au_hn_alloc(struct au_hinode *hinode, struct inode *inode);
+void au_hn_free(struct au_hinode *hinode);
+void au_hn_ctl(struct au_hinode *hinode, int do_set);
+void au_hn_reset(struct inode *inode, unsigned int flags);
+int au_hnotify(struct inode *h_dir, struct au_hnotify *hnotify, u32 mask,
+ struct qstr *h_child_qstr, struct inode *h_child_inode);
+int au_hnotify_reset_br(unsigned int udba, struct au_branch *br, int perm);
+int au_hnotify_init_br(struct au_branch *br, int perm);
+void au_hnotify_fin_br(struct au_branch *br);
+int __init au_hnotify_init(void);
+void au_hnotify_fin(void);
+
+/* hfsnotify.c */
+extern const struct au_hnotify_op au_hnotify_op;
+
+static inline
+void au_hn_init(struct au_hinode *hinode)
+{
+ hinode->hi_notify = NULL;
+}
+
+static inline struct au_hnotify *au_hn(struct au_hinode *hinode)
+{
+ return hinode->hi_notify;
+}
+
+#else
+AuStub(int, au_hn_alloc, return -EOPNOTSUPP,
+ struct au_hinode *hinode __maybe_unused,
+ struct inode *inode __maybe_unused)
+AuStub(struct au_hnotify *, au_hn, return NULL, struct au_hinode *hinode)
+AuStubVoid(au_hn_free, struct au_hinode *hinode __maybe_unused)
+AuStubVoid(au_hn_ctl, struct au_hinode *hinode __maybe_unused,
+ int do_set __maybe_unused)
+AuStubVoid(au_hn_reset, struct inode *inode __maybe_unused,
+ unsigned int flags __maybe_unused)
+AuStubInt0(au_hnotify_reset_br, unsigned int udba __maybe_unused,
+ struct au_branch *br __maybe_unused,
+ int perm __maybe_unused)
+AuStubInt0(au_hnotify_init_br, struct au_branch *br __maybe_unused,
+ int perm __maybe_unused)
+AuStubVoid(au_hnotify_fin_br, struct au_branch *br __maybe_unused)
+AuStubInt0(__init au_hnotify_init, void)
+AuStubVoid(au_hnotify_fin, void)
+AuStubVoid(au_hn_init, struct au_hinode *hinode __maybe_unused)
+#endif /* CONFIG_AUFS_HNOTIFY */
+
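+/*
+ * suspend/resume hnotify on a lower dir; the imtx helpers below keep it
+ * suspended while the lower i_mutex is held, so that aufs does not react
+ * to the events generated by its own operations on that dir.
+ */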
+static inline void au_hn_suspend(struct au_hinode *hdir)
+{
+ au_hn_ctl(hdir, /*do_set*/0);
+}
+
+static inline void au_hn_resume(struct au_hinode *hdir)
+{
+ au_hn_ctl(hdir, /*do_set*/1);
+}
+
+static inline void au_hn_imtx_lock(struct au_hinode *hdir)
+{
+ inode_lock(hdir->hi_inode);
+ au_hn_suspend(hdir);
+}
+
+static inline void au_hn_imtx_lock_nested(struct au_hinode *hdir,
+ unsigned int sc __maybe_unused)
+{
+ inode_lock_nested(hdir->hi_inode, sc);
+ au_hn_suspend(hdir);
+}
+
+static inline void au_hn_imtx_unlock(struct au_hinode *hdir)
+{
+ au_hn_resume(hdir);
+ inode_unlock(hdir->hi_inode);
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_INODE_H__ */
diff --git a/fs/aufs/ioctl.c b/fs/aufs/ioctl.c
new file mode 100644
index 000000000..6528fb911
--- /dev/null
+++ b/fs/aufs/ioctl.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * ioctl
+ * plink-management and readdir in userspace.
+ * assist the pathconf(3) wrapper library.
+ * move-down
+ * File-based Hierarchical Storage Management.
+ */
+
+#include <linux/compat.h>
+#include <linux/file.h>
+#include "aufs.h"
+
+static int au_wbr_fd(struct path *path, struct aufs_wbr_fd __user *arg)
+{
+ int err, fd;
+ aufs_bindex_t wbi, bindex, bend;
+ struct file *h_file;
+ struct super_block *sb;
+ struct dentry *root;
+ struct au_branch *br;
+ struct aufs_wbr_fd wbrfd = {
+ .oflags = au_dir_roflags,
+ .brid = -1
+ };
+ const int valid = O_RDONLY | O_NONBLOCK | O_LARGEFILE | O_DIRECTORY
+ | O_NOATIME | O_CLOEXEC;
+
+ AuDebugOn(wbrfd.oflags & ~valid);
+
+ if (arg) {
+ err = copy_from_user(&wbrfd, arg, sizeof(wbrfd));
+ if (unlikely(err)) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ err = -EINVAL;
+ AuDbg("wbrfd{0%o, %d}\n", wbrfd.oflags, wbrfd.brid);
+ wbrfd.oflags |= au_dir_roflags;
+ AuDbg("0%o\n", wbrfd.oflags);
+ if (unlikely(wbrfd.oflags & ~valid))
+ goto out;
+ }
+
+ fd = get_unused_fd_flags(0);
+ err = fd;
+ if (unlikely(fd < 0))
+ goto out;
+
+ h_file = ERR_PTR(-EINVAL);
+ wbi = 0;
+ br = NULL;
+ sb = path->dentry->d_sb;
+ root = sb->s_root;
+ aufs_read_lock(root, AuLock_IR);
+ bend = au_sbend(sb);
+ if (wbrfd.brid >= 0) {
+ wbi = au_br_index(sb, wbrfd.brid);
+ if (unlikely(wbi < 0 || wbi > bend))
+ goto out_unlock;
+ }
+
+ h_file = ERR_PTR(-ENOENT);
+ br = au_sbr(sb, wbi);
+ if (!au_br_writable(br->br_perm)) {
+ if (arg)
+ goto out_unlock;
+
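+ /*
+ * the top branch is not writable and no explicit branch id was
+ * requested; scan downwards for the first writable branch.
+ */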
+ bindex = wbi + 1;
+ wbi = -1;
+ for (; bindex <= bend; bindex++) {
+ br = au_sbr(sb, bindex);
+ if (au_br_writable(br->br_perm)) {
+ wbi = bindex;
+ br = au_sbr(sb, wbi);
+ break;
+ }
+ }
+ }
+ AuDbg("wbi %d\n", wbi);
+ if (wbi >= 0)
+ h_file = au_h_open(root, wbi, wbrfd.oflags, NULL,
+ /*force_wr*/0);
+
+out_unlock:
+ aufs_read_unlock(root, AuLock_IR);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out_fd;
+
+ atomic_dec(&br->br_count); /* cf. au_h_open() */
+ fd_install(fd, h_file);
+ err = fd;
+ goto out; /* success */
+
+out_fd:
+ put_unused_fd(fd);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+long aufs_ioctl_dir(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long err;
+ struct dentry *dentry;
+
+ switch (cmd) {
+ case AUFS_CTL_RDU:
+ case AUFS_CTL_RDU_INO:
+ err = au_rdu_ioctl(file, cmd, arg);
+ break;
+
+ case AUFS_CTL_WBR_FD:
+ err = au_wbr_fd(&file->f_path, (void __user *)arg);
+ break;
+
+ case AUFS_CTL_IBUSY:
+ err = au_ibusy_ioctl(file, arg);
+ break;
+
+ case AUFS_CTL_BRINFO:
+ err = au_brinfo_ioctl(file, arg);
+ break;
+
+ case AUFS_CTL_FHSM_FD:
+ dentry = file->f_path.dentry;
+ if (IS_ROOT(dentry))
+ err = au_fhsm_fd(dentry->d_sb, arg);
+ else
+ err = -ENOTTY;
+ break;
+
+ default:
+ /* do not call the lower */
+ AuDbg("0x%x\n", cmd);
+ err = -ENOTTY;
+ }
+
+ AuTraceErr(err);
+ return err;
+}
+
+long aufs_ioctl_nondir(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long err;
+
+ switch (cmd) {
+ case AUFS_CTL_MVDOWN:
+ err = au_mvdown(file->f_path.dentry, (void __user *)arg);
+ break;
+
+ case AUFS_CTL_WBR_FD:
+ err = au_wbr_fd(&file->f_path, (void __user *)arg);
+ break;
+
+ default:
+ /* do not call the lower */
+ AuDbg("0x%x\n", cmd);
+ err = -ENOTTY;
+ }
+
+ AuTraceErr(err);
+ return err;
+}
+
+#ifdef CONFIG_COMPAT
+long aufs_compat_ioctl_dir(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ long err;
+
+ switch (cmd) {
+ case AUFS_CTL_RDU:
+ case AUFS_CTL_RDU_INO:
+ err = au_rdu_compat_ioctl(file, cmd, arg);
+ break;
+
+ case AUFS_CTL_IBUSY:
+ err = au_ibusy_compat_ioctl(file, arg);
+ break;
+
+ case AUFS_CTL_BRINFO:
+ err = au_brinfo_compat_ioctl(file, arg);
+ break;
+
+ default:
+ err = aufs_ioctl_dir(file, cmd, arg);
+ }
+
+ AuTraceErr(err);
+ return err;
+}
+
+long aufs_compat_ioctl_nondir(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return aufs_ioctl_nondir(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
diff --git a/fs/aufs/loop.c b/fs/aufs/loop.c
new file mode 100644
index 000000000..5711e7a2f
--- /dev/null
+++ b/fs/aufs/loop.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * support for loopback block device as a branch
+ */
+
+#include "aufs.h"
+
+/* added into drivers/block/loop.c */
+static struct file *(*backing_file_func)(struct super_block *sb);
+
+/*
+ * test if the branch being added is backed by a loopback device whose
+ * backing file resides within this aufs tree (an overlap).
+ */
+int au_test_loopback_overlap(struct super_block *sb, struct dentry *h_adding)
+{
+ struct super_block *h_sb;
+ struct file *backing_file;
+
+ if (unlikely(!backing_file_func)) {
+ /* don't load "loop" module here */
+ backing_file_func = symbol_get(loop_backing_file);
+ if (unlikely(!backing_file_func))
+ /* "loop" module is not loaded */
+ return 0;
+ }
+
+ h_sb = h_adding->d_sb;
+ backing_file = backing_file_func(h_sb);
+ if (!backing_file)
+ return 0;
+
+ h_adding = backing_file->f_path.dentry;
+ /*
+ * h_adding can be local NFS.
+ * in this case aufs cannot detect the loop.
+ */
+ if (unlikely(h_adding->d_sb == sb))
+ return 1;
+ return !!au_test_subdir(h_adding, sb->s_root);
+}
+
+/* true if a kernel thread named 'loop[0-9].*' accesses a file */
+int au_test_loopback_kthread(void)
+{
+ int ret;
+ struct task_struct *tsk = current;
+ char c, comm[sizeof(tsk->comm)];
+
+ ret = 0;
+ if (tsk->flags & PF_KTHREAD) {
+ get_task_comm(comm, tsk);
+ c = comm[4];
+ ret = ('0' <= c && c <= '9'
+ && !strncmp(comm, "loop", 4));
+ }
+
+ return ret;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define au_warn_loopback_step 16
+static int au_warn_loopback_nelem = au_warn_loopback_step;
+static unsigned long *au_warn_loopback_array;
+
+void au_warn_loopback(struct super_block *h_sb)
+{
+ int i, new_nelem;
+ unsigned long *a, magic;
+ static DEFINE_SPINLOCK(spin);
+
+ magic = h_sb->s_magic;
+ spin_lock(&spin);
+ a = au_warn_loopback_array;
+ for (i = 0; i < au_warn_loopback_nelem && *a; i++)
+ if (a[i] == magic) {
+ spin_unlock(&spin);
+ return;
+ }
+
+ /* h_sb is new to us, print it */
+ if (i < au_warn_loopback_nelem) {
+ a[i] = magic;
+ goto pr;
+ }
+
+ /* expand the array */
+ new_nelem = au_warn_loopback_nelem + au_warn_loopback_step;
+ a = au_kzrealloc(au_warn_loopback_array,
+ au_warn_loopback_nelem * sizeof(unsigned long),
+ new_nelem * sizeof(unsigned long), GFP_ATOMIC);
+ if (a) {
+ au_warn_loopback_nelem = new_nelem;
+ au_warn_loopback_array = a;
+ a[i] = magic;
+ goto pr;
+ }
+
+ spin_unlock(&spin);
+ AuWarn1("realloc failed, ignored\n");
+ return;
+
+pr:
+ spin_unlock(&spin);
+ pr_warn("you may want to try another patch for loopback file "
+ "on %s(0x%lx) branch\n", au_sbtype(h_sb), magic);
+}
+
+int au_loopback_init(void)
+{
+ int err;
+ struct super_block *sb __maybe_unused;
+
+ BUILD_BUG_ON(sizeof(sb->s_magic) != sizeof(unsigned long));
+
+ err = 0;
+ au_warn_loopback_array = kcalloc(au_warn_loopback_step,
+ sizeof(unsigned long), GFP_NOFS);
+ if (unlikely(!au_warn_loopback_array))
+ err = -ENOMEM;
+
+ return err;
+}
+
+void au_loopback_fin(void)
+{
+ if (backing_file_func)
+ symbol_put(loop_backing_file);
+ kfree(au_warn_loopback_array);
+}
diff --git a/fs/aufs/loop.h b/fs/aufs/loop.h
new file mode 100644
index 000000000..48bf070e8
--- /dev/null
+++ b/fs/aufs/loop.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * support for loopback mount as a branch
+ */
+
+#ifndef __AUFS_LOOP_H__
+#define __AUFS_LOOP_H__
+
+#ifdef __KERNEL__
+
+struct dentry;
+struct super_block;
+
+#ifdef CONFIG_AUFS_BDEV_LOOP
+/* drivers/block/loop.c */
+struct file *loop_backing_file(struct super_block *sb);
+
+/* loop.c */
+int au_test_loopback_overlap(struct super_block *sb, struct dentry *h_adding);
+int au_test_loopback_kthread(void);
+void au_warn_loopback(struct super_block *h_sb);
+
+int au_loopback_init(void);
+void au_loopback_fin(void);
+#else
+AuStubInt0(au_test_loopback_overlap, struct super_block *sb,
+ struct dentry *h_adding)
+AuStubInt0(au_test_loopback_kthread, void)
+AuStubVoid(au_warn_loopback, struct super_block *h_sb)
+
+AuStubInt0(au_loopback_init, void)
+AuStubVoid(au_loopback_fin, void)
+#endif /* CONFIG_AUFS_BDEV_LOOP */
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_LOOP_H__ */
diff --git a/fs/aufs/magic.mk b/fs/aufs/magic.mk
new file mode 100644
index 000000000..4f83bdf1d
--- /dev/null
+++ b/fs/aufs/magic.mk
@@ -0,0 +1,30 @@
+
+# defined in ${srctree}/fs/fuse/inode.c
+# tristate
+ifdef CONFIG_FUSE_FS
+ccflags-y += -DFUSE_SUPER_MAGIC=0x65735546
+endif
+
+# defined in ${srctree}/fs/xfs/xfs_sb.h
+# tristate
+ifdef CONFIG_XFS_FS
+ccflags-y += -DXFS_SB_MAGIC=0x58465342
+endif
+
+# defined in ${srctree}/fs/configfs/mount.c
+# tristate
+ifdef CONFIG_CONFIGFS_FS
+ccflags-y += -DCONFIGFS_MAGIC=0x62656570
+endif
+
+# defined in ${srctree}/fs/ubifs/ubifs.h
+# tristate
+ifdef CONFIG_UBIFS_FS
+ccflags-y += -DUBIFS_SUPER_MAGIC=0x24051905
+endif
+
+# defined in ${srctree}/fs/hfsplus/hfsplus_raw.h
+# tristate
+ifdef CONFIG_HFSPLUS_FS
+ccflags-y += -DHFSPLUS_SUPER_MAGIC=0x482b
+endif
diff --git a/fs/aufs/module.c b/fs/aufs/module.c
new file mode 100644
index 000000000..8a28377c5
--- /dev/null
+++ b/fs/aufs/module.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * module global variables and operations
+ */
+
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include "aufs.h"
+
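+/*
+ * krealloc() does not clear the newly grown region; zero everything past
+ * the first 'nused' bytes so callers can rely on zero-filled growth.
+ */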
+void *au_kzrealloc(void *p, unsigned int nused, unsigned int new_sz, gfp_t gfp)
+{
+ if (new_sz <= nused)
+ return p;
+
+ p = krealloc(p, new_sz, gfp);
+ if (p)
+ memset(p + nused, 0, new_sz - nused);
+ return p;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * aufs caches
+ */
+struct kmem_cache *au_cachep[AuCache_Last];
+static int __init au_cache_init(void)
+{
+ au_cachep[AuCache_DINFO] = AuCacheCtor(au_dinfo, au_di_init_once);
+ if (au_cachep[AuCache_DINFO])
+ /* SLAB_DESTROY_BY_RCU */
+ au_cachep[AuCache_ICNTNR] = AuCacheCtor(au_icntnr,
+ au_icntnr_init_once);
+ if (au_cachep[AuCache_ICNTNR])
+ au_cachep[AuCache_FINFO] = AuCacheCtor(au_finfo,
+ au_fi_init_once);
+ if (au_cachep[AuCache_FINFO])
+ au_cachep[AuCache_VDIR] = AuCache(au_vdir);
+ if (au_cachep[AuCache_VDIR])
+ au_cachep[AuCache_DEHSTR] = AuCache(au_vdir_dehstr);
+ if (au_cachep[AuCache_DEHSTR])
+ return 0;
+
+ return -ENOMEM;
+}
+
+static void au_cache_fin(void)
+{
+ int i;
+
+ /*
+ * Make sure all delayed RCU-freed inodes are flushed before we
+ * destroy the caches.
+ */
+ rcu_barrier();
+
+ /* excluding AuCache_HNOTIFY */
+ BUILD_BUG_ON(AuCache_HNOTIFY + 1 != AuCache_Last);
+ for (i = 0; i < AuCache_HNOTIFY; i++) {
+ kmem_cache_destroy(au_cachep[i]);
+ au_cachep[i] = NULL;
+ }
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_dir_roflags;
+
+#ifdef CONFIG_AUFS_SBILIST
+/*
+ * iterate_supers_type() doesn't protect us from
+ * remounting (branch management)
+ */
+struct au_splhead au_sbilist;
+#endif
+
+struct lock_class_key au_lc_key[AuLcKey_Last];
+
+/*
+ * functions for module interface.
+ */
+MODULE_LICENSE("GPL");
+/* MODULE_LICENSE("GPL v2"); */
+MODULE_AUTHOR("Junjiro R. Okajima <aufs-users@lists.sourceforge.net>");
+MODULE_DESCRIPTION(AUFS_NAME
+ " -- Advanced multi layered unification filesystem");
+MODULE_VERSION(AUFS_VERSION);
+
+/* this module parameter has no meaning when SYSFS is disabled */
+int sysaufs_brs = 1;
+MODULE_PARM_DESC(brs, "use <sysfs>/fs/aufs/si_*/brN");
+module_param_named(brs, sysaufs_brs, int, S_IRUGO);
+
+/* this module parameter has no meaning when USER_NS is disabled */
+bool au_userns;
+MODULE_PARM_DESC(allow_userns, "allow unprivileged to mount under userns");
+module_param_named(allow_userns, au_userns, bool, S_IRUGO);
+
+/* ---------------------------------------------------------------------- */
+
+static char au_esc_chars[0x20 + 3]; /* 0x01-0x20, backslash, del, and NULL */
+
+int au_seq_path(struct seq_file *seq, struct path *path)
+{
+ int err;
+
+ err = seq_path(seq, path, au_esc_chars);
+ if (err > 0)
+ err = 0;
+ else if (err < 0)
+ err = -ENOMEM;
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int __init aufs_init(void)
+{
+ int err, i;
+ char *p;
+
+ p = au_esc_chars;
+ for (i = 1; i <= ' '; i++)
+ *p++ = i;
+ *p++ = '\\';
+ *p++ = '\x7f';
+ *p = 0;
+
+ au_dir_roflags = au_file_roflags(O_DIRECTORY | O_LARGEFILE);
+
+ memcpy(aufs_iop_nogetattr, aufs_iop, sizeof(aufs_iop));
+ for (i = 0; i < AuIop_Last; i++)
+ aufs_iop_nogetattr[i].getattr = NULL;
+
+ au_sbilist_init();
+ sysaufs_brs_init();
+ au_debug_init();
+ au_dy_init();
+ err = sysaufs_init();
+ if (unlikely(err))
+ goto out;
+ err = au_procfs_init();
+ if (unlikely(err))
+ goto out_sysaufs;
+ err = au_wkq_init();
+ if (unlikely(err))
+ goto out_procfs;
+ err = au_loopback_init();
+ if (unlikely(err))
+ goto out_wkq;
+ err = au_hnotify_init();
+ if (unlikely(err))
+ goto out_loopback;
+ err = au_sysrq_init();
+ if (unlikely(err))
+ goto out_hin;
+ err = au_cache_init();
+ if (unlikely(err))
+ goto out_sysrq;
+
+ aufs_fs_type.fs_flags |= au_userns ? FS_USERNS_MOUNT : 0;
+ err = register_filesystem(&aufs_fs_type);
+ if (unlikely(err))
+ goto out_cache;
+
+ /* since we define pr_fmt, call printk directly */
+ printk(KERN_INFO AUFS_NAME " " AUFS_VERSION "\n");
+ goto out; /* success */
+
+out_cache:
+ au_cache_fin();
+out_sysrq:
+ au_sysrq_fin();
+out_hin:
+ au_hnotify_fin();
+out_loopback:
+ au_loopback_fin();
+out_wkq:
+ au_wkq_fin();
+out_procfs:
+ au_procfs_fin();
+out_sysaufs:
+ sysaufs_fin();
+ au_dy_fin();
+out:
+ return err;
+}
+
+static void __exit aufs_exit(void)
+{
+ unregister_filesystem(&aufs_fs_type);
+ au_cache_fin();
+ au_sysrq_fin();
+ au_hnotify_fin();
+ au_loopback_fin();
+ au_wkq_fin();
+ au_procfs_fin();
+ sysaufs_fin();
+ au_dy_fin();
+}
+
+module_init(aufs_init);
+module_exit(aufs_exit);
diff --git a/fs/aufs/module.h b/fs/aufs/module.h
new file mode 100644
index 000000000..bb8644730
--- /dev/null
+++ b/fs/aufs/module.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * module initialization and module-global
+ */
+
+#ifndef __AUFS_MODULE_H__
+#define __AUFS_MODULE_H__
+
+#ifdef __KERNEL__
+
+#include <linux/slab.h>
+
+struct path;
+struct seq_file;
+
+/* module parameters */
+extern int sysaufs_brs;
+extern bool au_userns;
+
+/* ---------------------------------------------------------------------- */
+
+extern int au_dir_roflags;
+
+enum {
+ AuLcNonDir_FIINFO,
+ AuLcNonDir_DIINFO,
+ AuLcNonDir_IIINFO,
+
+ AuLcDir_FIINFO,
+ AuLcDir_DIINFO,
+ AuLcDir_IIINFO,
+
+ AuLcSymlink_DIINFO,
+ AuLcSymlink_IIINFO,
+
+ AuLcKey_Last
+};
+extern struct lock_class_key au_lc_key[AuLcKey_Last];
+
+void *au_kzrealloc(void *p, unsigned int nused, unsigned int new_sz, gfp_t gfp);
+int au_seq_path(struct seq_file *seq, struct path *path);
+
+#ifdef CONFIG_PROC_FS
+/* procfs.c */
+int __init au_procfs_init(void);
+void au_procfs_fin(void);
+#else
+AuStubInt0(au_procfs_init, void);
+AuStubVoid(au_procfs_fin, void);
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+/* kmem cache */
+enum {
+ AuCache_DINFO,
+ AuCache_ICNTNR,
+ AuCache_FINFO,
+ AuCache_VDIR,
+ AuCache_DEHSTR,
+ AuCache_HNOTIFY, /* must be last */
+ AuCache_Last
+};
+
+#define AuCacheFlags (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD)
+#define AuCache(type) KMEM_CACHE(type, AuCacheFlags)
+#define AuCacheCtor(type, ctor) \
+ kmem_cache_create(#type, sizeof(struct type), \
+ __alignof__(struct type), AuCacheFlags, ctor)
+
+extern struct kmem_cache *au_cachep[];
+
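+/* generate au_cache_alloc_<name>()/au_cache_free_<name>() helpers for each cache */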
+#define AuCacheFuncs(name, index) \
+static inline struct au_##name *au_cache_alloc_##name(void) \
+{ return kmem_cache_alloc(au_cachep[AuCache_##index], GFP_NOFS); } \
+static inline void au_cache_free_##name(struct au_##name *p) \
+{ kmem_cache_free(au_cachep[AuCache_##index], p); }
+
+AuCacheFuncs(dinfo, DINFO);
+AuCacheFuncs(icntnr, ICNTNR);
+AuCacheFuncs(finfo, FINFO);
+AuCacheFuncs(vdir, VDIR);
+AuCacheFuncs(vdir_dehstr, DEHSTR);
+#ifdef CONFIG_AUFS_HNOTIFY
+AuCacheFuncs(hnotify, HNOTIFY);
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_MODULE_H__ */
diff --git a/fs/aufs/mvdown.c b/fs/aufs/mvdown.c
new file mode 100644
index 000000000..d8415422b
--- /dev/null
+++ b/fs/aufs/mvdown.c
@@ -0,0 +1,690 @@
+/*
+ * Copyright (C) 2011-2016 Junjiro R. Okajima
+ */
+
+/*
+ * move-down, opposite of copy-up
+ */
+
+#include "aufs.h"
+
+struct au_mvd_args {
+ struct {
+ struct super_block *h_sb;
+ struct dentry *h_parent;
+ struct au_hinode *hdir;
+ struct inode *h_dir, *h_inode;
+ struct au_pin pin;
+ } info[AUFS_MVDOWN_NARRAY];
+
+ struct aufs_mvdown mvdown;
+ struct dentry *dentry, *parent;
+ struct inode *inode, *dir;
+ struct super_block *sb;
+ aufs_bindex_t bopq, bwh, bfound;
+ unsigned char rename_lock;
+};
+
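+/*
+ * shorthand for struct au_mvd_args members, indexed by AUFS_MVDOWN_UPPER
+ * (the source) and AUFS_MVDOWN_LOWER (the destination)
+ */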
+#define mvd_errno mvdown.au_errno
+#define mvd_bsrc mvdown.stbr[AUFS_MVDOWN_UPPER].bindex
+#define mvd_src_brid mvdown.stbr[AUFS_MVDOWN_UPPER].brid
+#define mvd_bdst mvdown.stbr[AUFS_MVDOWN_LOWER].bindex
+#define mvd_dst_brid mvdown.stbr[AUFS_MVDOWN_LOWER].brid
+
+#define mvd_h_src_sb info[AUFS_MVDOWN_UPPER].h_sb
+#define mvd_h_src_parent info[AUFS_MVDOWN_UPPER].h_parent
+#define mvd_hdir_src info[AUFS_MVDOWN_UPPER].hdir
+#define mvd_h_src_dir info[AUFS_MVDOWN_UPPER].h_dir
+#define mvd_h_src_inode info[AUFS_MVDOWN_UPPER].h_inode
+#define mvd_pin_src info[AUFS_MVDOWN_UPPER].pin
+
+#define mvd_h_dst_sb info[AUFS_MVDOWN_LOWER].h_sb
+#define mvd_h_dst_parent info[AUFS_MVDOWN_LOWER].h_parent
+#define mvd_hdir_dst info[AUFS_MVDOWN_LOWER].hdir
+#define mvd_h_dst_dir info[AUFS_MVDOWN_LOWER].h_dir
+#define mvd_h_dst_inode info[AUFS_MVDOWN_LOWER].h_inode
+#define mvd_pin_dst info[AUFS_MVDOWN_LOWER].pin
+
+#define AU_MVD_PR(flag, ...) do { \
+ if (flag) \
+ pr_err(__VA_ARGS__); \
+ } while (0)
+
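+/*
+ * pick the destination branch below bsrc:
+ * - with FHSM_LOWER, the next writable FHSM branch
+ * - by default, the next branch not marked readonly by aufs
+ * - with ROLOWER, a branch whose fs is natively writable even if aufs marks
+ *   it readonly (ROLOWER_R is then reported back to userspace)
+ * returns the branch index, or -1 when no candidate exists.
+ */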
+static int find_lower_writable(struct au_mvd_args *a)
+{
+ struct super_block *sb;
+ aufs_bindex_t bindex, bend;
+ struct au_branch *br;
+
+ sb = a->sb;
+ bindex = a->mvd_bsrc;
+ bend = au_sbend(sb);
+ if (a->mvdown.flags & AUFS_MVDOWN_FHSM_LOWER)
+ for (bindex++; bindex <= bend; bindex++) {
+ br = au_sbr(sb, bindex);
+ if (au_br_fhsm(br->br_perm)
+ && (!(au_br_sb(br)->s_flags & MS_RDONLY)))
+ return bindex;
+ }
+ else if (!(a->mvdown.flags & AUFS_MVDOWN_ROLOWER))
+ for (bindex++; bindex <= bend; bindex++) {
+ br = au_sbr(sb, bindex);
+ if (!au_br_rdonly(br))
+ return bindex;
+ }
+ else
+ for (bindex++; bindex <= bend; bindex++) {
+ br = au_sbr(sb, bindex);
+ if (!(au_br_sb(br)->s_flags & MS_RDONLY)) {
+ if (au_br_rdonly(br))
+ a->mvdown.flags
+ |= AUFS_MVDOWN_ROLOWER_R;
+ return bindex;
+ }
+ }
+
+ return -1;
+}
+
+/* make the parent dir on bdst */
+static int au_do_mkdir(const unsigned char dmsg, struct au_mvd_args *a)
+{
+ int err;
+
+ err = 0;
+ a->mvd_hdir_src = au_hi(a->dir, a->mvd_bsrc);
+ a->mvd_hdir_dst = au_hi(a->dir, a->mvd_bdst);
+ a->mvd_h_src_parent = au_h_dptr(a->parent, a->mvd_bsrc);
+ a->mvd_h_dst_parent = NULL;
+ if (au_dbend(a->parent) >= a->mvd_bdst)
+ a->mvd_h_dst_parent = au_h_dptr(a->parent, a->mvd_bdst);
+ if (!a->mvd_h_dst_parent) {
+ err = au_cpdown_dirs(a->dentry, a->mvd_bdst);
+ if (unlikely(err)) {
+ AU_MVD_PR(dmsg, "cpdown_dirs failed\n");
+ goto out;
+ }
+ a->mvd_h_dst_parent = au_h_dptr(a->parent, a->mvd_bdst);
+ }
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/* lock them all */
+static int au_do_lock(const unsigned char dmsg, struct au_mvd_args *a)
+{
+ int err;
+ struct dentry *h_trap;
+
+ a->mvd_h_src_sb = au_sbr_sb(a->sb, a->mvd_bsrc);
+ a->mvd_h_dst_sb = au_sbr_sb(a->sb, a->mvd_bdst);
+ err = au_pin(&a->mvd_pin_dst, a->dentry, a->mvd_bdst,
+ au_opt_udba(a->sb),
+ AuPin_MNT_WRITE | AuPin_DI_LOCKED);
+ AuTraceErr(err);
+ if (unlikely(err)) {
+ AU_MVD_PR(dmsg, "pin_dst failed\n");
+ goto out;
+ }
+
+ if (a->mvd_h_src_sb != a->mvd_h_dst_sb) {
+ a->rename_lock = 0;
+ au_pin_init(&a->mvd_pin_src, a->dentry, a->mvd_bsrc,
+ AuLsc_DI_PARENT, AuLsc_I_PARENT3,
+ au_opt_udba(a->sb),
+ AuPin_MNT_WRITE | AuPin_DI_LOCKED);
+ err = au_do_pin(&a->mvd_pin_src);
+ AuTraceErr(err);
+ a->mvd_h_src_dir = d_inode(a->mvd_h_src_parent);
+ if (unlikely(err)) {
+ AU_MVD_PR(dmsg, "pin_src failed\n");
+ goto out_dst;
+ }
+ goto out; /* success */
+ }
+
+ a->rename_lock = 1;
+ au_pin_hdir_unlock(&a->mvd_pin_dst);
+ err = au_pin(&a->mvd_pin_src, a->dentry, a->mvd_bsrc,
+ au_opt_udba(a->sb),
+ AuPin_MNT_WRITE | AuPin_DI_LOCKED);
+ AuTraceErr(err);
+ a->mvd_h_src_dir = d_inode(a->mvd_h_src_parent);
+ if (unlikely(err)) {
+ AU_MVD_PR(dmsg, "pin_src failed\n");
+ au_pin_hdir_lock(&a->mvd_pin_dst);
+ goto out_dst;
+ }
+ au_pin_hdir_unlock(&a->mvd_pin_src);
+ h_trap = vfsub_lock_rename(a->mvd_h_src_parent, a->mvd_hdir_src,
+ a->mvd_h_dst_parent, a->mvd_hdir_dst);
+ if (h_trap) {
+ err = (h_trap != a->mvd_h_src_parent);
+ if (err)
+ err = (h_trap != a->mvd_h_dst_parent);
+ }
+ BUG_ON(err); /* it should never happen */
+ if (unlikely(a->mvd_h_src_dir != au_pinned_h_dir(&a->mvd_pin_src))) {
+ err = -EBUSY;
+ AuTraceErr(err);
+ vfsub_unlock_rename(a->mvd_h_src_parent, a->mvd_hdir_src,
+ a->mvd_h_dst_parent, a->mvd_hdir_dst);
+ au_pin_hdir_lock(&a->mvd_pin_src);
+ au_unpin(&a->mvd_pin_src);
+ au_pin_hdir_lock(&a->mvd_pin_dst);
+ goto out_dst;
+ }
+ goto out; /* success */
+
+out_dst:
+ au_unpin(&a->mvd_pin_dst);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+static void au_do_unlock(const unsigned char dmsg, struct au_mvd_args *a)
+{
+ if (!a->rename_lock)
+ au_unpin(&a->mvd_pin_src);
+ else {
+ vfsub_unlock_rename(a->mvd_h_src_parent, a->mvd_hdir_src,
+ a->mvd_h_dst_parent, a->mvd_hdir_dst);
+ au_pin_hdir_lock(&a->mvd_pin_src);
+ au_unpin(&a->mvd_pin_src);
+ au_pin_hdir_lock(&a->mvd_pin_dst);
+ }
+ au_unpin(&a->mvd_pin_dst);
+}
+
+/* copy-down the file */
+static int au_do_cpdown(const unsigned char dmsg, struct au_mvd_args *a)
+{
+ int err;
+ struct au_cp_generic cpg = {
+ .dentry = a->dentry,
+ .bdst = a->mvd_bdst,
+ .bsrc = a->mvd_bsrc,
+ .len = -1,
+ .pin = &a->mvd_pin_dst,
+ .flags = AuCpup_DTIME | AuCpup_HOPEN
+ };
+
+ AuDbg("b%d, b%d\n", cpg.bsrc, cpg.bdst);
+ if (a->mvdown.flags & AUFS_MVDOWN_OWLOWER)
+ au_fset_cpup(cpg.flags, OVERWRITE);
+ if (a->mvdown.flags & AUFS_MVDOWN_ROLOWER)
+ au_fset_cpup(cpg.flags, RWDST);
+ err = au_sio_cpdown_simple(&cpg);
+ if (unlikely(err))
+ AU_MVD_PR(dmsg, "cpdown failed\n");
+
+ AuTraceErr(err);
+ return err;
+}
+
+/*
+ * unlink the whiteout on bdst if it exists; it may have been created by UDBA
+ * while we were sleeping
+ */
+static int au_do_unlink_wh(const unsigned char dmsg, struct au_mvd_args *a)
+{
+ int err;
+ struct path h_path;
+ struct au_branch *br;
+ struct inode *delegated;
+
+ br = au_sbr(a->sb, a->mvd_bdst);
+ h_path.dentry = au_wh_lkup(a->mvd_h_dst_parent, &a->dentry->d_name, br);
+ err = PTR_ERR(h_path.dentry);
+ if (IS_ERR(h_path.dentry)) {
+ AU_MVD_PR(dmsg, "wh_lkup failed\n");
+ goto out;
+ }
+
+ err = 0;
+ if (d_is_positive(h_path.dentry)) {
+ h_path.mnt = au_br_mnt(br);
+ delegated = NULL;
+ err = vfsub_unlink(d_inode(a->mvd_h_dst_parent), &h_path,
+ &delegated, /*force*/0);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal unlink\n");
+ iput(delegated);
+ }
+ if (unlikely(err))
+ AU_MVD_PR(dmsg, "wh_unlink failed\n");
+ }
+ dput(h_path.dentry);
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/*
+ * unlink the topmost h_dentry
+ */
+static int au_do_unlink(const unsigned char dmsg, struct au_mvd_args *a)
+{
+ int err;
+ struct path h_path;
+ struct inode *delegated;
+
+ h_path.mnt = au_sbr_mnt(a->sb, a->mvd_bsrc);
+ h_path.dentry = au_h_dptr(a->dentry, a->mvd_bsrc);
+ delegated = NULL;
+ err = vfsub_unlink(a->mvd_h_src_dir, &h_path, &delegated, /*force*/0);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal unlink\n");
+ iput(delegated);
+ }
+ if (unlikely(err))
+ AU_MVD_PR(dmsg, "unlink failed\n");
+
+ AuTraceErr(err);
+ return err;
+}
+
+/* Since mvdown itself succeeded, ignore any error from this function */
+static void au_do_stfs(const unsigned char dmsg, struct au_mvd_args *a)
+{
+ int err;
+ struct au_branch *br;
+
+ a->mvdown.flags |= AUFS_MVDOWN_STFS_FAILED;
+ br = au_sbr(a->sb, a->mvd_bsrc);
+ err = au_br_stfs(br, &a->mvdown.stbr[AUFS_MVDOWN_UPPER].stfs);
+ if (!err) {
+ br = au_sbr(a->sb, a->mvd_bdst);
+ a->mvdown.stbr[AUFS_MVDOWN_LOWER].brid = br->br_id;
+ err = au_br_stfs(br, &a->mvdown.stbr[AUFS_MVDOWN_LOWER].stfs);
+ }
+ if (!err)
+ a->mvdown.flags &= ~AUFS_MVDOWN_STFS_FAILED;
+ else
+ AU_MVD_PR(dmsg, "statfs failed (%d), ignored\n", err);
+}
+
+/*
+ * copy-down the file and unlink the bsrc file.
+ * - unlink the bdst whiteout if it exists
+ * - copy-down the file (with whtmp name and rename)
+ * - unlink the bsrc file
+ */
+static int au_do_mvdown(const unsigned char dmsg, struct au_mvd_args *a)
+{
+ int err;
+
+ err = au_do_mkdir(dmsg, a);
+ if (!err)
+ err = au_do_lock(dmsg, a);
+ if (unlikely(err))
+ goto out;
+
+ /*
+ * do not revert the activities we made on bdst since they should be
+ * harmless in aufs.
+ */
+
+ err = au_do_cpdown(dmsg, a);
+ if (!err)
+ err = au_do_unlink_wh(dmsg, a);
+ if (!err && !(a->mvdown.flags & AUFS_MVDOWN_KUPPER))
+ err = au_do_unlink(dmsg, a);
+ if (unlikely(err))
+ goto out_unlock;
+
+ AuDbg("%pd2, 0x%x, %d --> %d\n",
+ a->dentry, a->mvdown.flags, a->mvd_bsrc, a->mvd_bdst);
+ if (find_lower_writable(a) < 0)
+ a->mvdown.flags |= AUFS_MVDOWN_BOTTOM;
+
+ if (a->mvdown.flags & AUFS_MVDOWN_STFS)
+ au_do_stfs(dmsg, a);
+
+ /* maintain internal array */
+ if (!(a->mvdown.flags & AUFS_MVDOWN_KUPPER)) {
+ au_set_h_dptr(a->dentry, a->mvd_bsrc, NULL);
+ au_set_dbstart(a->dentry, a->mvd_bdst);
+ au_set_h_iptr(a->inode, a->mvd_bsrc, NULL, /*flags*/0);
+ au_set_ibstart(a->inode, a->mvd_bdst);
+ } else {
+ /* hide the lower */
+ au_set_h_dptr(a->dentry, a->mvd_bdst, NULL);
+ au_set_dbend(a->dentry, a->mvd_bsrc);
+ au_set_h_iptr(a->inode, a->mvd_bdst, NULL, /*flags*/0);
+ au_set_ibend(a->inode, a->mvd_bsrc);
+ }
+ if (au_dbend(a->dentry) < a->mvd_bdst)
+ au_set_dbend(a->dentry, a->mvd_bdst);
+ if (au_ibend(a->inode) < a->mvd_bdst)
+ au_set_ibend(a->inode, a->mvd_bdst);
+
+out_unlock:
+ au_do_unlock(dmsg, a);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* make sure the file is idle */
+static int au_mvd_args_busy(const unsigned char dmsg, struct au_mvd_args *a)
+{
+ int err, plinked;
+
+ err = 0;
+ plinked = !!au_opt_test(au_mntflags(a->sb), PLINK);
+ if (au_dbstart(a->dentry) == a->mvd_bsrc
+ && au_dcount(a->dentry) == 1
+ && atomic_read(&a->inode->i_count) == 1
+ /* && a->mvd_h_src_inode->i_nlink == 1 */
+ && (!plinked || !au_plink_test(a->inode))
+ && a->inode->i_nlink == 1)
+ goto out;
+
+ err = -EBUSY;
+ AU_MVD_PR(dmsg,
+ "b%d, d{b%d, c%d?}, i{c%d?, l%u}, hi{l%u}, p{%d, %d}\n",
+ a->mvd_bsrc, au_dbstart(a->dentry), au_dcount(a->dentry),
+ atomic_read(&a->inode->i_count), a->inode->i_nlink,
+ a->mvd_h_src_inode->i_nlink,
+ plinked, plinked ? au_plink_test(a->inode) : 0);
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/* make sure the parent dir is fine */
+static int au_mvd_args_parent(const unsigned char dmsg,
+ struct au_mvd_args *a)
+{
+ int err;
+ aufs_bindex_t bindex;
+
+ err = 0;
+ if (unlikely(au_alive_dir(a->parent))) {
+ err = -ENOENT;
+ AU_MVD_PR(dmsg, "parent dir is dead\n");
+ goto out;
+ }
+
+ a->bopq = au_dbdiropq(a->parent);
+ bindex = au_wbr_nonopq(a->dentry, a->mvd_bdst);
+ AuDbg("b%d\n", bindex);
+ if (unlikely((bindex >= 0 && bindex < a->mvd_bdst)
+ || (a->bopq != -1 && a->bopq < a->mvd_bdst))) {
+ err = -EINVAL;
+ a->mvd_errno = EAU_MVDOWN_OPAQUE;
+ AU_MVD_PR(dmsg, "ancestor is opaque b%d, b%d\n",
+ a->bopq, a->mvd_bdst);
+ }
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+static int au_mvd_args_intermediate(const unsigned char dmsg,
+ struct au_mvd_args *a)
+{
+ int err;
+ struct au_dinfo *dinfo, *tmp;
+
+ /* lookup the next lower positive entry */
+ err = -ENOMEM;
+ tmp = au_di_alloc(a->sb, AuLsc_DI_TMP);
+ if (unlikely(!tmp))
+ goto out;
+
+ a->bfound = -1;
+ a->bwh = -1;
+ dinfo = au_di(a->dentry);
+ au_di_cp(tmp, dinfo);
+ au_di_swap(tmp, dinfo);
+
+ /* returns the number of positive dentries */
+ err = au_lkup_dentry(a->dentry, a->mvd_bsrc + 1, /*type*/0);
+ if (!err)
+ a->bwh = au_dbwh(a->dentry);
+ else if (err > 0)
+ a->bfound = au_dbstart(a->dentry);
+
+ au_di_swap(tmp, dinfo);
+ au_rw_write_unlock(&tmp->di_rwsem);
+ au_di_free(tmp);
+ if (unlikely(err < 0))
+ AU_MVD_PR(dmsg, "failed look-up lower\n");
+
+ /*
+ * here, we have these cases.
+ * bfound == -1
+ * no positive dentry under bsrc. there are more sub-cases.
+ * bwh < 0
+	 *		there is no whiteout; we can safely move-down.
+ * bwh <= bsrc
+ * impossible
+ * bsrc < bwh && bwh < bdst
+	 *		there is a whiteout on an RO branch. cannot proceed.
+ * bwh == bdst
+ * there is a whiteout on the RW target branch. it should
+ * be removed.
+ * bdst < bwh
+	 *		there is a whiteout on some unrelated branch.
+ * -1 < bfound && bfound <= bsrc
+ * impossible.
+ * bfound < bdst
+	 *	found, but it is on an RO branch between bsrc and bdst. cannot
+ * proceed.
+ * bfound == bdst
+ * found, replace it if AUFS_MVDOWN_FORCE is set. otherwise return
+ * error.
+ * bdst < bfound
+ * found, after we create the file on bdst, it will be hidden.
+ */
+
+ AuDebugOn(a->bfound == -1
+ && a->bwh != -1
+ && a->bwh <= a->mvd_bsrc);
+ AuDebugOn(-1 < a->bfound
+ && a->bfound <= a->mvd_bsrc);
+
+ err = -EINVAL;
+ if (a->bfound == -1
+ && a->mvd_bsrc < a->bwh
+ && a->bwh != -1
+ && a->bwh < a->mvd_bdst) {
+ a->mvd_errno = EAU_MVDOWN_WHITEOUT;
+ AU_MVD_PR(dmsg, "bsrc %d, bdst %d, bfound %d, bwh %d\n",
+ a->mvd_bsrc, a->mvd_bdst, a->bfound, a->bwh);
+ goto out;
+ } else if (a->bfound != -1 && a->bfound < a->mvd_bdst) {
+ a->mvd_errno = EAU_MVDOWN_UPPER;
+ AU_MVD_PR(dmsg, "bdst %d, bfound %d\n",
+ a->mvd_bdst, a->bfound);
+ goto out;
+ }
+
+ err = 0; /* success */
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+static int au_mvd_args_exist(const unsigned char dmsg, struct au_mvd_args *a)
+{
+ int err;
+
+ err = 0;
+ if (!(a->mvdown.flags & AUFS_MVDOWN_OWLOWER)
+ && a->bfound == a->mvd_bdst)
+ err = -EEXIST;
+ AuTraceErr(err);
+ return err;
+}
+
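+/*
+ * decide the source and destination branches for the move-down and run all
+ * of the validations above.
+ */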
+static int au_mvd_args(const unsigned char dmsg, struct au_mvd_args *a)
+{
+ int err;
+ struct au_branch *br;
+
+ err = -EISDIR;
+ if (unlikely(S_ISDIR(a->inode->i_mode)))
+ goto out;
+
+ err = -EINVAL;
+ if (!(a->mvdown.flags & AUFS_MVDOWN_BRID_UPPER))
+ a->mvd_bsrc = au_ibstart(a->inode);
+ else {
+ a->mvd_bsrc = au_br_index(a->sb, a->mvd_src_brid);
+ if (unlikely(a->mvd_bsrc < 0
+ || (a->mvd_bsrc < au_dbstart(a->dentry)
+ || au_dbend(a->dentry) < a->mvd_bsrc
+ || !au_h_dptr(a->dentry, a->mvd_bsrc))
+ || (a->mvd_bsrc < au_ibstart(a->inode)
+ || au_ibend(a->inode) < a->mvd_bsrc
+ || !au_h_iptr(a->inode, a->mvd_bsrc)))) {
+ a->mvd_errno = EAU_MVDOWN_NOUPPER;
+ AU_MVD_PR(dmsg, "no upper\n");
+ goto out;
+ }
+ }
+ if (unlikely(a->mvd_bsrc == au_sbend(a->sb))) {
+ a->mvd_errno = EAU_MVDOWN_BOTTOM;
+ AU_MVD_PR(dmsg, "on the bottom\n");
+ goto out;
+ }
+ a->mvd_h_src_inode = au_h_iptr(a->inode, a->mvd_bsrc);
+ br = au_sbr(a->sb, a->mvd_bsrc);
+ err = au_br_rdonly(br);
+ if (!(a->mvdown.flags & AUFS_MVDOWN_ROUPPER)) {
+ if (unlikely(err))
+ goto out;
+ } else if (!(vfsub_native_ro(a->mvd_h_src_inode)
+ || IS_APPEND(a->mvd_h_src_inode))) {
+ if (err)
+ a->mvdown.flags |= AUFS_MVDOWN_ROUPPER_R;
+ /* go on */
+ } else
+ goto out;
+
+ err = -EINVAL;
+ if (!(a->mvdown.flags & AUFS_MVDOWN_BRID_LOWER)) {
+ a->mvd_bdst = find_lower_writable(a);
+ if (unlikely(a->mvd_bdst < 0)) {
+ a->mvd_errno = EAU_MVDOWN_BOTTOM;
+ AU_MVD_PR(dmsg, "no writable lower branch\n");
+ goto out;
+ }
+ } else {
+ a->mvd_bdst = au_br_index(a->sb, a->mvd_dst_brid);
+ if (unlikely(a->mvd_bdst < 0
+ || au_sbend(a->sb) < a->mvd_bdst)) {
+ a->mvd_errno = EAU_MVDOWN_NOLOWERBR;
+ AU_MVD_PR(dmsg, "no lower brid\n");
+ goto out;
+ }
+ }
+
+ err = au_mvd_args_busy(dmsg, a);
+ if (!err)
+ err = au_mvd_args_parent(dmsg, a);
+ if (!err)
+ err = au_mvd_args_intermediate(dmsg, a);
+ if (!err)
+ err = au_mvd_args_exist(dmsg, a);
+ if (!err)
+ AuDbg("b%d, b%d\n", a->mvd_bsrc, a->mvd_bdst);
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
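+/*
+ * entry point of the MVDOWN ioctl: copy the request from userspace, take the
+ * parent/child locks in order, run the move-down, and copy the result back.
+ */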
+int au_mvdown(struct dentry *dentry, struct aufs_mvdown __user *uarg)
+{
+ int err, e;
+ unsigned char dmsg;
+ struct au_mvd_args *args;
+ struct inode *inode;
+
+ inode = d_inode(dentry);
+ err = -EPERM;
+ if (unlikely(!capable(CAP_SYS_ADMIN)))
+ goto out;
+
+ err = -ENOMEM;
+ args = kmalloc(sizeof(*args), GFP_NOFS);
+ if (unlikely(!args))
+ goto out;
+
+ err = copy_from_user(&args->mvdown, uarg, sizeof(args->mvdown));
+ if (!err)
+ err = !access_ok(VERIFY_WRITE, uarg, sizeof(*uarg));
+ if (unlikely(err)) {
+ err = -EFAULT;
+ AuTraceErr(err);
+ goto out_free;
+ }
+ AuDbg("flags 0x%x\n", args->mvdown.flags);
+ args->mvdown.flags &= ~(AUFS_MVDOWN_ROLOWER_R | AUFS_MVDOWN_ROUPPER_R);
+ args->mvdown.au_errno = 0;
+ args->dentry = dentry;
+ args->inode = inode;
+ args->sb = dentry->d_sb;
+
+ err = -ENOENT;
+ dmsg = !!(args->mvdown.flags & AUFS_MVDOWN_DMSG);
+ args->parent = dget_parent(dentry);
+ args->dir = d_inode(args->parent);
+ inode_lock_nested(args->dir, I_MUTEX_PARENT);
+ dput(args->parent);
+ if (unlikely(args->parent != dentry->d_parent)) {
+ AU_MVD_PR(dmsg, "parent dir is moved\n");
+ goto out_dir;
+ }
+
+ inode_lock_nested(inode, I_MUTEX_CHILD);
+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH | AuLock_NOPLMW);
+ if (unlikely(err))
+ goto out_inode;
+
+ di_write_lock_parent(args->parent);
+ err = au_mvd_args(dmsg, args);
+ if (unlikely(err))
+ goto out_parent;
+
+ err = au_do_mvdown(dmsg, args);
+ if (unlikely(err))
+ goto out_parent;
+
+ au_cpup_attr_timesizes(args->dir);
+ au_cpup_attr_timesizes(inode);
+ if (!(args->mvdown.flags & AUFS_MVDOWN_KUPPER))
+ au_cpup_igen(inode, au_h_iptr(inode, args->mvd_bdst));
+ /* au_digen_dec(dentry); */
+
+out_parent:
+ di_write_unlock(args->parent);
+ aufs_read_unlock(dentry, AuLock_DW);
+out_inode:
+ inode_unlock(inode);
+out_dir:
+ inode_unlock(args->dir);
+out_free:
+ e = copy_to_user(uarg, &args->mvdown, sizeof(args->mvdown));
+ if (unlikely(e))
+ err = -EFAULT;
+ kfree(args);
+out:
+ AuTraceErr(err);
+ return err;
+}
diff --git a/fs/aufs/opts.c b/fs/aufs/opts.c
new file mode 100644
index 000000000..fc90b657a
--- /dev/null
+++ b/fs/aufs/opts.c
@@ -0,0 +1,1846 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * mount options/flags
+ */
+
+#include <linux/namei.h>
+#include <linux/types.h> /* a distribution requires this */
+#include <linux/parser.h>
+#include "aufs.h"
+
+/* ---------------------------------------------------------------------- */
+
+enum {
+ Opt_br,
+ Opt_add, Opt_del, Opt_mod, Opt_append, Opt_prepend,
+ Opt_idel, Opt_imod,
+ Opt_dirwh, Opt_rdcache, Opt_rdblk, Opt_rdhash,
+ Opt_rdblk_def, Opt_rdhash_def,
+ Opt_xino, Opt_noxino,
+ Opt_trunc_xino, Opt_trunc_xino_v, Opt_notrunc_xino,
+ Opt_trunc_xino_path, Opt_itrunc_xino,
+ Opt_trunc_xib, Opt_notrunc_xib,
+ Opt_shwh, Opt_noshwh,
+ Opt_plink, Opt_noplink, Opt_list_plink,
+ Opt_udba,
+ Opt_dio, Opt_nodio,
+ Opt_diropq_a, Opt_diropq_w,
+ Opt_warn_perm, Opt_nowarn_perm,
+ Opt_wbr_copyup, Opt_wbr_create,
+ Opt_fhsm_sec,
+ Opt_verbose, Opt_noverbose,
+ Opt_sum, Opt_nosum, Opt_wsum,
+ Opt_dirperm1, Opt_nodirperm1,
+ Opt_acl, Opt_noacl,
+ Opt_tail, Opt_ignore, Opt_ignore_silent, Opt_err
+};
+
+static match_table_t options = {
+ {Opt_br, "br=%s"},
+ {Opt_br, "br:%s"},
+
+ {Opt_add, "add=%d:%s"},
+ {Opt_add, "add:%d:%s"},
+ {Opt_add, "ins=%d:%s"},
+ {Opt_add, "ins:%d:%s"},
+ {Opt_append, "append=%s"},
+ {Opt_append, "append:%s"},
+ {Opt_prepend, "prepend=%s"},
+ {Opt_prepend, "prepend:%s"},
+
+ {Opt_del, "del=%s"},
+ {Opt_del, "del:%s"},
+ /* {Opt_idel, "idel:%d"}, */
+ {Opt_mod, "mod=%s"},
+ {Opt_mod, "mod:%s"},
+ /* {Opt_imod, "imod:%d:%s"}, */
+
+ {Opt_dirwh, "dirwh=%d"},
+
+ {Opt_xino, "xino=%s"},
+ {Opt_noxino, "noxino"},
+ {Opt_trunc_xino, "trunc_xino"},
+ {Opt_trunc_xino_v, "trunc_xino_v=%d:%d"},
+ {Opt_notrunc_xino, "notrunc_xino"},
+ {Opt_trunc_xino_path, "trunc_xino=%s"},
+ {Opt_itrunc_xino, "itrunc_xino=%d"},
+ /* {Opt_zxino, "zxino=%s"}, */
+ {Opt_trunc_xib, "trunc_xib"},
+ {Opt_notrunc_xib, "notrunc_xib"},
+
+#ifdef CONFIG_PROC_FS
+ {Opt_plink, "plink"},
+#else
+ {Opt_ignore_silent, "plink"},
+#endif
+
+ {Opt_noplink, "noplink"},
+
+#ifdef CONFIG_AUFS_DEBUG
+ {Opt_list_plink, "list_plink"},
+#endif
+
+ {Opt_udba, "udba=%s"},
+
+ {Opt_dio, "dio"},
+ {Opt_nodio, "nodio"},
+
+#ifdef CONFIG_AUFS_FHSM
+ {Opt_fhsm_sec, "fhsm_sec=%d"},
+#else
+ {Opt_ignore_silent, "fhsm_sec=%d"},
+#endif
+
+ {Opt_diropq_a, "diropq=always"},
+ {Opt_diropq_a, "diropq=a"},
+ {Opt_diropq_w, "diropq=whiteouted"},
+ {Opt_diropq_w, "diropq=w"},
+
+ {Opt_warn_perm, "warn_perm"},
+ {Opt_nowarn_perm, "nowarn_perm"},
+
+	/* keep them temporarily */
+ {Opt_ignore_silent, "nodlgt"},
+ {Opt_ignore_silent, "clean_plink"},
+
+#ifdef CONFIG_AUFS_SHWH
+ {Opt_shwh, "shwh"},
+#endif
+ {Opt_noshwh, "noshwh"},
+
+ {Opt_dirperm1, "dirperm1"},
+ {Opt_nodirperm1, "nodirperm1"},
+
+ {Opt_verbose, "verbose"},
+ {Opt_verbose, "v"},
+ {Opt_noverbose, "noverbose"},
+ {Opt_noverbose, "quiet"},
+ {Opt_noverbose, "q"},
+ {Opt_noverbose, "silent"},
+
+ {Opt_sum, "sum"},
+ {Opt_nosum, "nosum"},
+ {Opt_wsum, "wsum"},
+
+ {Opt_rdcache, "rdcache=%d"},
+ {Opt_rdblk, "rdblk=%d"},
+ {Opt_rdblk_def, "rdblk=def"},
+ {Opt_rdhash, "rdhash=%d"},
+ {Opt_rdhash_def, "rdhash=def"},
+
+ {Opt_wbr_create, "create=%s"},
+ {Opt_wbr_create, "create_policy=%s"},
+ {Opt_wbr_copyup, "cpup=%s"},
+ {Opt_wbr_copyup, "copyup=%s"},
+ {Opt_wbr_copyup, "copyup_policy=%s"},
+
+ /* generic VFS flag */
+#ifdef CONFIG_FS_POSIX_ACL
+ {Opt_acl, "acl"},
+ {Opt_noacl, "noacl"},
+#else
+ {Opt_ignore_silent, "acl"},
+ {Opt_ignore_silent, "noacl"},
+#endif
+
+ /* internal use for the scripts */
+ {Opt_ignore_silent, "si=%s"},
+
+ {Opt_br, "dirs=%s"},
+ {Opt_ignore, "debug=%d"},
+ {Opt_ignore, "delete=whiteout"},
+ {Opt_ignore, "delete=all"},
+ {Opt_ignore, "imap=%s"},
+
+ /* temporary workaround, due to old mount(8)? */
+ {Opt_ignore_silent, "relatime"},
+
+ {Opt_err, NULL}
+};
+
+/* ---------------------------------------------------------------------- */
+
+static const char *au_parser_pattern(int val, match_table_t tbl)
+{
+ struct match_token *p;
+
+ p = tbl;
+ while (p->pattern) {
+ if (p->token == val)
+ return p->pattern;
+ p++;
+ }
+ BUG();
+ return "??";
+}
+
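+/*
+ * pop one matching flag out of *val and return its pattern string;
+ * returns NULL once nothing matches (or *val is already zero).
+ */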
+static const char *au_optstr(int *val, match_table_t tbl)
+{
+ struct match_token *p;
+ int v;
+
+ v = *val;
+ if (!v)
+ goto out;
+ p = tbl;
+ while (p->pattern) {
+ if (p->token
+ && (v & p->token) == p->token) {
+ *val &= ~p->token;
+ return p->pattern;
+ }
+ p++;
+ }
+
+out:
+ return NULL;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static match_table_t brperm = {
+ {AuBrPerm_RO, AUFS_BRPERM_RO},
+ {AuBrPerm_RR, AUFS_BRPERM_RR},
+ {AuBrPerm_RW, AUFS_BRPERM_RW},
+ {0, NULL}
+};
+
+static match_table_t brattr = {
+ /* general */
+ {AuBrAttr_COO_REG, AUFS_BRATTR_COO_REG},
+ {AuBrAttr_COO_ALL, AUFS_BRATTR_COO_ALL},
+ /* 'unpin' attrib is meaningless since linux-3.18-rc1 */
+ {AuBrAttr_UNPIN, AUFS_BRATTR_UNPIN},
+#ifdef CONFIG_AUFS_FHSM
+ {AuBrAttr_FHSM, AUFS_BRATTR_FHSM},
+#endif
+#ifdef CONFIG_AUFS_XATTR
+ {AuBrAttr_ICEX, AUFS_BRATTR_ICEX},
+ {AuBrAttr_ICEX_SEC, AUFS_BRATTR_ICEX_SEC},
+ {AuBrAttr_ICEX_SYS, AUFS_BRATTR_ICEX_SYS},
+ {AuBrAttr_ICEX_TR, AUFS_BRATTR_ICEX_TR},
+ {AuBrAttr_ICEX_USR, AUFS_BRATTR_ICEX_USR},
+ {AuBrAttr_ICEX_OTH, AUFS_BRATTR_ICEX_OTH},
+#endif
+
+ /* ro/rr branch */
+ {AuBrRAttr_WH, AUFS_BRRATTR_WH},
+
+ /* rw branch */
+ {AuBrWAttr_MOO, AUFS_BRWATTR_MOO},
+ {AuBrWAttr_NoLinkWH, AUFS_BRWATTR_NLWH},
+
+ {0, NULL}
+};
+
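+/* parse a '+'-separated attribute list; warn and stop at the first unknown one */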
+static int br_attr_val(char *str, match_table_t table, substring_t args[])
+{
+ int attr, v;
+ char *p;
+
+ attr = 0;
+ do {
+ p = strchr(str, '+');
+ if (p)
+ *p = 0;
+ v = match_token(str, table, args);
+ if (v) {
+ if (v & AuBrAttr_CMOO_Mask)
+ attr &= ~AuBrAttr_CMOO_Mask;
+ attr |= v;
+ } else {
+ if (p)
+ *p = '+';
+ pr_warn("ignored branch attribute %s\n", str);
+ break;
+ }
+ if (p)
+ str = p + 1;
+ } while (p);
+
+ return attr;
+}
+
+static int au_do_optstr_br_attr(au_br_perm_str_t *str, int perm)
+{
+ int sz;
+ const char *p;
+ char *q;
+
+ q = str->a;
+ *q = 0;
+ p = au_optstr(&perm, brattr);
+ if (p) {
+ sz = strlen(p);
+ memcpy(q, p, sz + 1);
+ q += sz;
+ } else
+ goto out;
+
+ do {
+ p = au_optstr(&perm, brattr);
+ if (p) {
+ *q++ = '+';
+ sz = strlen(p);
+ memcpy(q, p, sz + 1);
+ q += sz;
+ }
+ } while (p);
+
+out:
+ return q - str->a;
+}
+
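+/*
+ * parse "perm[+attr[+attr...]]" for one branch; attributes that do not fit
+ * the chosen permission (or the obsolete 'unpin') are warned about and dropped.
+ */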
+static int noinline_for_stack br_perm_val(char *perm)
+{
+ int val, bad, sz;
+ char *p;
+ substring_t args[MAX_OPT_ARGS];
+ au_br_perm_str_t attr;
+
+ p = strchr(perm, '+');
+ if (p)
+ *p = 0;
+ val = match_token(perm, brperm, args);
+ if (!val) {
+ if (p)
+ *p = '+';
+ pr_warn("ignored branch permission %s\n", perm);
+ val = AuBrPerm_RO;
+ goto out;
+ }
+ if (!p)
+ goto out;
+
+ val |= br_attr_val(p + 1, brattr, args);
+
+ bad = 0;
+ switch (val & AuBrPerm_Mask) {
+ case AuBrPerm_RO:
+ case AuBrPerm_RR:
+ bad = val & AuBrWAttr_Mask;
+ val &= ~AuBrWAttr_Mask;
+ break;
+ case AuBrPerm_RW:
+ bad = val & AuBrRAttr_Mask;
+ val &= ~AuBrRAttr_Mask;
+ break;
+ }
+
+ /*
+	 * the 'unpin' attrib has been meaningless since linux-3.18-rc1, but aufs
+	 * does not treat it as an error, just a warning.
+ * this is a tiny guard for the user operation.
+ */
+ if (val & AuBrAttr_UNPIN) {
+ bad |= AuBrAttr_UNPIN;
+ val &= ~AuBrAttr_UNPIN;
+ }
+
+ if (unlikely(bad)) {
+ sz = au_do_optstr_br_attr(&attr, bad);
+ AuDebugOn(!sz);
+ pr_warn("ignored branch attribute %s\n", attr.a);
+ }
+
+out:
+ return val;
+}
+
+void au_optstr_br_perm(au_br_perm_str_t *str, int perm)
+{
+ au_br_perm_str_t attr;
+ const char *p;
+ char *q;
+ int sz;
+
+ q = str->a;
+ p = au_optstr(&perm, brperm);
+ AuDebugOn(!p || !*p);
+ sz = strlen(p);
+ memcpy(q, p, sz + 1);
+ q += sz;
+
+ sz = au_do_optstr_br_attr(&attr, perm);
+ if (sz) {
+ *q++ = '+';
+ memcpy(q, attr.a, sz + 1);
+ }
+
+ AuDebugOn(strlen(str->a) >= sizeof(str->a));
+}
+
+/* ---------------------------------------------------------------------- */
+
+static match_table_t udbalevel = {
+ {AuOpt_UDBA_REVAL, "reval"},
+ {AuOpt_UDBA_NONE, "none"},
+#ifdef CONFIG_AUFS_HNOTIFY
+ {AuOpt_UDBA_HNOTIFY, "notify"}, /* abstraction */
+#ifdef CONFIG_AUFS_HFSNOTIFY
+ {AuOpt_UDBA_HNOTIFY, "fsnotify"},
+#endif
+#endif
+ {-1, NULL}
+};
+
+static int noinline_for_stack udba_val(char *str)
+{
+ substring_t args[MAX_OPT_ARGS];
+
+ return match_token(str, udbalevel, args);
+}
+
+const char *au_optstr_udba(int udba)
+{
+ return au_parser_pattern(udba, udbalevel);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static match_table_t au_wbr_create_policy = {
+ {AuWbrCreate_TDP, "tdp"},
+ {AuWbrCreate_TDP, "top-down-parent"},
+ {AuWbrCreate_RR, "rr"},
+ {AuWbrCreate_RR, "round-robin"},
+ {AuWbrCreate_MFS, "mfs"},
+ {AuWbrCreate_MFS, "most-free-space"},
+ {AuWbrCreate_MFSV, "mfs:%d"},
+ {AuWbrCreate_MFSV, "most-free-space:%d"},
+
+ {AuWbrCreate_MFSRR, "mfsrr:%d"},
+ {AuWbrCreate_MFSRRV, "mfsrr:%d:%d"},
+ {AuWbrCreate_PMFS, "pmfs"},
+ {AuWbrCreate_PMFSV, "pmfs:%d"},
+ {AuWbrCreate_PMFSRR, "pmfsrr:%d"},
+ {AuWbrCreate_PMFSRRV, "pmfsrr:%d:%d"},
+
+ {-1, NULL}
+};
+
+/*
+ * cf. linux/lib/parser.c and cmdline.c
+ * gave up calling memparse() since it uses simple_strtoull() instead of
+ * kstrto...().
+ */
+static int noinline_for_stack
+au_match_ull(substring_t *s, unsigned long long *result)
+{
+ int err;
+ unsigned int len;
+ char a[32];
+
+ err = -ERANGE;
+ len = s->to - s->from;
+ if (len + 1 <= sizeof(a)) {
+ memcpy(a, s->from, len);
+ a[len] = '\0';
+ err = kstrtoull(a, 0, result);
+ }
+ return err;
+}
+
+static int au_wbr_mfs_wmark(substring_t *arg, char *str,
+ struct au_opt_wbr_create *create)
+{
+ int err;
+ unsigned long long ull;
+
+ err = 0;
+ if (!au_match_ull(arg, &ull))
+ create->mfsrr_watermark = ull;
+ else {
+ pr_err("bad integer in %s\n", str);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int au_wbr_mfs_sec(substring_t *arg, char *str,
+ struct au_opt_wbr_create *create)
+{
+ int n, err;
+
+ err = 0;
+ if (!match_int(arg, &n) && 0 <= n && n <= AUFS_MFS_MAX_SEC)
+ create->mfs_second = n;
+ else {
+ pr_err("bad integer in %s\n", str);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int noinline_for_stack
+au_wbr_create_val(char *str, struct au_opt_wbr_create *create)
+{
+ int err, e;
+ substring_t args[MAX_OPT_ARGS];
+
+ err = match_token(str, au_wbr_create_policy, args);
+ create->wbr_create = err;
+ switch (err) {
+ case AuWbrCreate_MFSRRV:
+ case AuWbrCreate_PMFSRRV:
+ e = au_wbr_mfs_wmark(&args[0], str, create);
+ if (!e)
+ e = au_wbr_mfs_sec(&args[1], str, create);
+ if (unlikely(e))
+ err = e;
+ break;
+ case AuWbrCreate_MFSRR:
+ case AuWbrCreate_PMFSRR:
+ e = au_wbr_mfs_wmark(&args[0], str, create);
+ if (unlikely(e)) {
+ err = e;
+ break;
+ }
+ /*FALLTHROUGH*/
+ case AuWbrCreate_MFS:
+ case AuWbrCreate_PMFS:
+ create->mfs_second = AUFS_MFS_DEF_SEC;
+ break;
+ case AuWbrCreate_MFSV:
+ case AuWbrCreate_PMFSV:
+ e = au_wbr_mfs_sec(&args[0], str, create);
+ if (unlikely(e))
+ err = e;
+ break;
+ }
+
+ return err;
+}
+
+const char *au_optstr_wbr_create(int wbr_create)
+{
+ return au_parser_pattern(wbr_create, au_wbr_create_policy);
+}
+
+static match_table_t au_wbr_copyup_policy = {
+ {AuWbrCopyup_TDP, "tdp"},
+ {AuWbrCopyup_TDP, "top-down-parent"},
+ {AuWbrCopyup_BUP, "bup"},
+ {AuWbrCopyup_BUP, "bottom-up-parent"},
+ {AuWbrCopyup_BU, "bu"},
+ {AuWbrCopyup_BU, "bottom-up"},
+ {-1, NULL}
+};
+
+static int noinline_for_stack au_wbr_copyup_val(char *str)
+{
+ substring_t args[MAX_OPT_ARGS];
+
+ return match_token(str, au_wbr_copyup_policy, args);
+}
+
+const char *au_optstr_wbr_copyup(int wbr_copyup)
+{
+ return au_parser_pattern(wbr_copyup, au_wbr_copyup_policy);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static const int lkup_dirflags = LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
+
+static void dump_opts(struct au_opts *opts)
+{
+#ifdef CONFIG_AUFS_DEBUG
+ /* reduce stack space */
+ union {
+ struct au_opt_add *add;
+ struct au_opt_del *del;
+ struct au_opt_mod *mod;
+ struct au_opt_xino *xino;
+ struct au_opt_xino_itrunc *xino_itrunc;
+ struct au_opt_wbr_create *create;
+ } u;
+ struct au_opt *opt;
+
+ opt = opts->opt;
+ while (opt->type != Opt_tail) {
+ switch (opt->type) {
+ case Opt_add:
+ u.add = &opt->add;
+ AuDbg("add {b%d, %s, 0x%x, %p}\n",
+ u.add->bindex, u.add->pathname, u.add->perm,
+ u.add->path.dentry);
+ break;
+ case Opt_del:
+ case Opt_idel:
+ u.del = &opt->del;
+ AuDbg("del {%s, %p}\n",
+ u.del->pathname, u.del->h_path.dentry);
+ break;
+ case Opt_mod:
+ case Opt_imod:
+ u.mod = &opt->mod;
+ AuDbg("mod {%s, 0x%x, %p}\n",
+ u.mod->path, u.mod->perm, u.mod->h_root);
+ break;
+ case Opt_append:
+ u.add = &opt->add;
+ AuDbg("append {b%d, %s, 0x%x, %p}\n",
+ u.add->bindex, u.add->pathname, u.add->perm,
+ u.add->path.dentry);
+ break;
+ case Opt_prepend:
+ u.add = &opt->add;
+ AuDbg("prepend {b%d, %s, 0x%x, %p}\n",
+ u.add->bindex, u.add->pathname, u.add->perm,
+ u.add->path.dentry);
+ break;
+ case Opt_dirwh:
+ AuDbg("dirwh %d\n", opt->dirwh);
+ break;
+ case Opt_rdcache:
+ AuDbg("rdcache %d\n", opt->rdcache);
+ break;
+ case Opt_rdblk:
+ AuDbg("rdblk %u\n", opt->rdblk);
+ break;
+ case Opt_rdblk_def:
+ AuDbg("rdblk_def\n");
+ break;
+ case Opt_rdhash:
+ AuDbg("rdhash %u\n", opt->rdhash);
+ break;
+ case Opt_rdhash_def:
+ AuDbg("rdhash_def\n");
+ break;
+ case Opt_xino:
+ u.xino = &opt->xino;
+ AuDbg("xino {%s %pD}\n", u.xino->path, u.xino->file);
+ break;
+ case Opt_trunc_xino:
+ AuLabel(trunc_xino);
+ break;
+ case Opt_notrunc_xino:
+ AuLabel(notrunc_xino);
+ break;
+ case Opt_trunc_xino_path:
+ case Opt_itrunc_xino:
+ u.xino_itrunc = &opt->xino_itrunc;
+ AuDbg("trunc_xino %d\n", u.xino_itrunc->bindex);
+ break;
+ case Opt_noxino:
+ AuLabel(noxino);
+ break;
+ case Opt_trunc_xib:
+ AuLabel(trunc_xib);
+ break;
+ case Opt_notrunc_xib:
+ AuLabel(notrunc_xib);
+ break;
+ case Opt_shwh:
+ AuLabel(shwh);
+ break;
+ case Opt_noshwh:
+ AuLabel(noshwh);
+ break;
+ case Opt_dirperm1:
+ AuLabel(dirperm1);
+ break;
+ case Opt_nodirperm1:
+ AuLabel(nodirperm1);
+ break;
+ case Opt_plink:
+ AuLabel(plink);
+ break;
+ case Opt_noplink:
+ AuLabel(noplink);
+ break;
+ case Opt_list_plink:
+ AuLabel(list_plink);
+ break;
+ case Opt_udba:
+ AuDbg("udba %d, %s\n",
+ opt->udba, au_optstr_udba(opt->udba));
+ break;
+ case Opt_dio:
+ AuLabel(dio);
+ break;
+ case Opt_nodio:
+ AuLabel(nodio);
+ break;
+ case Opt_diropq_a:
+ AuLabel(diropq_a);
+ break;
+ case Opt_diropq_w:
+ AuLabel(diropq_w);
+ break;
+ case Opt_warn_perm:
+ AuLabel(warn_perm);
+ break;
+ case Opt_nowarn_perm:
+ AuLabel(nowarn_perm);
+ break;
+ case Opt_verbose:
+ AuLabel(verbose);
+ break;
+ case Opt_noverbose:
+ AuLabel(noverbose);
+ break;
+ case Opt_sum:
+ AuLabel(sum);
+ break;
+ case Opt_nosum:
+ AuLabel(nosum);
+ break;
+ case Opt_wsum:
+ AuLabel(wsum);
+ break;
+ case Opt_wbr_create:
+ u.create = &opt->wbr_create;
+ AuDbg("create %d, %s\n", u.create->wbr_create,
+ au_optstr_wbr_create(u.create->wbr_create));
+ switch (u.create->wbr_create) {
+ case AuWbrCreate_MFSV:
+ case AuWbrCreate_PMFSV:
+ AuDbg("%d sec\n", u.create->mfs_second);
+ break;
+ case AuWbrCreate_MFSRR:
+ AuDbg("%llu watermark\n",
+ u.create->mfsrr_watermark);
+ break;
+ case AuWbrCreate_MFSRRV:
+ case AuWbrCreate_PMFSRRV:
+ AuDbg("%llu watermark, %d sec\n",
+ u.create->mfsrr_watermark,
+ u.create->mfs_second);
+ break;
+ }
+ break;
+ case Opt_wbr_copyup:
+ AuDbg("copyup %d, %s\n", opt->wbr_copyup,
+ au_optstr_wbr_copyup(opt->wbr_copyup));
+ break;
+ case Opt_fhsm_sec:
+ AuDbg("fhsm_sec %u\n", opt->fhsm_second);
+ break;
+ case Opt_acl:
+ AuLabel(acl);
+ break;
+ case Opt_noacl:
+ AuLabel(noacl);
+ break;
+ default:
+ BUG();
+ }
+ opt++;
+ }
+#endif
+}
+
+void au_opts_free(struct au_opts *opts)
+{
+ struct au_opt *opt;
+
+ opt = opts->opt;
+ while (opt->type != Opt_tail) {
+ switch (opt->type) {
+ case Opt_add:
+ case Opt_append:
+ case Opt_prepend:
+ path_put(&opt->add.path);
+ break;
+ case Opt_del:
+ case Opt_idel:
+ path_put(&opt->del.h_path);
+ break;
+ case Opt_mod:
+ case Opt_imod:
+ dput(opt->mod.h_root);
+ break;
+ case Opt_xino:
+ fput(opt->xino.file);
+ break;
+ }
+ opt++;
+ }
+}
+
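+/*
+ * parse one "path[=perm]" branch element: look the path up and, when no
+ * permission is given, pick a default (RR when the branch fs is one aufs
+ * treats as natively readonly, RW for the first branch of a writable mount,
+ * RO otherwise).
+ */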
+static int opt_add(struct au_opt *opt, char *opt_str, unsigned long sb_flags,
+ aufs_bindex_t bindex)
+{
+ int err;
+ struct au_opt_add *add = &opt->add;
+ char *p;
+
+ add->bindex = bindex;
+ add->perm = AuBrPerm_RO;
+ add->pathname = opt_str;
+ p = strchr(opt_str, '=');
+ if (p) {
+ *p++ = 0;
+ if (*p)
+ add->perm = br_perm_val(p);
+ }
+
+ err = vfsub_kern_path(add->pathname, lkup_dirflags, &add->path);
+ if (!err) {
+ if (!p) {
+ add->perm = AuBrPerm_RO;
+ if (au_test_fs_rr(add->path.dentry->d_sb))
+ add->perm = AuBrPerm_RR;
+ else if (!bindex && !(sb_flags & MS_RDONLY))
+ add->perm = AuBrPerm_RW;
+ }
+ opt->type = Opt_add;
+ goto out;
+ }
+ pr_err("lookup failed %s (%d)\n", add->pathname, err);
+ err = -EINVAL;
+
+out:
+ return err;
+}
+
+static int au_opts_parse_del(struct au_opt_del *del, substring_t args[])
+{
+ int err;
+
+ del->pathname = args[0].from;
+ AuDbg("del path %s\n", del->pathname);
+
+ err = vfsub_kern_path(del->pathname, lkup_dirflags, &del->h_path);
+ if (unlikely(err))
+ pr_err("lookup failed %s (%d)\n", del->pathname, err);
+
+ return err;
+}
+
+#if 0 /* reserved for future use */
+static int au_opts_parse_idel(struct super_block *sb, aufs_bindex_t bindex,
+ struct au_opt_del *del, substring_t args[])
+{
+ int err;
+ struct dentry *root;
+
+ err = -EINVAL;
+ root = sb->s_root;
+ aufs_read_lock(root, AuLock_FLUSH);
+ if (bindex < 0 || au_sbend(sb) < bindex) {
+ pr_err("out of bounds, %d\n", bindex);
+ goto out;
+ }
+
+ err = 0;
+ del->h_path.dentry = dget(au_h_dptr(root, bindex));
+ del->h_path.mnt = mntget(au_sbr_mnt(sb, bindex));
+
+out:
+ aufs_read_unlock(root, !AuLock_IR);
+ return err;
+}
+#endif
+
+static int noinline_for_stack
+au_opts_parse_mod(struct au_opt_mod *mod, substring_t args[])
+{
+ int err;
+ struct path path;
+ char *p;
+
+ err = -EINVAL;
+ mod->path = args[0].from;
+ p = strchr(mod->path, '=');
+ if (unlikely(!p)) {
+		pr_err("no permission %s\n", args[0].from);
+ goto out;
+ }
+
+ *p++ = 0;
+ err = vfsub_kern_path(mod->path, lkup_dirflags, &path);
+ if (unlikely(err)) {
+ pr_err("lookup failed %s (%d)\n", mod->path, err);
+ goto out;
+ }
+
+ mod->perm = br_perm_val(p);
+ AuDbg("mod path %s, perm 0x%x, %s\n", mod->path, mod->perm, p);
+ mod->h_root = dget(path.dentry);
+ path_put(&path);
+
+out:
+ return err;
+}
+
+#if 0 /* reserved for future use */
+static int au_opts_parse_imod(struct super_block *sb, aufs_bindex_t bindex,
+ struct au_opt_mod *mod, substring_t args[])
+{
+ int err;
+ struct dentry *root;
+
+ err = -EINVAL;
+ root = sb->s_root;
+ aufs_read_lock(root, AuLock_FLUSH);
+ if (bindex < 0 || au_sbend(sb) < bindex) {
+ pr_err("out of bounds, %d\n", bindex);
+ goto out;
+ }
+
+ err = 0;
+ mod->perm = br_perm_val(args[1].from);
+ AuDbg("mod path %s, perm 0x%x, %s\n",
+ mod->path, mod->perm, args[1].from);
+ mod->h_root = dget(au_h_dptr(root, bindex));
+
+out:
+ aufs_read_unlock(root, !AuLock_IR);
+ return err;
+}
+#endif
+
+static int au_opts_parse_xino(struct super_block *sb, struct au_opt_xino *xino,
+ substring_t args[])
+{
+ int err;
+ struct file *file;
+
+ file = au_xino_create(sb, args[0].from, /*silent*/0);
+ err = PTR_ERR(file);
+ if (IS_ERR(file))
+ goto out;
+
+ err = -EINVAL;
+ if (unlikely(file->f_path.dentry->d_sb == sb)) {
+ fput(file);
+ pr_err("%s must be outside\n", args[0].from);
+ goto out;
+ }
+
+ err = 0;
+ xino->file = file;
+ xino->path = args[0].from;
+
+out:
+ return err;
+}
+
+static int noinline_for_stack
+au_opts_parse_xino_itrunc_path(struct super_block *sb,
+ struct au_opt_xino_itrunc *xino_itrunc,
+ substring_t args[])
+{
+ int err;
+ aufs_bindex_t bend, bindex;
+ struct path path;
+ struct dentry *root;
+
+ err = vfsub_kern_path(args[0].from, lkup_dirflags, &path);
+ if (unlikely(err)) {
+ pr_err("lookup failed %s (%d)\n", args[0].from, err);
+ goto out;
+ }
+
+ xino_itrunc->bindex = -1;
+ root = sb->s_root;
+ aufs_read_lock(root, AuLock_FLUSH);
+ bend = au_sbend(sb);
+ for (bindex = 0; bindex <= bend; bindex++) {
+ if (au_h_dptr(root, bindex) == path.dentry) {
+ xino_itrunc->bindex = bindex;
+ break;
+ }
+ }
+ aufs_read_unlock(root, !AuLock_IR);
+ path_put(&path);
+
+ if (unlikely(xino_itrunc->bindex < 0)) {
+ pr_err("no such branch %s\n", args[0].from);
+ err = -EINVAL;
+ }
+
+out:
+ return err;
+}
+
+/* called without aufs lock */
+int au_opts_parse(struct super_block *sb, char *str, struct au_opts *opts)
+{
+ int err, n, token;
+ aufs_bindex_t bindex;
+ unsigned char skipped;
+ struct dentry *root;
+ struct au_opt *opt, *opt_tail;
+ char *opt_str;
+ /* reduce the stack space */
+ union {
+ struct au_opt_xino_itrunc *xino_itrunc;
+ struct au_opt_wbr_create *create;
+ } u;
+ struct {
+ substring_t args[MAX_OPT_ARGS];
+ } *a;
+
+ err = -ENOMEM;
+ a = kmalloc(sizeof(*a), GFP_NOFS);
+ if (unlikely(!a))
+ goto out;
+
+ root = sb->s_root;
+ err = 0;
+ bindex = 0;
+ opt = opts->opt;
+ opt_tail = opt + opts->max_opt - 1;
+ opt->type = Opt_tail;
+ while (!err && (opt_str = strsep(&str, ",")) && *opt_str) {
+ err = -EINVAL;
+ skipped = 0;
+ token = match_token(opt_str, options, a->args);
+ switch (token) {
+ case Opt_br:
+ err = 0;
+ while (!err && (opt_str = strsep(&a->args[0].from, ":"))
+ && *opt_str) {
+ err = opt_add(opt, opt_str, opts->sb_flags,
+ bindex++);
+ if (unlikely(!err && ++opt > opt_tail)) {
+ err = -E2BIG;
+ break;
+ }
+ opt->type = Opt_tail;
+ skipped = 1;
+ }
+ break;
+ case Opt_add:
+ if (unlikely(match_int(&a->args[0], &n))) {
+ pr_err("bad integer in %s\n", opt_str);
+ break;
+ }
+ bindex = n;
+ err = opt_add(opt, a->args[1].from, opts->sb_flags,
+ bindex);
+ if (!err)
+ opt->type = token;
+ break;
+ case Opt_append:
+ err = opt_add(opt, a->args[0].from, opts->sb_flags,
+ /*dummy bindex*/1);
+ if (!err)
+ opt->type = token;
+ break;
+ case Opt_prepend:
+ err = opt_add(opt, a->args[0].from, opts->sb_flags,
+ /*bindex*/0);
+ if (!err)
+ opt->type = token;
+ break;
+ case Opt_del:
+ err = au_opts_parse_del(&opt->del, a->args);
+ if (!err)
+ opt->type = token;
+ break;
+#if 0 /* reserved for future use */
+ case Opt_idel:
+ del->pathname = "(indexed)";
+ if (unlikely(match_int(&args[0], &n))) {
+ pr_err("bad integer in %s\n", opt_str);
+ break;
+ }
+ err = au_opts_parse_idel(sb, n, &opt->del, a->args);
+ if (!err)
+ opt->type = token;
+ break;
+#endif
+ case Opt_mod:
+ err = au_opts_parse_mod(&opt->mod, a->args);
+ if (!err)
+ opt->type = token;
+ break;
+#ifdef IMOD /* reserved for future use */
+ case Opt_imod:
+ u.mod->path = "(indexed)";
+ if (unlikely(match_int(&a->args[0], &n))) {
+ pr_err("bad integer in %s\n", opt_str);
+ break;
+ }
+ err = au_opts_parse_imod(sb, n, &opt->mod, a->args);
+ if (!err)
+ opt->type = token;
+ break;
+#endif
+ case Opt_xino:
+ err = au_opts_parse_xino(sb, &opt->xino, a->args);
+ if (!err)
+ opt->type = token;
+ break;
+
+ case Opt_trunc_xino_path:
+ err = au_opts_parse_xino_itrunc_path
+ (sb, &opt->xino_itrunc, a->args);
+ if (!err)
+ opt->type = token;
+ break;
+
+ case Opt_itrunc_xino:
+ u.xino_itrunc = &opt->xino_itrunc;
+ if (unlikely(match_int(&a->args[0], &n))) {
+ pr_err("bad integer in %s\n", opt_str);
+ break;
+ }
+ u.xino_itrunc->bindex = n;
+ aufs_read_lock(root, AuLock_FLUSH);
+ if (n < 0 || au_sbend(sb) < n) {
+ pr_err("out of bounds, %d\n", n);
+ aufs_read_unlock(root, !AuLock_IR);
+ break;
+ }
+ aufs_read_unlock(root, !AuLock_IR);
+ err = 0;
+ opt->type = token;
+ break;
+
+ case Opt_dirwh:
+ if (unlikely(match_int(&a->args[0], &opt->dirwh)))
+ break;
+ err = 0;
+ opt->type = token;
+ break;
+
+ case Opt_rdcache:
+ if (unlikely(match_int(&a->args[0], &n))) {
+ pr_err("bad integer in %s\n", opt_str);
+ break;
+ }
+ if (unlikely(n > AUFS_RDCACHE_MAX)) {
+ pr_err("rdcache must be smaller than %d\n",
+ AUFS_RDCACHE_MAX);
+ break;
+ }
+ opt->rdcache = n;
+ err = 0;
+ opt->type = token;
+ break;
+ case Opt_rdblk:
+ if (unlikely(match_int(&a->args[0], &n)
+ || n < 0
+ || n > KMALLOC_MAX_SIZE)) {
+ pr_err("bad integer in %s\n", opt_str);
+ break;
+ }
+ if (unlikely(n && n < NAME_MAX)) {
+ pr_err("rdblk must be larger than %d\n",
+ NAME_MAX);
+ break;
+ }
+ opt->rdblk = n;
+ err = 0;
+ opt->type = token;
+ break;
+ case Opt_rdhash:
+ if (unlikely(match_int(&a->args[0], &n)
+ || n < 0
+ || n * sizeof(struct hlist_head)
+ > KMALLOC_MAX_SIZE)) {
+ pr_err("bad integer in %s\n", opt_str);
+ break;
+ }
+ opt->rdhash = n;
+ err = 0;
+ opt->type = token;
+ break;
+
+ case Opt_trunc_xino:
+ case Opt_notrunc_xino:
+ case Opt_noxino:
+ case Opt_trunc_xib:
+ case Opt_notrunc_xib:
+ case Opt_shwh:
+ case Opt_noshwh:
+ case Opt_dirperm1:
+ case Opt_nodirperm1:
+ case Opt_plink:
+ case Opt_noplink:
+ case Opt_list_plink:
+ case Opt_dio:
+ case Opt_nodio:
+ case Opt_diropq_a:
+ case Opt_diropq_w:
+ case Opt_warn_perm:
+ case Opt_nowarn_perm:
+ case Opt_verbose:
+ case Opt_noverbose:
+ case Opt_sum:
+ case Opt_nosum:
+ case Opt_wsum:
+ case Opt_rdblk_def:
+ case Opt_rdhash_def:
+ case Opt_acl:
+ case Opt_noacl:
+ err = 0;
+ opt->type = token;
+ break;
+
+ case Opt_udba:
+ opt->udba = udba_val(a->args[0].from);
+ if (opt->udba >= 0) {
+ err = 0;
+ opt->type = token;
+ } else
+ pr_err("wrong value, %s\n", opt_str);
+ break;
+
+ case Opt_wbr_create:
+ u.create = &opt->wbr_create;
+ u.create->wbr_create
+ = au_wbr_create_val(a->args[0].from, u.create);
+ if (u.create->wbr_create >= 0) {
+ err = 0;
+ opt->type = token;
+ } else
+ pr_err("wrong value, %s\n", opt_str);
+ break;
+ case Opt_wbr_copyup:
+ opt->wbr_copyup = au_wbr_copyup_val(a->args[0].from);
+ if (opt->wbr_copyup >= 0) {
+ err = 0;
+ opt->type = token;
+ } else
+ pr_err("wrong value, %s\n", opt_str);
+ break;
+
+ case Opt_fhsm_sec:
+ if (unlikely(match_int(&a->args[0], &n)
+ || n < 0)) {
+ pr_err("bad integer in %s\n", opt_str);
+ break;
+ }
+ if (sysaufs_brs) {
+ opt->fhsm_second = n;
+ opt->type = token;
+ } else
+ pr_warn("ignored %s\n", opt_str);
+ err = 0;
+ break;
+
+ case Opt_ignore:
+ pr_warn("ignored %s\n", opt_str);
+ /*FALLTHROUGH*/
+ case Opt_ignore_silent:
+ skipped = 1;
+ err = 0;
+ break;
+ case Opt_err:
+ pr_err("unknown option %s\n", opt_str);
+ break;
+ }
+
+ if (!err && !skipped) {
+ if (unlikely(++opt > opt_tail)) {
+ err = -E2BIG;
+ opt--;
+ opt->type = Opt_tail;
+ break;
+ }
+ opt->type = Opt_tail;
+ }
+ }
+
+ kfree(a);
+ dump_opts(opts);
+ if (unlikely(err))
+ au_opts_free(opts);
+
+out:
+ return err;
+}
+
+static int au_opt_wbr_create(struct super_block *sb,
+ struct au_opt_wbr_create *create)
+{
+ int err;
+ struct au_sbinfo *sbinfo;
+
+ SiMustWriteLock(sb);
+
+ err = 1; /* handled */
+ sbinfo = au_sbi(sb);
+ if (sbinfo->si_wbr_create_ops->fin) {
+ err = sbinfo->si_wbr_create_ops->fin(sb);
+ if (!err)
+ err = 1;
+ }
+
+ sbinfo->si_wbr_create = create->wbr_create;
+ sbinfo->si_wbr_create_ops = au_wbr_create_ops + create->wbr_create;
+ switch (create->wbr_create) {
+ case AuWbrCreate_MFSRRV:
+ case AuWbrCreate_MFSRR:
+ case AuWbrCreate_PMFSRR:
+ case AuWbrCreate_PMFSRRV:
+ sbinfo->si_wbr_mfs.mfsrr_watermark = create->mfsrr_watermark;
+ /*FALLTHROUGH*/
+ case AuWbrCreate_MFS:
+ case AuWbrCreate_MFSV:
+ case AuWbrCreate_PMFS:
+ case AuWbrCreate_PMFSV:
+ sbinfo->si_wbr_mfs.mfs_expire
+ = msecs_to_jiffies(create->mfs_second * MSEC_PER_SEC);
+ break;
+ }
+
+ if (sbinfo->si_wbr_create_ops->init)
+ sbinfo->si_wbr_create_ops->init(sb); /* ignore */
+
+ return err;
+}
+
+/*
+ * returns,
+ * plus: processed without an error
+ * zero: unprocessed
+ */
+static int au_opt_simple(struct super_block *sb, struct au_opt *opt,
+ struct au_opts *opts)
+{
+ int err;
+ struct au_sbinfo *sbinfo;
+
+ SiMustWriteLock(sb);
+
+ err = 1; /* handled */
+ sbinfo = au_sbi(sb);
+ switch (opt->type) {
+ case Opt_udba:
+ sbinfo->si_mntflags &= ~AuOptMask_UDBA;
+ sbinfo->si_mntflags |= opt->udba;
+ opts->given_udba |= opt->udba;
+ break;
+
+ case Opt_plink:
+ au_opt_set(sbinfo->si_mntflags, PLINK);
+ break;
+ case Opt_noplink:
+ if (au_opt_test(sbinfo->si_mntflags, PLINK))
+ au_plink_put(sb, /*verbose*/1);
+ au_opt_clr(sbinfo->si_mntflags, PLINK);
+ break;
+ case Opt_list_plink:
+ if (au_opt_test(sbinfo->si_mntflags, PLINK))
+ au_plink_list(sb);
+ break;
+
+ case Opt_dio:
+ au_opt_set(sbinfo->si_mntflags, DIO);
+ au_fset_opts(opts->flags, REFRESH_DYAOP);
+ break;
+ case Opt_nodio:
+ au_opt_clr(sbinfo->si_mntflags, DIO);
+ au_fset_opts(opts->flags, REFRESH_DYAOP);
+ break;
+
+ case Opt_fhsm_sec:
+ au_fhsm_set(sbinfo, opt->fhsm_second);
+ break;
+
+ case Opt_diropq_a:
+ au_opt_set(sbinfo->si_mntflags, ALWAYS_DIROPQ);
+ break;
+ case Opt_diropq_w:
+ au_opt_clr(sbinfo->si_mntflags, ALWAYS_DIROPQ);
+ break;
+
+ case Opt_warn_perm:
+ au_opt_set(sbinfo->si_mntflags, WARN_PERM);
+ break;
+ case Opt_nowarn_perm:
+ au_opt_clr(sbinfo->si_mntflags, WARN_PERM);
+ break;
+
+ case Opt_verbose:
+ au_opt_set(sbinfo->si_mntflags, VERBOSE);
+ break;
+ case Opt_noverbose:
+ au_opt_clr(sbinfo->si_mntflags, VERBOSE);
+ break;
+
+ case Opt_sum:
+ au_opt_set(sbinfo->si_mntflags, SUM);
+ break;
+ case Opt_wsum:
+ au_opt_clr(sbinfo->si_mntflags, SUM);
+		au_opt_set(sbinfo->si_mntflags, SUM_W);
+		break;
+ case Opt_nosum:
+ au_opt_clr(sbinfo->si_mntflags, SUM);
+ au_opt_clr(sbinfo->si_mntflags, SUM_W);
+ break;
+
+ case Opt_wbr_create:
+ err = au_opt_wbr_create(sb, &opt->wbr_create);
+ break;
+ case Opt_wbr_copyup:
+ sbinfo->si_wbr_copyup = opt->wbr_copyup;
+ sbinfo->si_wbr_copyup_ops = au_wbr_copyup_ops + opt->wbr_copyup;
+ break;
+
+ case Opt_dirwh:
+ sbinfo->si_dirwh = opt->dirwh;
+ break;
+
+ case Opt_rdcache:
+ sbinfo->si_rdcache
+ = msecs_to_jiffies(opt->rdcache * MSEC_PER_SEC);
+ break;
+ case Opt_rdblk:
+ sbinfo->si_rdblk = opt->rdblk;
+ break;
+ case Opt_rdblk_def:
+ sbinfo->si_rdblk = AUFS_RDBLK_DEF;
+ break;
+ case Opt_rdhash:
+ sbinfo->si_rdhash = opt->rdhash;
+ break;
+ case Opt_rdhash_def:
+ sbinfo->si_rdhash = AUFS_RDHASH_DEF;
+ break;
+
+ case Opt_shwh:
+ au_opt_set(sbinfo->si_mntflags, SHWH);
+ break;
+ case Opt_noshwh:
+ au_opt_clr(sbinfo->si_mntflags, SHWH);
+ break;
+
+ case Opt_dirperm1:
+ au_opt_set(sbinfo->si_mntflags, DIRPERM1);
+ break;
+ case Opt_nodirperm1:
+ au_opt_clr(sbinfo->si_mntflags, DIRPERM1);
+ break;
+
+ case Opt_trunc_xino:
+ au_opt_set(sbinfo->si_mntflags, TRUNC_XINO);
+ break;
+ case Opt_notrunc_xino:
+ au_opt_clr(sbinfo->si_mntflags, TRUNC_XINO);
+ break;
+
+ case Opt_trunc_xino_path:
+ case Opt_itrunc_xino:
+ err = au_xino_trunc(sb, opt->xino_itrunc.bindex);
+ if (!err)
+ err = 1;
+ break;
+
+ case Opt_trunc_xib:
+ au_fset_opts(opts->flags, TRUNC_XIB);
+ break;
+ case Opt_notrunc_xib:
+ au_fclr_opts(opts->flags, TRUNC_XIB);
+ break;
+
+ case Opt_acl:
+ sb->s_flags |= MS_POSIXACL;
+ break;
+ case Opt_noacl:
+ sb->s_flags &= ~MS_POSIXACL;
+ break;
+
+ default:
+ err = 0;
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * returns tri-state.
+ * plus: processed without an error
+ * zero: unprocessed
+ * minus: error
+ */
+static int au_opt_br(struct super_block *sb, struct au_opt *opt,
+ struct au_opts *opts)
+{
+ int err, do_refresh;
+
+ err = 0;
+ switch (opt->type) {
+ case Opt_append:
+ opt->add.bindex = au_sbend(sb) + 1;
+ if (opt->add.bindex < 0)
+ opt->add.bindex = 0;
+ goto add;
+ case Opt_prepend:
+ opt->add.bindex = 0;
+ add: /* indented label */
+ case Opt_add:
+ err = au_br_add(sb, &opt->add,
+ au_ftest_opts(opts->flags, REMOUNT));
+ if (!err) {
+ err = 1;
+ au_fset_opts(opts->flags, REFRESH);
+ }
+ break;
+
+ case Opt_del:
+ case Opt_idel:
+ err = au_br_del(sb, &opt->del,
+ au_ftest_opts(opts->flags, REMOUNT));
+ if (!err) {
+ err = 1;
+ au_fset_opts(opts->flags, TRUNC_XIB);
+ au_fset_opts(opts->flags, REFRESH);
+ }
+ break;
+
+ case Opt_mod:
+ case Opt_imod:
+ err = au_br_mod(sb, &opt->mod,
+ au_ftest_opts(opts->flags, REMOUNT),
+ &do_refresh);
+ if (!err) {
+ err = 1;
+ if (do_refresh)
+ au_fset_opts(opts->flags, REFRESH);
+ }
+ break;
+ }
+
+ return err;
+}
+
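+/* handle the xino/noxino options and remember which branch hosts the xino file */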
+static int au_opt_xino(struct super_block *sb, struct au_opt *opt,
+ struct au_opt_xino **opt_xino,
+ struct au_opts *opts)
+{
+ int err;
+ aufs_bindex_t bend, bindex;
+ struct dentry *root, *parent, *h_root;
+
+ err = 0;
+ switch (opt->type) {
+ case Opt_xino:
+ err = au_xino_set(sb, &opt->xino,
+ !!au_ftest_opts(opts->flags, REMOUNT));
+ if (unlikely(err))
+ break;
+
+ *opt_xino = &opt->xino;
+ au_xino_brid_set(sb, -1);
+
+ /* safe d_parent access */
+ parent = opt->xino.file->f_path.dentry->d_parent;
+ root = sb->s_root;
+ bend = au_sbend(sb);
+ for (bindex = 0; bindex <= bend; bindex++) {
+ h_root = au_h_dptr(root, bindex);
+ if (h_root == parent) {
+ au_xino_brid_set(sb, au_sbr_id(sb, bindex));
+ break;
+ }
+ }
+ break;
+
+ case Opt_noxino:
+ au_xino_clr(sb);
+ au_xino_brid_set(sb, -1);
+ *opt_xino = (void *)-1;
+ break;
+ }
+
+ return err;
+}
+
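+/*
+ * sanity-check the resulting branch configuration: warn about questionable
+ * combinations, (re)initialize the whiteout base on branches that need it,
+ * and refresh the NO_DREVAL and FHSM state of the superblock.
+ */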
+int au_opts_verify(struct super_block *sb, unsigned long sb_flags,
+ unsigned int pending)
+{
+ int err, fhsm;
+ aufs_bindex_t bindex, bend;
+ unsigned char do_plink, skip, do_free, can_no_dreval;
+ struct au_branch *br;
+ struct au_wbr *wbr;
+ struct dentry *root, *dentry;
+ struct inode *dir, *h_dir;
+ struct au_sbinfo *sbinfo;
+ struct au_hinode *hdir;
+
+ SiMustAnyLock(sb);
+
+ sbinfo = au_sbi(sb);
+ AuDebugOn(!(sbinfo->si_mntflags & AuOptMask_UDBA));
+
+ if (!(sb_flags & MS_RDONLY)) {
+ if (unlikely(!au_br_writable(au_sbr_perm(sb, 0))))
+ pr_warn("first branch should be rw\n");
+ if (unlikely(au_opt_test(sbinfo->si_mntflags, SHWH)))
+ pr_warn_once("shwh should be used with ro\n");
+ }
+
+ if (au_opt_test((sbinfo->si_mntflags | pending), UDBA_HNOTIFY)
+ && !au_opt_test(sbinfo->si_mntflags, XINO))
+ pr_warn_once("udba=*notify requires xino\n");
+
+ if (au_opt_test(sbinfo->si_mntflags, DIRPERM1))
+ pr_warn_once("dirperm1 breaks the protection"
+ " by the permission bits on the lower branch\n");
+
+ err = 0;
+ fhsm = 0;
+ root = sb->s_root;
+ dir = d_inode(root);
+ do_plink = !!au_opt_test(sbinfo->si_mntflags, PLINK);
+ can_no_dreval = !!au_opt_test((sbinfo->si_mntflags | pending),
+ UDBA_NONE);
+ bend = au_sbend(sb);
+ for (bindex = 0; !err && bindex <= bend; bindex++) {
+ skip = 0;
+ h_dir = au_h_iptr(dir, bindex);
+ br = au_sbr(sb, bindex);
+
+ if ((br->br_perm & AuBrAttr_ICEX)
+ && !h_dir->i_op->listxattr)
+ br->br_perm &= ~AuBrAttr_ICEX;
+#if 0
+ if ((br->br_perm & AuBrAttr_ICEX_SEC)
+ && (au_br_sb(br)->s_flags & MS_NOSEC))
+ br->br_perm &= ~AuBrAttr_ICEX_SEC;
+#endif
+
+ do_free = 0;
+ wbr = br->br_wbr;
+ if (wbr)
+ wbr_wh_read_lock(wbr);
+
+ if (!au_br_writable(br->br_perm)) {
+ do_free = !!wbr;
+ skip = (!wbr
+ || (!wbr->wbr_whbase
+ && !wbr->wbr_plink
+ && !wbr->wbr_orph));
+ } else if (!au_br_wh_linkable(br->br_perm)) {
+ /* skip = (!br->br_whbase && !br->br_orph); */
+ skip = (!wbr || !wbr->wbr_whbase);
+ if (skip && wbr) {
+ if (do_plink)
+ skip = !!wbr->wbr_plink;
+ else
+ skip = !wbr->wbr_plink;
+ }
+ } else {
+ /* skip = (br->br_whbase && br->br_ohph); */
+ skip = (wbr && wbr->wbr_whbase);
+ if (skip) {
+ if (do_plink)
+ skip = !!wbr->wbr_plink;
+ else
+ skip = !wbr->wbr_plink;
+ }
+ }
+ if (wbr)
+ wbr_wh_read_unlock(wbr);
+
+ if (can_no_dreval) {
+ dentry = br->br_path.dentry;
+ spin_lock(&dentry->d_lock);
+ if (dentry->d_flags &
+ (DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE))
+ can_no_dreval = 0;
+ spin_unlock(&dentry->d_lock);
+ }
+
+ if (au_br_fhsm(br->br_perm)) {
+ fhsm++;
+ AuDebugOn(!br->br_fhsm);
+ }
+
+ if (skip)
+ continue;
+
+ hdir = au_hi(dir, bindex);
+ au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT);
+ if (wbr)
+ wbr_wh_write_lock(wbr);
+ err = au_wh_init(br, sb);
+ if (wbr)
+ wbr_wh_write_unlock(wbr);
+ au_hn_imtx_unlock(hdir);
+
+ if (!err && do_free) {
+ kfree(wbr);
+ br->br_wbr = NULL;
+ }
+ }
+
+ if (can_no_dreval)
+ au_fset_si(sbinfo, NO_DREVAL);
+ else
+ au_fclr_si(sbinfo, NO_DREVAL);
+
+ if (fhsm >= 2) {
+ au_fset_si(sbinfo, FHSM);
+ for (bindex = bend; bindex >= 0; bindex--) {
+ br = au_sbr(sb, bindex);
+ if (au_br_fhsm(br->br_perm)) {
+ au_fhsm_set_bottom(sb, bindex);
+ break;
+ }
+ }
+ } else {
+ au_fclr_si(sbinfo, FHSM);
+ au_fhsm_set_bottom(sb, -1);
+ }
+
+ return err;
+}
+
+int au_opts_mount(struct super_block *sb, struct au_opts *opts)
+{
+ int err;
+ unsigned int tmp;
+ aufs_bindex_t bindex, bend;
+ struct au_opt *opt;
+ struct au_opt_xino *opt_xino, xino;
+ struct au_sbinfo *sbinfo;
+ struct au_branch *br;
+ struct inode *dir;
+
+ SiMustWriteLock(sb);
+
+ err = 0;
+ opt_xino = NULL;
+ opt = opts->opt;
+ while (err >= 0 && opt->type != Opt_tail)
+ err = au_opt_simple(sb, opt++, opts);
+ if (err > 0)
+ err = 0;
+ else if (unlikely(err < 0))
+ goto out;
+
+	/* disable xino and udba temporarily */
+ sbinfo = au_sbi(sb);
+ tmp = sbinfo->si_mntflags;
+ au_opt_clr(sbinfo->si_mntflags, XINO);
+ au_opt_set_udba(sbinfo->si_mntflags, UDBA_REVAL);
+
+ opt = opts->opt;
+ while (err >= 0 && opt->type != Opt_tail)
+ err = au_opt_br(sb, opt++, opts);
+ if (err > 0)
+ err = 0;
+ else if (unlikely(err < 0))
+ goto out;
+
+ bend = au_sbend(sb);
+ if (unlikely(bend < 0)) {
+ err = -EINVAL;
+ pr_err("no branches\n");
+ goto out;
+ }
+
+ if (au_opt_test(tmp, XINO))
+ au_opt_set(sbinfo->si_mntflags, XINO);
+ opt = opts->opt;
+ while (!err && opt->type != Opt_tail)
+ err = au_opt_xino(sb, opt++, &opt_xino, opts);
+ if (unlikely(err))
+ goto out;
+
+ err = au_opts_verify(sb, sb->s_flags, tmp);
+ if (unlikely(err))
+ goto out;
+
+ /* restore xino */
+ if (au_opt_test(tmp, XINO) && !opt_xino) {
+ xino.file = au_xino_def(sb);
+ err = PTR_ERR(xino.file);
+ if (IS_ERR(xino.file))
+ goto out;
+
+ err = au_xino_set(sb, &xino, /*remount*/0);
+ fput(xino.file);
+ if (unlikely(err))
+ goto out;
+ }
+
+ /* restore udba */
+ tmp &= AuOptMask_UDBA;
+ sbinfo->si_mntflags &= ~AuOptMask_UDBA;
+ sbinfo->si_mntflags |= tmp;
+ bend = au_sbend(sb);
+ for (bindex = 0; bindex <= bend; bindex++) {
+ br = au_sbr(sb, bindex);
+ err = au_hnotify_reset_br(tmp, br, br->br_perm);
+ if (unlikely(err))
+ AuIOErr("hnotify failed on br %d, %d, ignored\n",
+ bindex, err);
+ /* go on even if err */
+ }
+ if (au_opt_test(tmp, UDBA_HNOTIFY)) {
+ dir = d_inode(sb->s_root);
+ au_hn_reset(dir, au_hi_flags(dir, /*isdir*/1) & ~AuHi_XINO);
+ }
+
+out:
+ return err;
+}
+
+int au_opts_remount(struct super_block *sb, struct au_opts *opts)
+{
+ int err, rerr;
+ unsigned char no_dreval;
+ struct inode *dir;
+ struct au_opt_xino *opt_xino;
+ struct au_opt *opt;
+ struct au_sbinfo *sbinfo;
+
+ SiMustWriteLock(sb);
+
+ err = 0;
+ dir = d_inode(sb->s_root);
+ sbinfo = au_sbi(sb);
+ opt_xino = NULL;
+ opt = opts->opt;
+ while (err >= 0 && opt->type != Opt_tail) {
+ err = au_opt_simple(sb, opt, opts);
+ if (!err)
+ err = au_opt_br(sb, opt, opts);
+ if (!err)
+ err = au_opt_xino(sb, opt, &opt_xino, opts);
+ opt++;
+ }
+ if (err > 0)
+ err = 0;
+ AuTraceErr(err);
+	/* go on even if err */
+
+ no_dreval = !!au_ftest_si(sbinfo, NO_DREVAL);
+ rerr = au_opts_verify(sb, opts->sb_flags, /*pending*/0);
+ if (unlikely(rerr && !err))
+ err = rerr;
+
+ if (no_dreval != !!au_ftest_si(sbinfo, NO_DREVAL))
+ au_fset_opts(opts->flags, REFRESH_IDOP);
+
+ if (au_ftest_opts(opts->flags, TRUNC_XIB)) {
+ rerr = au_xib_trunc(sb);
+ if (unlikely(rerr && !err))
+ err = rerr;
+ }
+
+ /* will be handled by the caller */
+ if (!au_ftest_opts(opts->flags, REFRESH)
+ && (opts->given_udba
+ || au_opt_test(sbinfo->si_mntflags, XINO)
+ || au_ftest_opts(opts->flags, REFRESH_IDOP)
+ ))
+ au_fset_opts(opts->flags, REFRESH);
+
+ AuDbg("status 0x%x\n", opts->flags);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+unsigned int au_opt_udba(struct super_block *sb)
+{
+ return au_mntflags(sb) & AuOptMask_UDBA;
+}
diff --git a/fs/aufs/opts.h b/fs/aufs/opts.h
new file mode 100644
index 000000000..0d6c2e1c7
--- /dev/null
+++ b/fs/aufs/opts.h
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * mount options/flags
+ */
+
+#ifndef __AUFS_OPTS_H__
+#define __AUFS_OPTS_H__
+
+#ifdef __KERNEL__
+
+#include <linux/path.h>
+
+struct file;
+struct super_block;
+
+/* ---------------------------------------------------------------------- */
+
+/* mount flags */
+#define AuOpt_XINO 1 /* external inode number bitmap
+ and translation table */
+#define AuOpt_TRUNC_XINO (1 << 1) /* truncate xino files */
+#define AuOpt_UDBA_NONE		(1 << 2)	/* user's direct branch access */
+#define AuOpt_UDBA_REVAL (1 << 3)
+#define AuOpt_UDBA_HNOTIFY (1 << 4)
+#define AuOpt_SHWH (1 << 5) /* show whiteout */
+#define AuOpt_PLINK (1 << 6) /* pseudo-link */
+#define AuOpt_DIRPERM1 (1 << 7) /* ignore the lower dir's perm
+ bits */
+#define AuOpt_ALWAYS_DIROPQ	(1 << 9)	/* policy for creating diropq */
+#define AuOpt_SUM (1 << 10) /* summation for statfs(2) */
+#define AuOpt_SUM_W (1 << 11) /* unimplemented */
+#define AuOpt_WARN_PERM (1 << 12) /* warn when add-branch */
+#define AuOpt_VERBOSE (1 << 13) /* busy inode when del-branch */
+#define AuOpt_DIO (1 << 14) /* direct io */
+
+#ifndef CONFIG_AUFS_HNOTIFY
+#undef AuOpt_UDBA_HNOTIFY
+#define AuOpt_UDBA_HNOTIFY 0
+#endif
+#ifndef CONFIG_AUFS_SHWH
+#undef AuOpt_SHWH
+#define AuOpt_SHWH 0
+#endif
+
+#define AuOpt_Def (AuOpt_XINO \
+ | AuOpt_UDBA_REVAL \
+ | AuOpt_PLINK \
+ /* | AuOpt_DIRPERM1 */ \
+ | AuOpt_WARN_PERM)
+#define AuOptMask_UDBA (AuOpt_UDBA_NONE \
+ | AuOpt_UDBA_REVAL \
+ | AuOpt_UDBA_HNOTIFY)
+
+#define au_opt_test(flags, name) (flags & AuOpt_##name)
+#define au_opt_set(flags, name) do { \
+ BUILD_BUG_ON(AuOpt_##name & AuOptMask_UDBA); \
+ ((flags) |= AuOpt_##name); \
+} while (0)
+#define au_opt_set_udba(flags, name) do { \
+ (flags) &= ~AuOptMask_UDBA; \
+ ((flags) |= AuOpt_##name); \
+} while (0)
+#define au_opt_clr(flags, name) do { \
+ ((flags) &= ~AuOpt_##name); \
+} while (0)
+
+static inline unsigned int au_opts_plink(unsigned int mntflags)
+{
+#ifdef CONFIG_PROC_FS
+ return mntflags;
+#else
+ return mntflags & ~AuOpt_PLINK;
+#endif
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* policies to select one among multiple writable branches */
+enum {
+ AuWbrCreate_TDP, /* top down parent */
+ AuWbrCreate_RR, /* round robin */
+ AuWbrCreate_MFS, /* most free space */
+ AuWbrCreate_MFSV, /* mfs with seconds */
+ AuWbrCreate_MFSRR, /* mfs then rr */
+ AuWbrCreate_MFSRRV, /* mfs then rr with seconds */
+ AuWbrCreate_PMFS, /* parent and mfs */
+ AuWbrCreate_PMFSV, /* parent and mfs with seconds */
+ AuWbrCreate_PMFSRR, /* parent, mfs and round-robin */
+ AuWbrCreate_PMFSRRV, /* plus seconds */
+
+ AuWbrCreate_Def = AuWbrCreate_TDP
+};
+
+enum {
+ AuWbrCopyup_TDP, /* top down parent */
+ AuWbrCopyup_BUP, /* bottom up parent */
+ AuWbrCopyup_BU, /* bottom up */
+
+ AuWbrCopyup_Def = AuWbrCopyup_TDP
+};
+
+/* ---------------------------------------------------------------------- */
+
+struct au_opt_add {
+ aufs_bindex_t bindex;
+ char *pathname;
+ int perm;
+ struct path path;
+};
+
+struct au_opt_del {
+ char *pathname;
+ struct path h_path;
+};
+
+struct au_opt_mod {
+ char *path;
+ int perm;
+ struct dentry *h_root;
+};
+
+struct au_opt_xino {
+ char *path;
+ struct file *file;
+};
+
+struct au_opt_xino_itrunc {
+ aufs_bindex_t bindex;
+};
+
+struct au_opt_wbr_create {
+ int wbr_create;
+ int mfs_second;
+ unsigned long long mfsrr_watermark;
+};
+
+struct au_opt {
+ int type;
+ union {
+ struct au_opt_xino xino;
+ struct au_opt_xino_itrunc xino_itrunc;
+ struct au_opt_add add;
+ struct au_opt_del del;
+ struct au_opt_mod mod;
+ int dirwh;
+ int rdcache;
+ unsigned int rdblk;
+ unsigned int rdhash;
+ int udba;
+ struct au_opt_wbr_create wbr_create;
+ int wbr_copyup;
+ unsigned int fhsm_second;
+ };
+};
+
+/* opts flags */
+#define AuOpts_REMOUNT 1
+#define AuOpts_REFRESH (1 << 1)
+#define AuOpts_TRUNC_XIB (1 << 2)
+#define AuOpts_REFRESH_DYAOP (1 << 3)
+#define AuOpts_REFRESH_IDOP (1 << 4)
+#define au_ftest_opts(flags, name) ((flags) & AuOpts_##name)
+#define au_fset_opts(flags, name) \
+ do { (flags) |= AuOpts_##name; } while (0)
+#define au_fclr_opts(flags, name) \
+ do { (flags) &= ~AuOpts_##name; } while (0)
+
+struct au_opts {
+ struct au_opt *opt;
+ int max_opt;
+
+ unsigned int given_udba;
+ unsigned int flags;
+ unsigned long sb_flags;
+};
+
+/* ---------------------------------------------------------------------- */
+
+/* opts.c */
+void au_optstr_br_perm(au_br_perm_str_t *str, int perm);
+const char *au_optstr_udba(int udba);
+const char *au_optstr_wbr_copyup(int wbr_copyup);
+const char *au_optstr_wbr_create(int wbr_create);
+
+void au_opts_free(struct au_opts *opts);
+int au_opts_parse(struct super_block *sb, char *str, struct au_opts *opts);
+int au_opts_verify(struct super_block *sb, unsigned long sb_flags,
+ unsigned int pending);
+int au_opts_mount(struct super_block *sb, struct au_opts *opts);
+int au_opts_remount(struct super_block *sb, struct au_opts *opts);
+
+unsigned int au_opt_udba(struct super_block *sb);
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_OPTS_H__ */
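A note on the flag macros defined above: au_opt_set() refuses UDBA bits at compile time, and au_opt_set_udba() always clears the whole AuOptMask_UDBA before installing the new mode, which is what lets au_opts_mount() temporarily force UDBA_REVAL and restore the requested mode afterwards. Below is a minimal userspace sketch of the same pattern; all names in it are local stand-ins for illustration, not the aufs definitions.

/* Illustrative userspace re-creation of the si_mntflags bit-flag pattern. */
#include <stdio.h>

#define OPT_XINO	1
#define OPT_UDBA_NONE	(1 << 2)
#define OPT_UDBA_REVAL	(1 << 3)
#define OPT_UDBA_NOTIFY	(1 << 4)
#define OPT_PLINK	(1 << 6)
#define OPTMASK_UDBA	(OPT_UDBA_NONE | OPT_UDBA_REVAL | OPT_UDBA_NOTIFY)
#define OPT_DEF		(OPT_XINO | OPT_UDBA_REVAL | OPT_PLINK)

#define opt_test(flags, f)	((flags) & (f))
#define opt_set(flags, f)	((flags) |= (f))
#define opt_clr(flags, f)	((flags) &= ~(f))
/* switching the UDBA mode always clears the whole mask first */
#define opt_set_udba(flags, f)	((flags) = ((flags) & ~OPTMASK_UDBA) | (f))

int main(void)
{
	unsigned int mntflags = OPT_DEF;

	printf("xino on by default: %d\n", !!opt_test(mntflags, OPT_XINO));
	opt_set_udba(mntflags, OPT_UDBA_NOTIFY);	/* like udba=notify */
	printf("reval cleared: %d, notify set: %d\n",
	       !opt_test(mntflags, OPT_UDBA_REVAL),
	       !!opt_test(mntflags, OPT_UDBA_NOTIFY));
	opt_clr(mntflags, OPT_PLINK);			/* like noplink */
	return 0;
}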
diff --git a/fs/aufs/plink.c b/fs/aufs/plink.c
new file mode 100644
index 000000000..d5b36bd11
--- /dev/null
+++ b/fs/aufs/plink.c
@@ -0,0 +1,515 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * pseudo-link
+ */
+
+#include "aufs.h"
+
+/*
+ * the pseudo-link maintenance mode.
+ * while a user process maintains the pseudo-links,
+ * adding a new plink and manipulating branches are prohibited.
+ *
+ * Flags
+ * NOPLM:
+ * For entry functions which will handle plink, and i_mutex is already held
+ * in VFS.
+ * They cannot wait and should return an error at once.
+ *	Callers have to check the error.
+ * NOPLMW:
+ * For entry functions which will handle plink, but i_mutex is not held
+ * in VFS.
+ *	They can wait for the plink maintenance mode to finish.
+ *
+ * They behave like F_SETLK and F_SETLKW.
+ * If the caller never handles plink, then both flags are unnecessary.
+ */
+
+int au_plink_maint(struct super_block *sb, int flags)
+{
+ int err;
+ pid_t pid, ppid;
+ struct au_sbinfo *sbi;
+
+ SiMustAnyLock(sb);
+
+ err = 0;
+ if (!au_opt_test(au_mntflags(sb), PLINK))
+ goto out;
+
+ sbi = au_sbi(sb);
+ pid = sbi->si_plink_maint_pid;
+ if (!pid || pid == current->pid)
+ goto out;
+
+ /* todo: it highly depends upon /sbin/mount.aufs */
+ rcu_read_lock();
+ ppid = task_pid_vnr(rcu_dereference(current->real_parent));
+ rcu_read_unlock();
+ if (pid == ppid)
+ goto out;
+
+ if (au_ftest_lock(flags, NOPLMW)) {
+ /* if there is no i_mutex lock in VFS, we don't need to wait */
+ /* AuDebugOn(!lockdep_depth(current)); */
+ while (sbi->si_plink_maint_pid) {
+ si_read_unlock(sb);
+ /* gave up wake_up_bit() */
+ wait_event(sbi->si_plink_wq, !sbi->si_plink_maint_pid);
+
+ if (au_ftest_lock(flags, FLUSH))
+ au_nwt_flush(&sbi->si_nowait);
+ si_noflush_read_lock(sb);
+ }
+ } else if (au_ftest_lock(flags, NOPLM)) {
+ AuDbg("ppid %d, pid %d\n", ppid, pid);
+ err = -EAGAIN;
+ }
+
+out:
+ return err;
+}
+
+void au_plink_maint_leave(struct au_sbinfo *sbinfo)
+{
+ spin_lock(&sbinfo->si_plink_maint_lock);
+ sbinfo->si_plink_maint_pid = 0;
+ spin_unlock(&sbinfo->si_plink_maint_lock);
+ wake_up_all(&sbinfo->si_plink_wq);
+}
+
+int au_plink_maint_enter(struct super_block *sb)
+{
+ int err;
+ struct au_sbinfo *sbinfo;
+
+ err = 0;
+ sbinfo = au_sbi(sb);
+	/* make sure I am the only one in this fs */
+ si_write_lock(sb, AuLock_FLUSH);
+ if (au_opt_test(au_mntflags(sb), PLINK)) {
+ spin_lock(&sbinfo->si_plink_maint_lock);
+ if (!sbinfo->si_plink_maint_pid)
+ sbinfo->si_plink_maint_pid = current->pid;
+ else
+ err = -EBUSY;
+ spin_unlock(&sbinfo->si_plink_maint_lock);
+ }
+ si_write_unlock(sb);
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_DEBUG
+void au_plink_list(struct super_block *sb)
+{
+ int i;
+ struct au_sbinfo *sbinfo;
+ struct hlist_head *plink_hlist;
+ struct pseudo_link *plink;
+
+ SiMustAnyLock(sb);
+
+ sbinfo = au_sbi(sb);
+ AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
+ AuDebugOn(au_plink_maint(sb, AuLock_NOPLM));
+
+ for (i = 0; i < AuPlink_NHASH; i++) {
+ plink_hlist = &sbinfo->si_plink[i].head;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(plink, plink_hlist, hlist)
+ AuDbg("%lu\n", plink->inode->i_ino);
+ rcu_read_unlock();
+ }
+}
+#endif
+
+/* is the inode pseudo-linked? */
+int au_plink_test(struct inode *inode)
+{
+ int found, i;
+ struct au_sbinfo *sbinfo;
+ struct hlist_head *plink_hlist;
+ struct pseudo_link *plink;
+
+ sbinfo = au_sbi(inode->i_sb);
+ AuRwMustAnyLock(&sbinfo->si_rwsem);
+ AuDebugOn(!au_opt_test(au_mntflags(inode->i_sb), PLINK));
+ AuDebugOn(au_plink_maint(inode->i_sb, AuLock_NOPLM));
+
+ found = 0;
+ i = au_plink_hash(inode->i_ino);
+ plink_hlist = &sbinfo->si_plink[i].head;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(plink, plink_hlist, hlist)
+ if (plink->inode == inode) {
+ found = 1;
+ break;
+ }
+ rcu_read_unlock();
+ return found;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * generate a name for plink.
+ * the file will be stored under AUFS_WH_PLINKDIR.
+ */
+/* 20 is the maximum number of decimal digits of a 64-bit ulong */
+#define PLINK_NAME_LEN ((20 + 1) * 2)
+
+static int plink_name(char *name, int len, struct inode *inode,
+ aufs_bindex_t bindex)
+{
+ int rlen;
+ struct inode *h_inode;
+
+ h_inode = au_h_iptr(inode, bindex);
+ rlen = snprintf(name, len, "%lu.%lu", inode->i_ino, h_inode->i_ino);
+ return rlen;
+}
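So a pseudo-link file name is simply "<aufs inode number>.<branch inode number>" in decimal, and PLINK_NAME_LEN is sized for two 20-digit numbers plus the separator and terminator. A small userspace check of the format and the bound (the inode numbers are made up for the illustration):

/* Userspace illustration of the "<aufs-ino>.<lower-ino>" naming scheme. */
#include <stdio.h>

#define PLINK_NAME_LEN ((20 + 1) * 2)	/* two 20-digit numbers, '.', NUL */

int main(void)
{
	char name[PLINK_NAME_LEN];
	unsigned long ino = 131072, h_ino = (unsigned long)-1; /* worst case */
	int rlen;

	rlen = snprintf(name, sizeof(name), "%lu.%lu", ino, h_ino);
	printf("%s (len %d, max %d)\n", name, rlen, PLINK_NAME_LEN - 1);
	return 0;
}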
+
+struct au_do_plink_lkup_args {
+ struct dentry **errp;
+ struct qstr *tgtname;
+ struct dentry *h_parent;
+ struct au_branch *br;
+};
+
+static struct dentry *au_do_plink_lkup(struct qstr *tgtname,
+ struct dentry *h_parent,
+ struct au_branch *br)
+{
+ struct dentry *h_dentry;
+ struct inode *h_inode;
+
+ h_inode = d_inode(h_parent);
+ inode_lock_nested(h_inode, AuLsc_I_CHILD2);
+ h_dentry = vfsub_lkup_one(tgtname, h_parent);
+ inode_unlock(h_inode);
+ return h_dentry;
+}
+
+static void au_call_do_plink_lkup(void *args)
+{
+ struct au_do_plink_lkup_args *a = args;
+ *a->errp = au_do_plink_lkup(a->tgtname, a->h_parent, a->br);
+}
+
+/* lookup the plink-ed @inode under the branch at @bindex */
+struct dentry *au_plink_lkup(struct inode *inode, aufs_bindex_t bindex)
+{
+ struct dentry *h_dentry, *h_parent;
+ struct au_branch *br;
+ int wkq_err;
+ char a[PLINK_NAME_LEN];
+ struct qstr tgtname = QSTR_INIT(a, 0);
+
+ AuDebugOn(au_plink_maint(inode->i_sb, AuLock_NOPLM));
+
+ br = au_sbr(inode->i_sb, bindex);
+ h_parent = br->br_wbr->wbr_plink;
+ tgtname.len = plink_name(a, sizeof(a), inode, bindex);
+
+ if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID)) {
+ struct au_do_plink_lkup_args args = {
+ .errp = &h_dentry,
+ .tgtname = &tgtname,
+ .h_parent = h_parent,
+ .br = br
+ };
+
+ wkq_err = au_wkq_wait(au_call_do_plink_lkup, &args);
+ if (unlikely(wkq_err))
+ h_dentry = ERR_PTR(wkq_err);
+ } else
+ h_dentry = au_do_plink_lkup(&tgtname, h_parent, br);
+
+ return h_dentry;
+}
+
+/* create a pseudo-link */
+static int do_whplink(struct qstr *tgt, struct dentry *h_parent,
+ struct dentry *h_dentry, struct au_branch *br)
+{
+ int err;
+ struct path h_path = {
+ .mnt = au_br_mnt(br)
+ };
+ struct inode *h_dir, *delegated;
+
+ h_dir = d_inode(h_parent);
+ inode_lock_nested(h_dir, AuLsc_I_CHILD2);
+again:
+ h_path.dentry = vfsub_lkup_one(tgt, h_parent);
+ err = PTR_ERR(h_path.dentry);
+ if (IS_ERR(h_path.dentry))
+ goto out;
+
+ err = 0;
+ /* wh.plink dir is not monitored */
+ /* todo: is it really safe? */
+ if (d_is_positive(h_path.dentry)
+ && d_inode(h_path.dentry) != d_inode(h_dentry)) {
+ delegated = NULL;
+ err = vfsub_unlink(h_dir, &h_path, &delegated, /*force*/0);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal unlink\n");
+ iput(delegated);
+ }
+ dput(h_path.dentry);
+ h_path.dentry = NULL;
+ if (!err)
+ goto again;
+ }
+ if (!err && d_is_negative(h_path.dentry)) {
+ delegated = NULL;
+ err = vfsub_link(h_dentry, h_dir, &h_path, &delegated);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal link\n");
+ iput(delegated);
+ }
+ }
+ dput(h_path.dentry);
+
+out:
+ inode_unlock(h_dir);
+ return err;
+}
+
+struct do_whplink_args {
+ int *errp;
+ struct qstr *tgt;
+ struct dentry *h_parent;
+ struct dentry *h_dentry;
+ struct au_branch *br;
+};
+
+static void call_do_whplink(void *args)
+{
+ struct do_whplink_args *a = args;
+ *a->errp = do_whplink(a->tgt, a->h_parent, a->h_dentry, a->br);
+}
+
+static int whplink(struct dentry *h_dentry, struct inode *inode,
+ aufs_bindex_t bindex, struct au_branch *br)
+{
+ int err, wkq_err;
+ struct au_wbr *wbr;
+ struct dentry *h_parent;
+ char a[PLINK_NAME_LEN];
+ struct qstr tgtname = QSTR_INIT(a, 0);
+
+ wbr = au_sbr(inode->i_sb, bindex)->br_wbr;
+ h_parent = wbr->wbr_plink;
+ tgtname.len = plink_name(a, sizeof(a), inode, bindex);
+
+ /* always superio. */
+ if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID)) {
+ struct do_whplink_args args = {
+ .errp = &err,
+ .tgt = &tgtname,
+ .h_parent = h_parent,
+ .h_dentry = h_dentry,
+ .br = br
+ };
+ wkq_err = au_wkq_wait(call_do_whplink, &args);
+ if (unlikely(wkq_err))
+ err = wkq_err;
+ } else
+ err = do_whplink(&tgtname, h_parent, h_dentry, br);
+
+ return err;
+}
+
+/* free a single plink */
+static void do_put_plink(struct pseudo_link *plink, int do_del)
+{
+ if (do_del)
+ hlist_del(&plink->hlist);
+ iput(plink->inode);
+ kfree(plink);
+}
+
+static void do_put_plink_rcu(struct rcu_head *rcu)
+{
+ struct pseudo_link *plink;
+
+ plink = container_of(rcu, struct pseudo_link, rcu);
+ iput(plink->inode);
+ kfree(plink);
+}
+
+/*
+ * create a new pseudo-link for @h_dentry on @bindex.
+ * the linked inode is held in aufs @inode.
+ */
+void au_plink_append(struct inode *inode, aufs_bindex_t bindex,
+ struct dentry *h_dentry)
+{
+ struct super_block *sb;
+ struct au_sbinfo *sbinfo;
+ struct hlist_head *plink_hlist;
+ struct pseudo_link *plink, *tmp;
+ struct au_sphlhead *sphl;
+ int found, err, cnt, i;
+
+ sb = inode->i_sb;
+ sbinfo = au_sbi(sb);
+ AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
+ AuDebugOn(au_plink_maint(sb, AuLock_NOPLM));
+
+ found = au_plink_test(inode);
+ if (found)
+ return;
+
+ i = au_plink_hash(inode->i_ino);
+ sphl = sbinfo->si_plink + i;
+ plink_hlist = &sphl->head;
+ tmp = kmalloc(sizeof(*plink), GFP_NOFS);
+ if (tmp)
+ tmp->inode = au_igrab(inode);
+ else {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ spin_lock(&sphl->spin);
+ hlist_for_each_entry(plink, plink_hlist, hlist) {
+ if (plink->inode == inode) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ hlist_add_head_rcu(&tmp->hlist, plink_hlist);
+ spin_unlock(&sphl->spin);
+ if (!found) {
+ cnt = au_sphl_count(sphl);
+#define msg "unexpectedly unbalanced or too many pseudo-links"
+ if (cnt > AUFS_PLINK_WARN)
+ AuWarn1(msg ", %d\n", cnt);
+#undef msg
+ err = whplink(h_dentry, inode, bindex, au_sbr(sb, bindex));
+ } else {
+ do_put_plink(tmp, 0);
+ return;
+ }
+
+out:
+ if (unlikely(err)) {
+ pr_warn("err %d, damaged pseudo link.\n", err);
+ if (tmp) {
+ au_sphl_del_rcu(&tmp->hlist, sphl);
+ call_rcu(&tmp->rcu, do_put_plink_rcu);
+ }
+ }
+}
+
+/* free all plinks */
+void au_plink_put(struct super_block *sb, int verbose)
+{
+ int i, warned;
+ struct au_sbinfo *sbinfo;
+ struct hlist_head *plink_hlist;
+ struct hlist_node *tmp;
+ struct pseudo_link *plink;
+
+ SiMustWriteLock(sb);
+
+ sbinfo = au_sbi(sb);
+ AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
+ AuDebugOn(au_plink_maint(sb, AuLock_NOPLM));
+
+ /* no spin_lock since sbinfo is write-locked */
+ warned = 0;
+ for (i = 0; i < AuPlink_NHASH; i++) {
+ plink_hlist = &sbinfo->si_plink[i].head;
+ if (!warned && verbose && !hlist_empty(plink_hlist)) {
+			pr_warn("pseudo-link is not flushed\n");
+ warned = 1;
+ }
+ hlist_for_each_entry_safe(plink, tmp, plink_hlist, hlist)
+ do_put_plink(plink, 0);
+ INIT_HLIST_HEAD(plink_hlist);
+ }
+}
+
+void au_plink_clean(struct super_block *sb, int verbose)
+{
+ struct dentry *root;
+
+ root = sb->s_root;
+ aufs_write_lock(root);
+ if (au_opt_test(au_mntflags(sb), PLINK))
+ au_plink_put(sb, verbose);
+ aufs_write_unlock(root);
+}
+
+static int au_plink_do_half_refresh(struct inode *inode, aufs_bindex_t br_id)
+{
+ int do_put;
+ aufs_bindex_t bstart, bend, bindex;
+
+ do_put = 0;
+ bstart = au_ibstart(inode);
+ bend = au_ibend(inode);
+ if (bstart >= 0) {
+ for (bindex = bstart; bindex <= bend; bindex++) {
+ if (!au_h_iptr(inode, bindex)
+ || au_ii_br_id(inode, bindex) != br_id)
+ continue;
+ au_set_h_iptr(inode, bindex, NULL, 0);
+ do_put = 1;
+ break;
+ }
+ if (do_put)
+ for (bindex = bstart; bindex <= bend; bindex++)
+ if (au_h_iptr(inode, bindex)) {
+ do_put = 0;
+ break;
+ }
+ } else
+ do_put = 1;
+
+ return do_put;
+}
+
+/* free the plinks on a branch specified by @br_id */
+void au_plink_half_refresh(struct super_block *sb, aufs_bindex_t br_id)
+{
+ struct au_sbinfo *sbinfo;
+ struct hlist_head *plink_hlist;
+ struct hlist_node *tmp;
+ struct pseudo_link *plink;
+ struct inode *inode;
+ int i, do_put;
+
+ SiMustWriteLock(sb);
+
+ sbinfo = au_sbi(sb);
+ AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK));
+ AuDebugOn(au_plink_maint(sb, AuLock_NOPLM));
+
+ /* no spin_lock since sbinfo is write-locked */
+ for (i = 0; i < AuPlink_NHASH; i++) {
+ plink_hlist = &sbinfo->si_plink[i].head;
+ hlist_for_each_entry_safe(plink, tmp, plink_hlist, hlist) {
+ inode = au_igrab(plink->inode);
+ ii_write_lock_child(inode);
+ do_put = au_plink_do_half_refresh(inode, br_id);
+ if (do_put)
+ do_put_plink(plink, 1);
+ ii_write_unlock(inode);
+ iput(inode);
+ }
+ }
+}
diff --git a/fs/aufs/poll.c b/fs/aufs/poll.c
new file mode 100644
index 000000000..dd2baf5dc
--- /dev/null
+++ b/fs/aufs/poll.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * poll operation
+ * Currently there is only one filesystem which implements the ->poll operation.
+ */
+
+#include "aufs.h"
+
+unsigned int aufs_poll(struct file *file, poll_table *wait)
+{
+ unsigned int mask;
+ int err;
+ struct file *h_file;
+ struct super_block *sb;
+
+ /* We should pretend an error happened. */
+ mask = POLLERR /* | POLLIN | POLLOUT */;
+ sb = file->f_path.dentry->d_sb;
+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW);
+
+ h_file = au_read_pre(file, /*keep_fi*/0);
+ err = PTR_ERR(h_file);
+ if (IS_ERR(h_file))
+ goto out;
+
+ /* it is not an error if h_file has no operation */
+ mask = DEFAULT_POLLMASK;
+ if (h_file->f_op->poll)
+ mask = h_file->f_op->poll(h_file, wait);
+ fput(h_file); /* instead of au_read_post() */
+
+out:
+ si_read_unlock(sb);
+ AuTraceErr((int)mask);
+ return mask;
+}
diff --git a/fs/aufs/posix_acl.c b/fs/aufs/posix_acl.c
new file mode 100644
index 000000000..d6eeebdec
--- /dev/null
+++ b/fs/aufs/posix_acl.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2014-2016 Junjiro R. Okajima
+ */
+
+/*
+ * posix acl operations
+ */
+
+#include <linux/fs.h>
+#include "aufs.h"
+
+struct posix_acl *aufs_get_acl(struct inode *inode, int type)
+{
+ struct posix_acl *acl;
+ int err;
+ aufs_bindex_t bindex;
+ struct inode *h_inode;
+ struct super_block *sb;
+
+ acl = NULL;
+ sb = inode->i_sb;
+ si_read_lock(sb, AuLock_FLUSH);
+ ii_read_lock_child(inode);
+ if (!(sb->s_flags & MS_POSIXACL))
+ goto out;
+
+ bindex = au_ibstart(inode);
+ h_inode = au_h_iptr(inode, bindex);
+ if (unlikely(!h_inode
+ || ((h_inode->i_mode & S_IFMT)
+ != (inode->i_mode & S_IFMT)))) {
+ err = au_busy_or_stale();
+ acl = ERR_PTR(err);
+ goto out;
+ }
+
+ /* always topmost only */
+ acl = get_acl(h_inode, type);
+
+out:
+ ii_read_unlock(inode);
+ si_read_unlock(sb);
+
+ AuTraceErrPtr(acl);
+ return acl;
+}
+
+int aufs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+{
+ int err;
+ ssize_t ssz;
+ struct dentry *dentry;
+ struct au_srxattr arg = {
+ .type = AU_ACL_SET,
+ .u.acl_set = {
+ .acl = acl,
+ .type = type
+ },
+ };
+
+ inode_lock(inode);
+ if (inode->i_ino == AUFS_ROOT_INO)
+ dentry = dget(inode->i_sb->s_root);
+ else {
+ dentry = d_find_alias(inode);
+ if (!dentry)
+ dentry = d_find_any_alias(inode);
+ if (!dentry) {
+ pr_warn("cannot handle this inode, "
+ "please report to aufs-users ML\n");
+ err = -ENOENT;
+ goto out;
+ }
+ }
+
+ ssz = au_srxattr(dentry, &arg);
+ dput(dentry);
+ err = ssz;
+ if (ssz >= 0)
+ err = 0;
+
+out:
+ inode_unlock(inode);
+ return err;
+}
diff --git a/fs/aufs/procfs.c b/fs/aufs/procfs.c
new file mode 100644
index 000000000..2c8893edf
--- /dev/null
+++ b/fs/aufs/procfs.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2010-2016 Junjiro R. Okajima
+ */
+
+/*
+ * procfs interfaces
+ */
+
+#include <linux/proc_fs.h>
+#include "aufs.h"
+
+static int au_procfs_plm_release(struct inode *inode, struct file *file)
+{
+ struct au_sbinfo *sbinfo;
+
+ sbinfo = file->private_data;
+ if (sbinfo) {
+ au_plink_maint_leave(sbinfo);
+ kobject_put(&sbinfo->si_kobj);
+ }
+
+ return 0;
+}
+
+static void au_procfs_plm_write_clean(struct file *file)
+{
+ struct au_sbinfo *sbinfo;
+
+ sbinfo = file->private_data;
+ if (sbinfo)
+ au_plink_clean(sbinfo->si_sb, /*verbose*/0);
+}
+
+static int au_procfs_plm_write_si(struct file *file, unsigned long id)
+{
+ int err;
+ struct super_block *sb;
+ struct au_sbinfo *sbinfo;
+
+ err = -EBUSY;
+ if (unlikely(file->private_data))
+ goto out;
+
+ sb = NULL;
+ /* don't use au_sbilist_lock() here */
+ spin_lock(&au_sbilist.spin);
+ list_for_each_entry(sbinfo, &au_sbilist.head, si_list)
+ if (id == sysaufs_si_id(sbinfo)) {
+ kobject_get(&sbinfo->si_kobj);
+ sb = sbinfo->si_sb;
+ break;
+ }
+ spin_unlock(&au_sbilist.spin);
+
+ err = -EINVAL;
+ if (unlikely(!sb))
+ goto out;
+
+ err = au_plink_maint_enter(sb);
+ if (!err)
+ /* keep kobject_get() */
+ file->private_data = sbinfo;
+ else
+ kobject_put(&sbinfo->si_kobj);
+out:
+ return err;
+}
+
+/*
+ * Accept a valid "si=xxxx" only.
+ * Once it is accepted successfully, accept "clean" too.
+ */
+static ssize_t au_procfs_plm_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t err;
+ unsigned long id;
+	/* "si=", up to 16 hex digits, an optional last newline, and a NUL */
+	char buf[3 + sizeof(unsigned long) * 2 + 1 + 1];
+
+ err = -EACCES;
+ if (unlikely(!capable(CAP_SYS_ADMIN)))
+ goto out;
+
+ err = -EINVAL;
+	if (unlikely(count >= sizeof(buf)))
+ goto out;
+
+ err = copy_from_user(buf, ubuf, count);
+ if (unlikely(err)) {
+ err = -EFAULT;
+ goto out;
+ }
+ buf[count] = 0;
+
+ err = -EINVAL;
+ if (!strcmp("clean", buf)) {
+ au_procfs_plm_write_clean(file);
+ goto out_success;
+ } else if (unlikely(strncmp("si=", buf, 3)))
+ goto out;
+
+ err = kstrtoul(buf + 3, 16, &id);
+ if (unlikely(err))
+ goto out;
+
+ err = au_procfs_plm_write_si(file, id);
+ if (unlikely(err))
+ goto out;
+
+out_success:
+ err = count; /* success */
+out:
+ return err;
+}
+
+static const struct file_operations au_procfs_plm_fop = {
+ .write = au_procfs_plm_write,
+ .release = au_procfs_plm_release,
+ .owner = THIS_MODULE
+};
+
+/* ---------------------------------------------------------------------- */
+
+static struct proc_dir_entry *au_procfs_dir;
+
+void au_procfs_fin(void)
+{
+ remove_proc_entry(AUFS_PLINK_MAINT_NAME, au_procfs_dir);
+ remove_proc_entry(AUFS_PLINK_MAINT_DIR, NULL);
+}
+
+int __init au_procfs_init(void)
+{
+ int err;
+ struct proc_dir_entry *entry;
+
+ err = -ENOMEM;
+ au_procfs_dir = proc_mkdir(AUFS_PLINK_MAINT_DIR, NULL);
+ if (unlikely(!au_procfs_dir))
+ goto out;
+
+ entry = proc_create(AUFS_PLINK_MAINT_NAME, S_IFREG | S_IWUSR,
+ au_procfs_dir, &au_procfs_plm_fop);
+ if (unlikely(!entry))
+ goto out_dir;
+
+ err = 0;
+ goto out; /* success */
+
+out_dir:
+ remove_proc_entry(AUFS_PLINK_MAINT_DIR, NULL);
+out:
+ return err;
+}
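Putting au_procfs_plm_write() and au_procfs_plm_release() together, the user-space protocol is: write "si=<hex id>" to claim the maintenance mode, optionally write "clean" to flush all pseudo-links, and close the file to leave the mode and wake any waiters. Normally /sbin/mount.aufs drives this, not applications. The hedged sketch below assumes the proc file ends up at /proc/fs/aufs/plink_maint and that the si id was read from the "si=..." mount option in /proc/mounts:

/* Hypothetical userspace driver for the plink maintenance mode. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int plink_maint(const char *si_id, int clean)
{
	char buf[32];
	int fd, err;

	fd = open("/proc/fs/aufs/plink_maint", O_WRONLY);
	if (fd < 0)
		return -1;

	/* claim the maintenance mode: only "si=<hex>" is accepted first */
	snprintf(buf, sizeof(buf), "si=%s", si_id);
	err = (write(fd, buf, strlen(buf)) < 0);

	/* once claimed, "clean" flushes all pseudo-links */
	if (!err && clean)
		err = (write(fd, "clean", 5) < 0);

	/* ... adding plinks and branch manipulation stay blocked here ... */
	close(fd);	/* leaving the mode wakes up the waiters */
	return err ? -1 : 0;
}

int main(void)
{
	return plink_maint("4e9f2c00", /*clean*/1);	/* the id is illustrative */
}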
diff --git a/fs/aufs/rdu.c b/fs/aufs/rdu.c
new file mode 100644
index 000000000..ec0844673
--- /dev/null
+++ b/fs/aufs/rdu.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * readdir in userspace.
+ */
+
+#include <linux/compat.h>
+#include <linux/fs_stack.h>
+#include <linux/security.h>
+#include "aufs.h"
+
+/* bits for struct aufs_rdu.flags */
+#define AuRdu_CALLED 1
+#define AuRdu_CONT (1 << 1)
+#define AuRdu_FULL (1 << 2)
+#define au_ftest_rdu(flags, name) ((flags) & AuRdu_##name)
+#define au_fset_rdu(flags, name) \
+ do { (flags) |= AuRdu_##name; } while (0)
+#define au_fclr_rdu(flags, name) \
+ do { (flags) &= ~AuRdu_##name; } while (0)
+
+struct au_rdu_arg {
+ struct dir_context ctx;
+ struct aufs_rdu *rdu;
+ union au_rdu_ent_ul ent;
+ unsigned long end;
+
+ struct super_block *sb;
+ int err;
+};
+
+static int au_rdu_fill(struct dir_context *ctx, const char *name, int nlen,
+ loff_t offset, u64 h_ino, unsigned int d_type)
+{
+ int err, len;
+ struct au_rdu_arg *arg = container_of(ctx, struct au_rdu_arg, ctx);
+ struct aufs_rdu *rdu = arg->rdu;
+ struct au_rdu_ent ent;
+
+ err = 0;
+ arg->err = 0;
+ au_fset_rdu(rdu->cookie.flags, CALLED);
+ len = au_rdu_len(nlen);
+ if (arg->ent.ul + len < arg->end) {
+ ent.ino = h_ino;
+ ent.bindex = rdu->cookie.bindex;
+ ent.type = d_type;
+ ent.nlen = nlen;
+ if (unlikely(nlen > AUFS_MAX_NAMELEN))
+ ent.type = DT_UNKNOWN;
+
+ /* unnecessary to support mmap_sem since this is a dir */
+ err = -EFAULT;
+ if (copy_to_user(arg->ent.e, &ent, sizeof(ent)))
+ goto out;
+ if (copy_to_user(arg->ent.e->name, name, nlen))
+ goto out;
+ /* the terminating NULL */
+ if (__put_user(0, arg->ent.e->name + nlen))
+ goto out;
+ err = 0;
+ /* AuDbg("%p, %.*s\n", arg->ent.p, nlen, name); */
+ arg->ent.ul += len;
+ rdu->rent++;
+ } else {
+ err = -EFAULT;
+ au_fset_rdu(rdu->cookie.flags, FULL);
+ rdu->full = 1;
+ rdu->tail = arg->ent;
+ }
+
+out:
+ /* AuTraceErr(err); */
+ return err;
+}
+
+static int au_rdu_do(struct file *h_file, struct au_rdu_arg *arg)
+{
+ int err;
+ loff_t offset;
+ struct au_rdu_cookie *cookie = &arg->rdu->cookie;
+
+	/* we don't have to care about (FMODE_32BITHASH | FMODE_64BITHASH) for ext4 */
+ offset = vfsub_llseek(h_file, cookie->h_pos, SEEK_SET);
+ err = offset;
+ if (unlikely(offset != cookie->h_pos))
+ goto out;
+
+ err = 0;
+ do {
+ arg->err = 0;
+ au_fclr_rdu(cookie->flags, CALLED);
+ /* smp_mb(); */
+ err = vfsub_iterate_dir(h_file, &arg->ctx);
+ if (err >= 0)
+ err = arg->err;
+ } while (!err
+ && au_ftest_rdu(cookie->flags, CALLED)
+ && !au_ftest_rdu(cookie->flags, FULL));
+ cookie->h_pos = h_file->f_pos;
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+static int au_rdu(struct file *file, struct aufs_rdu *rdu)
+{
+ int err;
+ aufs_bindex_t bend;
+ struct au_rdu_arg arg = {
+ .ctx = {
+ .actor = au_rdu_fill
+ }
+ };
+ struct dentry *dentry;
+ struct inode *inode;
+ struct file *h_file;
+ struct au_rdu_cookie *cookie = &rdu->cookie;
+
+ err = !access_ok(VERIFY_WRITE, rdu->ent.e, rdu->sz);
+ if (unlikely(err)) {
+ err = -EFAULT;
+ AuTraceErr(err);
+ goto out;
+ }
+ rdu->rent = 0;
+ rdu->tail = rdu->ent;
+ rdu->full = 0;
+ arg.rdu = rdu;
+ arg.ent = rdu->ent;
+ arg.end = arg.ent.ul;
+ arg.end += rdu->sz;
+
+ err = -ENOTDIR;
+ if (unlikely(!file->f_op->iterate))
+ goto out;
+
+ err = security_file_permission(file, MAY_READ);
+ AuTraceErr(err);
+ if (unlikely(err))
+ goto out;
+
+ dentry = file->f_path.dentry;
+ inode = d_inode(dentry);
+#if 1
+ inode_lock(inode);
+#else
+ /* todo: create a new inline func inode_lock_killable() */
+ err = mutex_lock_killable(&inode->i_mutex);
+ AuTraceErr(err);
+ if (unlikely(err))
+ goto out;
+#endif
+
+ arg.sb = inode->i_sb;
+ err = si_read_lock(arg.sb, AuLock_FLUSH | AuLock_NOPLM);
+ if (unlikely(err))
+ goto out_mtx;
+ err = au_alive_dir(dentry);
+ if (unlikely(err))
+ goto out_si;
+ /* todo: reval? */
+ fi_read_lock(file);
+
+ err = -EAGAIN;
+ if (unlikely(au_ftest_rdu(cookie->flags, CONT)
+ && cookie->generation != au_figen(file)))
+ goto out_unlock;
+
+ err = 0;
+ if (!rdu->blk) {
+ rdu->blk = au_sbi(arg.sb)->si_rdblk;
+ if (!rdu->blk)
+ rdu->blk = au_dir_size(file, /*dentry*/NULL);
+ }
+ bend = au_fbstart(file);
+ if (cookie->bindex < bend)
+ cookie->bindex = bend;
+ bend = au_fbend_dir(file);
+ /* AuDbg("b%d, b%d\n", cookie->bindex, bend); */
+ for (; !err && cookie->bindex <= bend;
+ cookie->bindex++, cookie->h_pos = 0) {
+ h_file = au_hf_dir(file, cookie->bindex);
+ if (!h_file)
+ continue;
+
+ au_fclr_rdu(cookie->flags, FULL);
+ err = au_rdu_do(h_file, &arg);
+ AuTraceErr(err);
+ if (unlikely(au_ftest_rdu(cookie->flags, FULL) || err))
+ break;
+ }
+ AuDbg("rent %llu\n", rdu->rent);
+
+ if (!err && !au_ftest_rdu(cookie->flags, CONT)) {
+ rdu->shwh = !!au_opt_test(au_sbi(arg.sb)->si_mntflags, SHWH);
+ au_fset_rdu(cookie->flags, CONT);
+ cookie->generation = au_figen(file);
+ }
+
+ ii_read_lock_child(inode);
+ fsstack_copy_attr_atime(inode, au_h_iptr(inode, au_ibstart(inode)));
+ ii_read_unlock(inode);
+
+out_unlock:
+ fi_read_unlock(file);
+out_si:
+ si_read_unlock(arg.sb);
+out_mtx:
+ inode_unlock(inode);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+static int au_rdu_ino(struct file *file, struct aufs_rdu *rdu)
+{
+ int err;
+ ino_t ino;
+ unsigned long long nent;
+ union au_rdu_ent_ul *u;
+ struct au_rdu_ent ent;
+ struct super_block *sb;
+
+ err = 0;
+ nent = rdu->nent;
+ u = &rdu->ent;
+ sb = file->f_path.dentry->d_sb;
+ si_read_lock(sb, AuLock_FLUSH);
+ while (nent-- > 0) {
+ /* unnecessary to support mmap_sem since this is a dir */
+ err = copy_from_user(&ent, u->e, sizeof(ent));
+ if (!err)
+ err = !access_ok(VERIFY_WRITE, &u->e->ino, sizeof(ino));
+ if (unlikely(err)) {
+ err = -EFAULT;
+ AuTraceErr(err);
+ break;
+ }
+
+ /* AuDbg("b%d, i%llu\n", ent.bindex, ent.ino); */
+ if (!ent.wh)
+ err = au_ino(sb, ent.bindex, ent.ino, ent.type, &ino);
+ else
+ err = au_wh_ino(sb, ent.bindex, ent.ino, ent.type,
+ &ino);
+ if (unlikely(err)) {
+ AuTraceErr(err);
+ break;
+ }
+
+ err = __put_user(ino, &u->e->ino);
+ if (unlikely(err)) {
+ err = -EFAULT;
+ AuTraceErr(err);
+ break;
+ }
+ u->ul += au_rdu_len(ent.nlen);
+ }
+ si_read_unlock(sb);
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_rdu_verify(struct aufs_rdu *rdu)
+{
+ AuDbg("rdu{%llu, %p, %u | %u | %llu, %u, %u | "
+ "%llu, b%d, 0x%x, g%u}\n",
+ rdu->sz, rdu->ent.e, rdu->verify[AufsCtlRduV_SZ],
+ rdu->blk,
+ rdu->rent, rdu->shwh, rdu->full,
+ rdu->cookie.h_pos, rdu->cookie.bindex, rdu->cookie.flags,
+ rdu->cookie.generation);
+
+ if (rdu->verify[AufsCtlRduV_SZ] == sizeof(*rdu))
+ return 0;
+
+ AuDbg("%u:%u\n",
+ rdu->verify[AufsCtlRduV_SZ], (unsigned int)sizeof(*rdu));
+ return -EINVAL;
+}
+
+long au_rdu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long err, e;
+ struct aufs_rdu rdu;
+ void __user *p = (void __user *)arg;
+
+ err = copy_from_user(&rdu, p, sizeof(rdu));
+ if (unlikely(err)) {
+ err = -EFAULT;
+ AuTraceErr(err);
+ goto out;
+ }
+ err = au_rdu_verify(&rdu);
+ if (unlikely(err))
+ goto out;
+
+ switch (cmd) {
+ case AUFS_CTL_RDU:
+ err = au_rdu(file, &rdu);
+ if (unlikely(err))
+ break;
+
+ e = copy_to_user(p, &rdu, sizeof(rdu));
+ if (unlikely(e)) {
+ err = -EFAULT;
+ AuTraceErr(err);
+ }
+ break;
+ case AUFS_CTL_RDU_INO:
+ err = au_rdu_ino(file, &rdu);
+ break;
+
+ default:
+ /* err = -ENOTTY; */
+ err = -EINVAL;
+ }
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+#ifdef CONFIG_COMPAT
+long au_rdu_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long err, e;
+ struct aufs_rdu rdu;
+ void __user *p = compat_ptr(arg);
+
+ /* todo: get_user()? */
+ err = copy_from_user(&rdu, p, sizeof(rdu));
+ if (unlikely(err)) {
+ err = -EFAULT;
+ AuTraceErr(err);
+ goto out;
+ }
+ rdu.ent.e = compat_ptr(rdu.ent.ul);
+ err = au_rdu_verify(&rdu);
+ if (unlikely(err))
+ goto out;
+
+ switch (cmd) {
+ case AUFS_CTL_RDU:
+ err = au_rdu(file, &rdu);
+ if (unlikely(err))
+ break;
+
+ rdu.ent.ul = ptr_to_compat(rdu.ent.e);
+ rdu.tail.ul = ptr_to_compat(rdu.tail.e);
+ e = copy_to_user(p, &rdu, sizeof(rdu));
+ if (unlikely(e)) {
+ err = -EFAULT;
+ AuTraceErr(err);
+ }
+ break;
+ case AUFS_CTL_RDU_INO:
+ err = au_rdu_ino(file, &rdu);
+ break;
+
+ default:
+ /* err = -ENOTTY; */
+ err = -EINVAL;
+ }
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+#endif
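For reference, a rough userspace sketch of the AUFS_CTL_RDU call path above. It assumes the uapi header shipped with this patch (linux/aufs_type.h) provides struct aufs_rdu, struct au_rdu_ent, au_rdu_len() and the ioctl numbers; only fields that the kernel code above actually reads are touched, a single call is issued without looping on rdu.full, and the AUFS_CTL_RDU_INO inode translation a real reader would do is skipped:

/* Rough userspace sketch of "readdir in userspace" via AUFS_CTL_RDU. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/aufs_type.h>	/* assumed: struct aufs_rdu, AUFS_CTL_RDU, au_rdu_len() */

int main(int argc, char *argv[])
{
	static char buf[64 * 1024];
	struct aufs_rdu rdu;
	struct au_rdu_ent *ent;
	unsigned long long i, pos;
	int fd;

	fd = open(argc > 1 ? argv[1] : ".", O_RDONLY | O_DIRECTORY);
	if (fd < 0)
		return 1;

	memset(&rdu, 0, sizeof(rdu));
	rdu.sz = sizeof(buf);
	rdu.ent.e = (struct au_rdu_ent *)buf;
	rdu.verify[AufsCtlRduV_SZ] = sizeof(rdu);	/* checked by au_rdu_verify() */

	/* one call fills as many entries as fit; rdu.full says there are more */
	if (ioctl(fd, AUFS_CTL_RDU, &rdu) < 0)
		return 1;

	for (i = 0, pos = 0; i < rdu.rent; i++) {
		ent = (struct au_rdu_ent *)(buf + pos);
		printf("b%d %.*s\n", (int)ent->bindex, (int)ent->nlen, ent->name);
		pos += au_rdu_len(ent->nlen);	/* same stride the kernel used */
	}
	return 0;
}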
diff --git a/fs/aufs/rwsem.h b/fs/aufs/rwsem.h
new file mode 100644
index 000000000..ef50c2ccb
--- /dev/null
+++ b/fs/aufs/rwsem.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * simple read-write semaphore wrappers
+ */
+
+#ifndef __AUFS_RWSEM_H__
+#define __AUFS_RWSEM_H__
+
+#ifdef __KERNEL__
+
+#include "debug.h"
+
+struct au_rwsem {
+ struct rw_semaphore rwsem;
+#ifdef CONFIG_AUFS_DEBUG
+ /* just for debugging, not almighty counter */
+ atomic_t rcnt, wcnt;
+#endif
+};
+
+#ifdef CONFIG_AUFS_DEBUG
+#define AuDbgCntInit(rw) do { \
+ atomic_set(&(rw)->rcnt, 0); \
+ atomic_set(&(rw)->wcnt, 0); \
+ smp_mb(); /* atomic set */ \
+} while (0)
+
+#define AuDbgRcntInc(rw) atomic_inc(&(rw)->rcnt)
+#define AuDbgRcntDec(rw) WARN_ON(atomic_dec_return(&(rw)->rcnt) < 0)
+#define AuDbgWcntInc(rw) atomic_inc(&(rw)->wcnt)
+#define AuDbgWcntDec(rw) WARN_ON(atomic_dec_return(&(rw)->wcnt) < 0)
+#else
+#define AuDbgCntInit(rw) do {} while (0)
+#define AuDbgRcntInc(rw) do {} while (0)
+#define AuDbgRcntDec(rw) do {} while (0)
+#define AuDbgWcntInc(rw) do {} while (0)
+#define AuDbgWcntDec(rw) do {} while (0)
+#endif /* CONFIG_AUFS_DEBUG */
+
+/* for easier debugging, do not make these inline functions */
+#define AuRwMustNoWaiters(rw) AuDebugOn(!list_empty(&(rw)->rwsem.wait_list))
+/* rwsem_is_locked() is unusable */
+#define AuRwMustReadLock(rw) AuDebugOn(atomic_read(&(rw)->rcnt) <= 0)
+#define AuRwMustWriteLock(rw) AuDebugOn(atomic_read(&(rw)->wcnt) <= 0)
+#define AuRwMustAnyLock(rw) AuDebugOn(atomic_read(&(rw)->rcnt) <= 0 \
+ && atomic_read(&(rw)->wcnt) <= 0)
+#define AuRwDestroy(rw) AuDebugOn(atomic_read(&(rw)->rcnt) \
+ || atomic_read(&(rw)->wcnt))
+
+#define au_rw_class(rw, key) lockdep_set_class(&(rw)->rwsem, key)
+
+static inline void au_rw_init(struct au_rwsem *rw)
+{
+ AuDbgCntInit(rw);
+ init_rwsem(&rw->rwsem);
+}
+
+static inline void au_rw_init_wlock(struct au_rwsem *rw)
+{
+ au_rw_init(rw);
+ down_write(&rw->rwsem);
+ AuDbgWcntInc(rw);
+}
+
+static inline void au_rw_init_wlock_nested(struct au_rwsem *rw,
+ unsigned int lsc)
+{
+ au_rw_init(rw);
+ down_write_nested(&rw->rwsem, lsc);
+ AuDbgWcntInc(rw);
+}
+
+static inline void au_rw_read_lock(struct au_rwsem *rw)
+{
+ down_read(&rw->rwsem);
+ AuDbgRcntInc(rw);
+}
+
+static inline void au_rw_read_lock_nested(struct au_rwsem *rw, unsigned int lsc)
+{
+ down_read_nested(&rw->rwsem, lsc);
+ AuDbgRcntInc(rw);
+}
+
+static inline void au_rw_read_unlock(struct au_rwsem *rw)
+{
+ AuRwMustReadLock(rw);
+ AuDbgRcntDec(rw);
+ up_read(&rw->rwsem);
+}
+
+static inline void au_rw_dgrade_lock(struct au_rwsem *rw)
+{
+ AuRwMustWriteLock(rw);
+ AuDbgRcntInc(rw);
+ AuDbgWcntDec(rw);
+ downgrade_write(&rw->rwsem);
+}
+
+static inline void au_rw_write_lock(struct au_rwsem *rw)
+{
+ down_write(&rw->rwsem);
+ AuDbgWcntInc(rw);
+}
+
+static inline void au_rw_write_lock_nested(struct au_rwsem *rw,
+ unsigned int lsc)
+{
+ down_write_nested(&rw->rwsem, lsc);
+ AuDbgWcntInc(rw);
+}
+
+static inline void au_rw_write_unlock(struct au_rwsem *rw)
+{
+ AuRwMustWriteLock(rw);
+ AuDbgWcntDec(rw);
+ up_write(&rw->rwsem);
+}
+
+/* why is the _nested version not defined? */
+static inline int au_rw_read_trylock(struct au_rwsem *rw)
+{
+ int ret;
+
+ ret = down_read_trylock(&rw->rwsem);
+ if (ret)
+ AuDbgRcntInc(rw);
+ return ret;
+}
+
+static inline int au_rw_write_trylock(struct au_rwsem *rw)
+{
+ int ret;
+
+ ret = down_write_trylock(&rw->rwsem);
+ if (ret)
+ AuDbgWcntInc(rw);
+ return ret;
+}
+
+#undef AuDbgCntInit
+#undef AuDbgRcntInc
+#undef AuDbgRcntDec
+#undef AuDbgWcntInc
+#undef AuDbgWcntDec
+
+#define AuSimpleLockRwsemFuncs(prefix, param, rwsem) \
+static inline void prefix##_read_lock(param) \
+{ au_rw_read_lock(rwsem); } \
+static inline void prefix##_write_lock(param) \
+{ au_rw_write_lock(rwsem); } \
+static inline int prefix##_read_trylock(param) \
+{ return au_rw_read_trylock(rwsem); } \
+static inline int prefix##_write_trylock(param) \
+{ return au_rw_write_trylock(rwsem); }
+/* why is the _nested version not defined? */
+/* static inline void prefix##_read_trylock_nested(param, lsc)
+{ au_rw_read_trylock_nested(rwsem, lsc); }
+static inline void prefix##_write_trylock_nested(param, lsc)
+{ au_rw_write_trylock_nested(rwsem, lsc); } */
+
+#define AuSimpleUnlockRwsemFuncs(prefix, param, rwsem) \
+static inline void prefix##_read_unlock(param) \
+{ au_rw_read_unlock(rwsem); } \
+static inline void prefix##_write_unlock(param) \
+{ au_rw_write_unlock(rwsem); } \
+static inline void prefix##_downgrade_lock(param) \
+{ au_rw_dgrade_lock(rwsem); }
+
+#define AuSimpleRwsemFuncs(prefix, param, rwsem) \
+ AuSimpleLockRwsemFuncs(prefix, param, rwsem) \
+ AuSimpleUnlockRwsemFuncs(prefix, param, rwsem)
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_RWSEM_H__ */
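The wrappers add nothing to the locking itself; their value is the rcnt/wcnt debug counters, which let the AuRwMust*Lock() assertions check that the expected lock type is actually held when CONFIG_AUFS_DEBUG is enabled. The same idea in a small userspace sketch, using pthreads instead of the kernel rwsem; all names are local to the sketch:

/* Userspace analogue of the debug-counted rwsem wrapper. */
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>

struct dbg_rwsem {
	pthread_rwlock_t rwsem;
	atomic_int rcnt, wcnt;	/* just for debugging, not an almighty counter */
};

static void dbg_init(struct dbg_rwsem *rw)
{
	pthread_rwlock_init(&rw->rwsem, NULL);
	atomic_store(&rw->rcnt, 0);
	atomic_store(&rw->wcnt, 0);
}

static void dbg_read_lock(struct dbg_rwsem *rw)
{
	pthread_rwlock_rdlock(&rw->rwsem);
	atomic_fetch_add(&rw->rcnt, 1);
}

static void dbg_read_unlock(struct dbg_rwsem *rw)
{
	assert(atomic_load(&rw->rcnt) > 0);	/* AuRwMustReadLock() */
	atomic_fetch_sub(&rw->rcnt, 1);
	pthread_rwlock_unlock(&rw->rwsem);
}

static void dbg_write_lock(struct dbg_rwsem *rw)
{
	pthread_rwlock_wrlock(&rw->rwsem);
	atomic_fetch_add(&rw->wcnt, 1);
}

static void dbg_write_unlock(struct dbg_rwsem *rw)
{
	assert(atomic_load(&rw->wcnt) > 0);	/* AuRwMustWriteLock() */
	atomic_fetch_sub(&rw->wcnt, 1);
	pthread_rwlock_unlock(&rw->rwsem);
}

int main(void)
{
	struct dbg_rwsem rw;

	dbg_init(&rw);
	dbg_read_lock(&rw);
	/* AuRwMustAnyLock(): either counter is positive while locked */
	assert(atomic_load(&rw.rcnt) > 0 || atomic_load(&rw.wcnt) > 0);
	dbg_read_unlock(&rw);
	dbg_write_lock(&rw);
	dbg_write_unlock(&rw);
	return 0;
}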
diff --git a/fs/aufs/sbinfo.c b/fs/aufs/sbinfo.c
new file mode 100644
index 000000000..a9d782e43
--- /dev/null
+++ b/fs/aufs/sbinfo.c
@@ -0,0 +1,337 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * superblock private data
+ */
+
+#include "aufs.h"
+
+/*
+ * they are necessary regardless of whether sysfs is disabled.
+ */
+void au_si_free(struct kobject *kobj)
+{
+ int i;
+ struct au_sbinfo *sbinfo;
+ char *locked __maybe_unused; /* debug only */
+
+ sbinfo = container_of(kobj, struct au_sbinfo, si_kobj);
+ for (i = 0; i < AuPlink_NHASH; i++)
+ AuDebugOn(!hlist_empty(&sbinfo->si_plink[i].head));
+ AuDebugOn(atomic_read(&sbinfo->si_nowait.nw_len));
+
+ au_rw_write_lock(&sbinfo->si_rwsem);
+ au_br_free(sbinfo);
+ au_rw_write_unlock(&sbinfo->si_rwsem);
+
+ kfree(sbinfo->si_branch);
+ for (i = 0; i < AU_NPIDMAP; i++)
+ kfree(sbinfo->au_si_pid.pid_bitmap[i]);
+ mutex_destroy(&sbinfo->au_si_pid.pid_mtx);
+ mutex_destroy(&sbinfo->si_xib_mtx);
+ AuRwDestroy(&sbinfo->si_rwsem);
+
+ kfree(sbinfo);
+}
+
+int au_si_alloc(struct super_block *sb)
+{
+ int err, i;
+ struct au_sbinfo *sbinfo;
+ static struct lock_class_key aufs_si;
+
+ err = -ENOMEM;
+ sbinfo = kzalloc(sizeof(*sbinfo), GFP_NOFS);
+ if (unlikely(!sbinfo))
+ goto out;
+
+ /* will be reallocated separately */
+ sbinfo->si_branch = kzalloc(sizeof(*sbinfo->si_branch), GFP_NOFS);
+ if (unlikely(!sbinfo->si_branch))
+ goto out_sbinfo;
+
+ err = sysaufs_si_init(sbinfo);
+ if (unlikely(err))
+ goto out_br;
+
+ au_nwt_init(&sbinfo->si_nowait);
+ au_rw_init_wlock(&sbinfo->si_rwsem);
+ au_rw_class(&sbinfo->si_rwsem, &aufs_si);
+ mutex_init(&sbinfo->au_si_pid.pid_mtx);
+
+ atomic_long_set(&sbinfo->si_ninodes, 0);
+ atomic_long_set(&sbinfo->si_nfiles, 0);
+
+ sbinfo->si_bend = -1;
+ sbinfo->si_last_br_id = AUFS_BRANCH_MAX / 2;
+
+ sbinfo->si_wbr_copyup = AuWbrCopyup_Def;
+ sbinfo->si_wbr_create = AuWbrCreate_Def;
+ sbinfo->si_wbr_copyup_ops = au_wbr_copyup_ops + sbinfo->si_wbr_copyup;
+ sbinfo->si_wbr_create_ops = au_wbr_create_ops + sbinfo->si_wbr_create;
+
+ au_fhsm_init(sbinfo);
+
+ sbinfo->si_mntflags = au_opts_plink(AuOpt_Def);
+
+ sbinfo->si_xino_jiffy = jiffies;
+ sbinfo->si_xino_expire
+ = msecs_to_jiffies(AUFS_XINO_DEF_SEC * MSEC_PER_SEC);
+ mutex_init(&sbinfo->si_xib_mtx);
+ sbinfo->si_xino_brid = -1;
+ /* leave si_xib_last_pindex and si_xib_next_bit */
+
+ au_sphl_init(&sbinfo->si_aopen);
+
+ sbinfo->si_rdcache = msecs_to_jiffies(AUFS_RDCACHE_DEF * MSEC_PER_SEC);
+ sbinfo->si_rdblk = AUFS_RDBLK_DEF;
+ sbinfo->si_rdhash = AUFS_RDHASH_DEF;
+ sbinfo->si_dirwh = AUFS_DIRWH_DEF;
+
+ for (i = 0; i < AuPlink_NHASH; i++)
+ au_sphl_init(sbinfo->si_plink + i);
+ init_waitqueue_head(&sbinfo->si_plink_wq);
+ spin_lock_init(&sbinfo->si_plink_maint_lock);
+
+ au_sphl_init(&sbinfo->si_files);
+
+ /* with getattr by default */
+ sbinfo->si_iop_array = aufs_iop;
+
+ /* leave other members for sysaufs and si_mnt. */
+ sbinfo->si_sb = sb;
+ sb->s_fs_info = sbinfo;
+ si_pid_set(sb);
+ return 0; /* success */
+
+out_br:
+ kfree(sbinfo->si_branch);
+out_sbinfo:
+ kfree(sbinfo);
+out:
+ return err;
+}
+
+int au_sbr_realloc(struct au_sbinfo *sbinfo, int nbr)
+{
+ int err, sz;
+ struct au_branch **brp;
+
+ AuRwMustWriteLock(&sbinfo->si_rwsem);
+
+ err = -ENOMEM;
+ sz = sizeof(*brp) * (sbinfo->si_bend + 1);
+ if (unlikely(!sz))
+ sz = sizeof(*brp);
+ brp = au_kzrealloc(sbinfo->si_branch, sz, sizeof(*brp) * nbr, GFP_NOFS);
+ if (brp) {
+ sbinfo->si_branch = brp;
+ err = 0;
+ }
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+unsigned int au_sigen_inc(struct super_block *sb)
+{
+ unsigned int gen;
+ struct inode *inode;
+
+ SiMustWriteLock(sb);
+
+ gen = ++au_sbi(sb)->si_generation;
+ au_update_digen(sb->s_root);
+ inode = d_inode(sb->s_root);
+ au_update_iigen(inode, /*half*/0);
+ inode->i_version++;
+ return gen;
+}
+
+aufs_bindex_t au_new_br_id(struct super_block *sb)
+{
+ aufs_bindex_t br_id;
+ int i;
+ struct au_sbinfo *sbinfo;
+
+ SiMustWriteLock(sb);
+
+ sbinfo = au_sbi(sb);
+ for (i = 0; i <= AUFS_BRANCH_MAX; i++) {
+ br_id = ++sbinfo->si_last_br_id;
+ AuDebugOn(br_id < 0);
+ if (br_id && au_br_index(sb, br_id) < 0)
+ return br_id;
+ }
+
+ return -1;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* it is ok that new 'nwt' tasks are appended while we are sleeping */
+int si_read_lock(struct super_block *sb, int flags)
+{
+ int err;
+
+ err = 0;
+ if (au_ftest_lock(flags, FLUSH))
+ au_nwt_flush(&au_sbi(sb)->si_nowait);
+
+ si_noflush_read_lock(sb);
+ err = au_plink_maint(sb, flags);
+ if (unlikely(err))
+ si_read_unlock(sb);
+
+ return err;
+}
+
+int si_write_lock(struct super_block *sb, int flags)
+{
+ int err;
+
+ if (au_ftest_lock(flags, FLUSH))
+ au_nwt_flush(&au_sbi(sb)->si_nowait);
+
+ si_noflush_write_lock(sb);
+ err = au_plink_maint(sb, flags);
+ if (unlikely(err))
+ si_write_unlock(sb);
+
+ return err;
+}
+
+/* dentry and super_block lock. call at entry point */
+int aufs_read_lock(struct dentry *dentry, int flags)
+{
+ int err;
+ struct super_block *sb;
+
+ sb = dentry->d_sb;
+ err = si_read_lock(sb, flags);
+ if (unlikely(err))
+ goto out;
+
+ if (au_ftest_lock(flags, DW))
+ di_write_lock_child(dentry);
+ else
+ di_read_lock_child(dentry, flags);
+
+ if (au_ftest_lock(flags, GEN)) {
+ err = au_digen_test(dentry, au_sigen(sb));
+ if (!au_opt_test(au_mntflags(sb), UDBA_NONE))
+ AuDebugOn(!err && au_dbrange_test(dentry));
+ else if (!err)
+ err = au_dbrange_test(dentry);
+ if (unlikely(err))
+ aufs_read_unlock(dentry, flags);
+ }
+
+out:
+ return err;
+}
+
+void aufs_read_unlock(struct dentry *dentry, int flags)
+{
+ if (au_ftest_lock(flags, DW))
+ di_write_unlock(dentry);
+ else
+ di_read_unlock(dentry, flags);
+ si_read_unlock(dentry->d_sb);
+}
+
+void aufs_write_lock(struct dentry *dentry)
+{
+ si_write_lock(dentry->d_sb, AuLock_FLUSH | AuLock_NOPLMW);
+ di_write_lock_child(dentry);
+}
+
+void aufs_write_unlock(struct dentry *dentry)
+{
+ di_write_unlock(dentry);
+ si_write_unlock(dentry->d_sb);
+}
+
+int aufs_read_and_write_lock2(struct dentry *d1, struct dentry *d2, int flags)
+{
+ int err;
+ unsigned int sigen;
+ struct super_block *sb;
+
+ sb = d1->d_sb;
+ err = si_read_lock(sb, flags);
+ if (unlikely(err))
+ goto out;
+
+ di_write_lock2_child(d1, d2, au_ftest_lock(flags, DIRS));
+
+ if (au_ftest_lock(flags, GEN)) {
+ sigen = au_sigen(sb);
+ err = au_digen_test(d1, sigen);
+ AuDebugOn(!err && au_dbrange_test(d1));
+ if (!err) {
+ err = au_digen_test(d2, sigen);
+ AuDebugOn(!err && au_dbrange_test(d2));
+ }
+ if (unlikely(err))
+ aufs_read_and_write_unlock2(d1, d2);
+ }
+
+out:
+ return err;
+}
+
+void aufs_read_and_write_unlock2(struct dentry *d1, struct dentry *d2)
+{
+ di_write_unlock2(d1, d2);
+ si_read_unlock(d1->d_sb);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void si_pid_alloc(struct au_si_pid *au_si_pid, int idx)
+{
+ unsigned long *p;
+
+ BUILD_BUG_ON(sizeof(unsigned long) !=
+ sizeof(*au_si_pid->pid_bitmap));
+
+ mutex_lock(&au_si_pid->pid_mtx);
+ p = au_si_pid->pid_bitmap[idx];
+ while (!p) {
+ /*
+ * bad approach.
+ * but keeping 'si_pid_set()' void is more important.
+ */
+ p = kcalloc(BITS_TO_LONGS(AU_PIDSTEP),
+ sizeof(*au_si_pid->pid_bitmap),
+ GFP_NOFS);
+ if (p)
+ break;
+ cond_resched();
+ }
+ au_si_pid->pid_bitmap[idx] = p;
+ mutex_unlock(&au_si_pid->pid_mtx);
+}
+
+void si_pid_set(struct super_block *sb)
+{
+ pid_t bit;
+ int idx;
+ unsigned long *bitmap;
+ struct au_si_pid *au_si_pid;
+
+ si_pid_idx_bit(&idx, &bit);
+ au_si_pid = &au_sbi(sb)->au_si_pid;
+ bitmap = au_si_pid->pid_bitmap[idx];
+ if (!bitmap) {
+ si_pid_alloc(au_si_pid, idx);
+ bitmap = au_si_pid->pid_bitmap[idx];
+ }
+ AuDebugOn(test_bit(bit, bitmap));
+ set_bit(bit, bitmap);
+ /* smp_mb(); */
+}
diff --git a/fs/aufs/spl.h b/fs/aufs/spl.h
new file mode 100644
index 000000000..f9b528826
--- /dev/null
+++ b/fs/aufs/spl.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * simple list protected by a spinlock
+ */
+
+#ifndef __AUFS_SPL_H__
+#define __AUFS_SPL_H__
+
+#ifdef __KERNEL__
+
+struct au_splhead {
+ spinlock_t spin;
+ struct list_head head;
+};
+
+static inline void au_spl_init(struct au_splhead *spl)
+{
+ spin_lock_init(&spl->spin);
+ INIT_LIST_HEAD(&spl->head);
+}
+
+static inline void au_spl_add(struct list_head *list, struct au_splhead *spl)
+{
+ spin_lock(&spl->spin);
+ list_add(list, &spl->head);
+ spin_unlock(&spl->spin);
+}
+
+static inline void au_spl_del(struct list_head *list, struct au_splhead *spl)
+{
+ spin_lock(&spl->spin);
+ list_del(list);
+ spin_unlock(&spl->spin);
+}
+
+static inline void au_spl_del_rcu(struct list_head *list,
+ struct au_splhead *spl)
+{
+ spin_lock(&spl->spin);
+ list_del_rcu(list);
+ spin_unlock(&spl->spin);
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_sphlhead {
+ spinlock_t spin;
+ struct hlist_head head;
+};
+
+static inline void au_sphl_init(struct au_sphlhead *sphl)
+{
+ spin_lock_init(&sphl->spin);
+ INIT_HLIST_HEAD(&sphl->head);
+}
+
+static inline void au_sphl_add(struct hlist_node *hlist,
+ struct au_sphlhead *sphl)
+{
+ spin_lock(&sphl->spin);
+ hlist_add_head(hlist, &sphl->head);
+ spin_unlock(&sphl->spin);
+}
+
+static inline void au_sphl_del(struct hlist_node *hlist,
+ struct au_sphlhead *sphl)
+{
+ spin_lock(&sphl->spin);
+ hlist_del(hlist);
+ spin_unlock(&sphl->spin);
+}
+
+static inline void au_sphl_del_rcu(struct hlist_node *hlist,
+ struct au_sphlhead *sphl)
+{
+ spin_lock(&sphl->spin);
+ hlist_del_rcu(hlist);
+ spin_unlock(&sphl->spin);
+}
+
+static inline unsigned long au_sphl_count(struct au_sphlhead *sphl)
+{
+ unsigned long cnt;
+ struct hlist_node *pos;
+
+ cnt = 0;
+ spin_lock(&sphl->spin);
+ hlist_for_each(pos, &sphl->head)
+ cnt++;
+ spin_unlock(&sphl->spin);
+ return cnt;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_SPL_H__ */
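au_sphlhead is nothing more than a spinlock paired with an hlist head: every mutation and the counting walk run under the lock, and the plink code layers RCU on top for its read side. A minimal userspace analogue follows (pthread spinlock, plain singly-linked list, RCU omitted; names are local to the sketch):

/* Userspace analogue of the spinlock-protected list head. */
#include <pthread.h>
#include <stdio.h>

struct node {
	struct node *next;
	int val;
};

struct sphead {
	pthread_spinlock_t spin;
	struct node *head;
};

static void sp_init(struct sphead *sp)
{
	pthread_spin_init(&sp->spin, PTHREAD_PROCESS_PRIVATE);
	sp->head = NULL;
}

static void sp_add(struct sphead *sp, struct node *n)
{
	pthread_spin_lock(&sp->spin);
	n->next = sp->head;	/* add at the head, like hlist_add_head() */
	sp->head = n;
	pthread_spin_unlock(&sp->spin);
}

static unsigned long sp_count(struct sphead *sp)
{
	unsigned long cnt = 0;
	struct node *n;

	pthread_spin_lock(&sp->spin);	/* the walk runs under the lock too */
	for (n = sp->head; n; n = n->next)
		cnt++;
	pthread_spin_unlock(&sp->spin);
	return cnt;
}

int main(void)
{
	struct sphead sp;
	struct node a = { .val = 1 }, b = { .val = 2 };

	sp_init(&sp);
	sp_add(&sp, &a);
	sp_add(&sp, &b);
	printf("count %lu\n", sp_count(&sp));
	return 0;
}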
diff --git a/fs/aufs/super.c b/fs/aufs/super.c
new file mode 100644
index 000000000..7928a5031
--- /dev/null
+++ b/fs/aufs/super.c
@@ -0,0 +1,1026 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * mount and super_block operations
+ */
+
+#include <linux/mm.h>
+#include <linux/seq_file.h>
+#include <linux/statfs.h>
+#include <linux/vmalloc.h>
+#include "aufs.h"
+
+/*
+ * super_operations
+ */
+static struct inode *aufs_alloc_inode(struct super_block *sb __maybe_unused)
+{
+ struct au_icntnr *c;
+
+ c = au_cache_alloc_icntnr();
+ if (c) {
+ au_icntnr_init(c);
+ c->vfs_inode.i_version = 1; /* sigen(sb); */
+ c->iinfo.ii_hinode = NULL;
+ return &c->vfs_inode;
+ }
+ return NULL;
+}
+
+static void aufs_destroy_inode_cb(struct rcu_head *head)
+{
+ struct inode *inode = container_of(head, struct inode, i_rcu);
+
+ INIT_HLIST_HEAD(&inode->i_dentry);
+ au_cache_free_icntnr(container_of(inode, struct au_icntnr, vfs_inode));
+}
+
+static void aufs_destroy_inode(struct inode *inode)
+{
+ au_iinfo_fin(inode);
+ call_rcu(&inode->i_rcu, aufs_destroy_inode_cb);
+}
+
+struct inode *au_iget_locked(struct super_block *sb, ino_t ino)
+{
+ struct inode *inode;
+ int err;
+
+ inode = iget_locked(sb, ino);
+ if (unlikely(!inode)) {
+ inode = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+ if (!(inode->i_state & I_NEW))
+ goto out;
+
+ err = au_xigen_new(inode);
+ if (!err)
+ err = au_iinfo_init(inode);
+ if (!err)
+ inode->i_version++;
+ else {
+ iget_failed(inode);
+ inode = ERR_PTR(err);
+ }
+
+out:
+ /* never return NULL */
+ AuDebugOn(!inode);
+ AuTraceErrPtr(inode);
+ return inode;
+}
+
+/* lock free root dinfo */
+static int au_show_brs(struct seq_file *seq, struct super_block *sb)
+{
+ int err;
+ aufs_bindex_t bindex, bend;
+ struct path path;
+ struct au_hdentry *hdp;
+ struct au_branch *br;
+ au_br_perm_str_t perm;
+
+ err = 0;
+ bend = au_sbend(sb);
+ hdp = au_di(sb->s_root)->di_hdentry;
+ for (bindex = 0; !err && bindex <= bend; bindex++) {
+ br = au_sbr(sb, bindex);
+ path.mnt = au_br_mnt(br);
+ path.dentry = hdp[bindex].hd_dentry;
+ err = au_seq_path(seq, &path);
+ if (!err) {
+ au_optstr_br_perm(&perm, br->br_perm);
+ seq_printf(seq, "=%s", perm.a);
+ if (bindex != bend)
+ seq_putc(seq, ':');
+ }
+ }
+ if (unlikely(err || seq_has_overflowed(seq)))
+ err = -E2BIG;
+
+ return err;
+}
+
+static void au_show_wbr_create(struct seq_file *m, int v,
+ struct au_sbinfo *sbinfo)
+{
+ const char *pat;
+
+ AuRwMustAnyLock(&sbinfo->si_rwsem);
+
+ seq_puts(m, ",create=");
+ pat = au_optstr_wbr_create(v);
+ switch (v) {
+ case AuWbrCreate_TDP:
+ case AuWbrCreate_RR:
+ case AuWbrCreate_MFS:
+ case AuWbrCreate_PMFS:
+ seq_puts(m, pat);
+ break;
+ case AuWbrCreate_MFSV:
+ seq_printf(m, /*pat*/"mfs:%lu",
+ jiffies_to_msecs(sbinfo->si_wbr_mfs.mfs_expire)
+ / MSEC_PER_SEC);
+ break;
+ case AuWbrCreate_PMFSV:
+ seq_printf(m, /*pat*/"pmfs:%lu",
+ jiffies_to_msecs(sbinfo->si_wbr_mfs.mfs_expire)
+ / MSEC_PER_SEC);
+ break;
+ case AuWbrCreate_MFSRR:
+ seq_printf(m, /*pat*/"mfsrr:%llu",
+ sbinfo->si_wbr_mfs.mfsrr_watermark);
+ break;
+ case AuWbrCreate_MFSRRV:
+ seq_printf(m, /*pat*/"mfsrr:%llu:%lu",
+ sbinfo->si_wbr_mfs.mfsrr_watermark,
+ jiffies_to_msecs(sbinfo->si_wbr_mfs.mfs_expire)
+ / MSEC_PER_SEC);
+ break;
+ case AuWbrCreate_PMFSRR:
+ seq_printf(m, /*pat*/"pmfsrr:%llu",
+ sbinfo->si_wbr_mfs.mfsrr_watermark);
+ break;
+ case AuWbrCreate_PMFSRRV:
+ seq_printf(m, /*pat*/"pmfsrr:%llu:%lu",
+ sbinfo->si_wbr_mfs.mfsrr_watermark,
+ jiffies_to_msecs(sbinfo->si_wbr_mfs.mfs_expire)
+ / MSEC_PER_SEC);
+ break;
+ }
+}
+
+static int au_show_xino(struct seq_file *seq, struct super_block *sb)
+{
+#ifdef CONFIG_SYSFS
+ return 0;
+#else
+ int err;
+ const int len = sizeof(AUFS_XINO_FNAME) - 1;
+ aufs_bindex_t bindex, brid;
+ struct qstr *name;
+ struct file *f;
+ struct dentry *d, *h_root;
+ struct au_hdentry *hdp;
+
+	AuRwMustAnyLock(&au_sbi(sb)->si_rwsem);
+
+ err = 0;
+ f = au_sbi(sb)->si_xib;
+ if (!f)
+ goto out;
+
+ /* stop printing the default xino path on the first writable branch */
+ h_root = NULL;
+ brid = au_xino_brid(sb);
+ if (brid >= 0) {
+ bindex = au_br_index(sb, brid);
+ hdp = au_di(sb->s_root)->di_hdentry;
+ h_root = hdp[0 + bindex].hd_dentry;
+ }
+ d = f->f_path.dentry;
+ name = &d->d_name;
+ /* safe ->d_parent because the file is unlinked */
+ if (d->d_parent == h_root
+ && name->len == len
+ && !memcmp(name->name, AUFS_XINO_FNAME, len))
+ goto out;
+
+ seq_puts(seq, ",xino=");
+ err = au_xino_path(seq, f);
+
+out:
+ return err;
+#endif
+}
+
+/* seq_file will re-call me if the string is too long */
+static int aufs_show_options(struct seq_file *m, struct dentry *dentry)
+{
+ int err;
+ unsigned int mnt_flags, v;
+ struct super_block *sb;
+ struct au_sbinfo *sbinfo;
+
+#define AuBool(name, str) do { \
+ v = au_opt_test(mnt_flags, name); \
+ if (v != au_opt_test(AuOpt_Def, name)) \
+ seq_printf(m, ",%s" #str, v ? "" : "no"); \
+} while (0)
+
+#define AuStr(name, str) do { \
+ v = mnt_flags & AuOptMask_##name; \
+ if (v != (AuOpt_Def & AuOptMask_##name)) \
+ seq_printf(m, "," #str "=%s", au_optstr_##str(v)); \
+} while (0)
+
+#define AuUInt(name, str, val) do { \
+ if (val != AUFS_##name##_DEF) \
+ seq_printf(m, "," #str "=%u", val); \
+} while (0)
+
+ sb = dentry->d_sb;
+ if (sb->s_flags & MS_POSIXACL)
+ seq_puts(m, ",acl");
+
+ /* lock free root dinfo */
+ si_noflush_read_lock(sb);
+ sbinfo = au_sbi(sb);
+ seq_printf(m, ",si=%lx", sysaufs_si_id(sbinfo));
+
+ mnt_flags = au_mntflags(sb);
+ if (au_opt_test(mnt_flags, XINO)) {
+ err = au_show_xino(m, sb);
+ if (unlikely(err))
+ goto out;
+ } else
+ seq_puts(m, ",noxino");
+
+ AuBool(TRUNC_XINO, trunc_xino);
+ AuStr(UDBA, udba);
+ AuBool(SHWH, shwh);
+ AuBool(PLINK, plink);
+ AuBool(DIO, dio);
+ AuBool(DIRPERM1, dirperm1);
+
+ v = sbinfo->si_wbr_create;
+ if (v != AuWbrCreate_Def)
+ au_show_wbr_create(m, v, sbinfo);
+
+ v = sbinfo->si_wbr_copyup;
+ if (v != AuWbrCopyup_Def)
+ seq_printf(m, ",cpup=%s", au_optstr_wbr_copyup(v));
+
+ v = au_opt_test(mnt_flags, ALWAYS_DIROPQ);
+ if (v != au_opt_test(AuOpt_Def, ALWAYS_DIROPQ))
+ seq_printf(m, ",diropq=%c", v ? 'a' : 'w');
+
+ AuUInt(DIRWH, dirwh, sbinfo->si_dirwh);
+
+ v = jiffies_to_msecs(sbinfo->si_rdcache) / MSEC_PER_SEC;
+ AuUInt(RDCACHE, rdcache, v);
+
+ AuUInt(RDBLK, rdblk, sbinfo->si_rdblk);
+ AuUInt(RDHASH, rdhash, sbinfo->si_rdhash);
+
+ au_fhsm_show(m, sbinfo);
+
+ AuBool(SUM, sum);
+ /* AuBool(SUM_W, wsum); */
+ AuBool(WARN_PERM, warn_perm);
+ AuBool(VERBOSE, verbose);
+
+out:
+ /* be sure to print "br:" last */
+ if (!sysaufs_brs) {
+ seq_puts(m, ",br:");
+ au_show_brs(m, sb);
+ }
+ si_read_unlock(sb);
+ return 0;
+
+#undef AuBool
+#undef AuStr
+#undef AuUInt
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* sum mode which returns the summation for statfs(2) */
+
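+/* saturating helpers: clamp to ULLONG_MAX instead of wrapping on overflow */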
+static u64 au_add_till_max(u64 a, u64 b)
+{
+ u64 old;
+
+ old = a;
+ a += b;
+ if (old <= a)
+ return a;
+ return ULLONG_MAX;
+}
+
+static u64 au_mul_till_max(u64 a, long mul)
+{
+ u64 old;
+
+ old = a;
+ a *= mul;
+ if (old <= a)
+ return a;
+ return ULLONG_MAX;
+}
+
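+/*
+ * sum up the statistics of all branches, skipping branches which share the
+ * same underlying superblock, and scale every count to the smallest f_bsize
+ * seen so far.
+ */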
+static int au_statfs_sum(struct super_block *sb, struct kstatfs *buf)
+{
+ int err;
+ long bsize, factor;
+ u64 blocks, bfree, bavail, files, ffree;
+ aufs_bindex_t bend, bindex, i;
+ unsigned char shared;
+ struct path h_path;
+ struct super_block *h_sb;
+
+ err = 0;
+ bsize = LONG_MAX;
+ files = 0;
+ ffree = 0;
+ blocks = 0;
+ bfree = 0;
+ bavail = 0;
+ bend = au_sbend(sb);
+ for (bindex = 0; bindex <= bend; bindex++) {
+ h_path.mnt = au_sbr_mnt(sb, bindex);
+ h_sb = h_path.mnt->mnt_sb;
+ shared = 0;
+ for (i = 0; !shared && i < bindex; i++)
+ shared = (au_sbr_sb(sb, i) == h_sb);
+ if (shared)
+ continue;
+
+ /* sb->s_root for NFS is unreliable */
+ h_path.dentry = h_path.mnt->mnt_root;
+ err = vfs_statfs(&h_path, buf);
+ if (unlikely(err))
+ goto out;
+
+ if (bsize > buf->f_bsize) {
+ /*
+ * we will reduce bsize, so we have to expand blocks
+ * etc. to match them again
+ */
+ factor = (bsize / buf->f_bsize);
+ blocks = au_mul_till_max(blocks, factor);
+ bfree = au_mul_till_max(bfree, factor);
+ bavail = au_mul_till_max(bavail, factor);
+ bsize = buf->f_bsize;
+ }
+
+ factor = (buf->f_bsize / bsize);
+ blocks = au_add_till_max(blocks,
+ au_mul_till_max(buf->f_blocks, factor));
+ bfree = au_add_till_max(bfree,
+ au_mul_till_max(buf->f_bfree, factor));
+ bavail = au_add_till_max(bavail,
+ au_mul_till_max(buf->f_bavail, factor));
+ files = au_add_till_max(files, buf->f_files);
+ ffree = au_add_till_max(ffree, buf->f_ffree);
+ }
+
+ buf->f_bsize = bsize;
+ buf->f_blocks = blocks;
+ buf->f_bfree = bfree;
+ buf->f_bavail = bavail;
+ buf->f_files = files;
+ buf->f_ffree = ffree;
+ buf->f_frsize = 0;
+
+out:
+ return err;
+}
+
+static int aufs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ int err;
+ struct path h_path;
+ struct super_block *sb;
+
+ /* lock free root dinfo */
+ sb = dentry->d_sb;
+ si_noflush_read_lock(sb);
+ if (!au_opt_test(au_mntflags(sb), SUM)) {
+ /* sb->s_root for NFS is unreliable */
+ h_path.mnt = au_sbr_mnt(sb, 0);
+ h_path.dentry = h_path.mnt->mnt_root;
+ err = vfs_statfs(&h_path, buf);
+ } else
+ err = au_statfs_sum(sb, buf);
+ si_read_unlock(sb);
+
+ if (!err) {
+ buf->f_type = AUFS_SUPER_MAGIC;
+ buf->f_namelen = AUFS_MAX_NAMELEN;
+ memset(&buf->f_fsid, 0, sizeof(buf->f_fsid));
+ }
+ /* buf->f_bsize = buf->f_blocks = buf->f_bfree = buf->f_bavail = -1; */
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int aufs_sync_fs(struct super_block *sb, int wait)
+{
+ int err, e;
+ aufs_bindex_t bend, bindex;
+ struct au_branch *br;
+ struct super_block *h_sb;
+
+ err = 0;
+ si_noflush_read_lock(sb);
+ bend = au_sbend(sb);
+ for (bindex = 0; bindex <= bend; bindex++) {
+ br = au_sbr(sb, bindex);
+ if (!au_br_writable(br->br_perm))
+ continue;
+
+ h_sb = au_sbr_sb(sb, bindex);
+ if (h_sb->s_op->sync_fs) {
+ e = h_sb->s_op->sync_fs(h_sb, wait);
+ if (unlikely(e && !err))
+ err = e;
+ /* go on even if an error happens */
+ }
+ }
+ si_read_unlock(sb);
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* final actions when unmounting a file system */
+static void aufs_put_super(struct super_block *sb)
+{
+ struct au_sbinfo *sbinfo;
+
+ sbinfo = au_sbi(sb);
+ if (!sbinfo)
+ return;
+
+ dbgaufs_si_fin(sbinfo);
+ kobject_put(&sbinfo->si_kobj);
+}
+
+/* ---------------------------------------------------------------------- */
+
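+/*
+ * allocate an array of '*hint' pointers (kzalloc first, vzalloc as fallback)
+ * and let 'cb' fill it.  on return, '*hint' holds the number of entries
+ * actually stored.
+ */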
+void *au_array_alloc(unsigned long long *hint, au_arraycb_t cb,
+ struct super_block *sb, void *arg)
+{
+ void *array;
+ unsigned long long n, sz;
+
+ array = NULL;
+ n = 0;
+ if (!*hint)
+ goto out;
+
+ if (*hint > ULLONG_MAX / sizeof(array)) {
+ array = ERR_PTR(-EMFILE);
+ pr_err("hint %llu\n", *hint);
+ goto out;
+ }
+
+ sz = sizeof(array) * *hint;
+ array = kzalloc(sz, GFP_NOFS);
+ if (unlikely(!array))
+ array = vzalloc(sz);
+ if (unlikely(!array)) {
+ array = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ n = cb(sb, array, *hint, arg);
+ AuDebugOn(n > *hint);
+
+out:
+ *hint = n;
+ return array;
+}
+
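+/* collect the busy aufs inodes, taking a reference on each of them */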
+static unsigned long long au_iarray_cb(struct super_block *sb, void *a,
+ unsigned long long max __maybe_unused,
+ void *arg)
+{
+ unsigned long long n;
+ struct inode **p, *inode;
+ struct list_head *head;
+
+ n = 0;
+ p = a;
+ head = arg;
+ spin_lock(&sb->s_inode_list_lock);
+ list_for_each_entry(inode, head, i_sb_list) {
+ if (!is_bad_inode(inode)
+ && au_ii(inode)->ii_bstart >= 0) {
+ spin_lock(&inode->i_lock);
+ if (atomic_read(&inode->i_count)) {
+ au_igrab(inode);
+ *p++ = inode;
+ n++;
+ AuDebugOn(n > max);
+ }
+ spin_unlock(&inode->i_lock);
+ }
+ }
+ spin_unlock(&sb->s_inode_list_lock);
+
+ return n;
+}
+
+struct inode **au_iarray_alloc(struct super_block *sb, unsigned long long *max)
+{
+ *max = atomic_long_read(&au_sbi(sb)->si_ninodes);
+ return au_array_alloc(max, au_iarray_cb, sb, &sb->s_inodes);
+}
+
+void au_iarray_free(struct inode **a, unsigned long long max)
+{
+ unsigned long long ull;
+
+ for (ull = 0; ull < max; ull++)
+ iput(a[ull]);
+ kvfree(a);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * refresh dentry and inode at remount time.
+ */
+/* todo: consolidate with simple_reval_dpath() and au_reval_for_attr() */
+static int au_do_refresh(struct dentry *dentry, unsigned int dir_flags,
+ struct dentry *parent)
+{
+ int err;
+
+ di_write_lock_child(dentry);
+ di_read_lock_parent(parent, AuLock_IR);
+ err = au_refresh_dentry(dentry, parent);
+ if (!err && dir_flags)
+ au_hn_reset(d_inode(dentry), dir_flags);
+ di_read_unlock(parent, AuLock_IR);
+ di_write_unlock(dentry);
+
+ return err;
+}
+
+static int au_do_refresh_d(struct dentry *dentry, unsigned int sigen,
+ struct au_sbinfo *sbinfo,
+ const unsigned int dir_flags, unsigned int do_idop)
+{
+ int err;
+ struct dentry *parent;
+
+ err = 0;
+ parent = dget_parent(dentry);
+ if (!au_digen_test(parent, sigen) && au_digen_test(dentry, sigen)) {
+ if (d_really_is_positive(dentry)) {
+ if (!d_is_dir(dentry))
+ err = au_do_refresh(dentry, /*dir_flags*/0,
+ parent);
+ else {
+ err = au_do_refresh(dentry, dir_flags, parent);
+ if (unlikely(err))
+ au_fset_si(sbinfo, FAILED_REFRESH_DIR);
+ }
+ } else
+ err = au_do_refresh(dentry, /*dir_flags*/0, parent);
+ AuDbgDentry(dentry);
+ }
+ dput(parent);
+
+ if (!err) {
+ if (do_idop)
+ au_refresh_dop(dentry, /*force_reval*/0);
+ } else
+ au_refresh_dop(dentry, /*force_reval*/1);
+
+ AuTraceErr(err);
+ return err;
+}
+
+static int au_refresh_d(struct super_block *sb, unsigned int do_idop)
+{
+ int err, i, j, ndentry, e;
+ unsigned int sigen;
+ struct au_dcsub_pages dpages;
+ struct au_dpage *dpage;
+ struct dentry **dentries, *d;
+ struct au_sbinfo *sbinfo;
+ struct dentry *root = sb->s_root;
+ const unsigned int dir_flags = au_hi_flags(d_inode(root), /*isdir*/1);
+
+ if (do_idop)
+ au_refresh_dop(root, /*force_reval*/0);
+
+ err = au_dpages_init(&dpages, GFP_NOFS);
+ if (unlikely(err))
+ goto out;
+ err = au_dcsub_pages(&dpages, root, NULL, NULL);
+ if (unlikely(err))
+ goto out_dpages;
+
+ sigen = au_sigen(sb);
+ sbinfo = au_sbi(sb);
+ for (i = 0; i < dpages.ndpage; i++) {
+ dpage = dpages.dpages + i;
+ dentries = dpage->dentries;
+ ndentry = dpage->ndentry;
+ for (j = 0; j < ndentry; j++) {
+ d = dentries[j];
+ e = au_do_refresh_d(d, sigen, sbinfo, dir_flags,
+ do_idop);
+ if (unlikely(e && !err))
+ err = e;
+			/* go on even if an error happens */
+ }
+ }
+
+out_dpages:
+ au_dpages_free(&dpages);
+out:
+ return err;
+}
+
+static int au_refresh_i(struct super_block *sb, unsigned int do_idop)
+{
+ int err, e;
+ unsigned int sigen;
+ unsigned long long max, ull;
+ struct inode *inode, **array;
+
+ array = au_iarray_alloc(sb, &max);
+ err = PTR_ERR(array);
+ if (IS_ERR(array))
+ goto out;
+
+ err = 0;
+ sigen = au_sigen(sb);
+ for (ull = 0; ull < max; ull++) {
+ inode = array[ull];
+ if (unlikely(!inode))
+ break;
+
+ e = 0;
+ ii_write_lock_child(inode);
+ if (au_iigen(inode, NULL) != sigen) {
+ e = au_refresh_hinode_self(inode);
+ if (unlikely(e)) {
+ au_refresh_iop(inode, /*force_getattr*/1);
+ pr_err("error %d, i%lu\n", e, inode->i_ino);
+ if (!err)
+ err = e;
+ /* go on even if err */
+ }
+ }
+ if (!e && do_idop)
+ au_refresh_iop(inode, /*force_getattr*/0);
+ ii_write_unlock(inode);
+ }
+
+ au_iarray_free(array, max);
+
+out:
+ return err;
+}
+
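+/*
+ * bump the superblock generation and refresh every cached dentry and inode
+ * after the branch configuration has changed.
+ */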
+static void au_remount_refresh(struct super_block *sb, unsigned int do_idop)
+{
+ int err, e;
+ unsigned int udba;
+ aufs_bindex_t bindex, bend;
+ struct dentry *root;
+ struct inode *inode;
+ struct au_branch *br;
+ struct au_sbinfo *sbi;
+
+ au_sigen_inc(sb);
+ sbi = au_sbi(sb);
+ au_fclr_si(sbi, FAILED_REFRESH_DIR);
+
+ root = sb->s_root;
+ DiMustNoWaiters(root);
+ inode = d_inode(root);
+ IiMustNoWaiters(inode);
+
+ udba = au_opt_udba(sb);
+ bend = au_sbend(sb);
+ for (bindex = 0; bindex <= bend; bindex++) {
+ br = au_sbr(sb, bindex);
+ err = au_hnotify_reset_br(udba, br, br->br_perm);
+ if (unlikely(err))
+ AuIOErr("hnotify failed on br %d, %d, ignored\n",
+ bindex, err);
+ /* go on even if err */
+ }
+ au_hn_reset(inode, au_hi_flags(inode, /*isdir*/1));
+
+ if (do_idop) {
+ if (au_ftest_si(sbi, NO_DREVAL)) {
+ AuDebugOn(sb->s_d_op == &aufs_dop_noreval);
+ sb->s_d_op = &aufs_dop_noreval;
+ AuDebugOn(sbi->si_iop_array == aufs_iop_nogetattr);
+ sbi->si_iop_array = aufs_iop_nogetattr;
+ } else {
+ AuDebugOn(sb->s_d_op == &aufs_dop);
+ sb->s_d_op = &aufs_dop;
+ AuDebugOn(sbi->si_iop_array == aufs_iop);
+ sbi->si_iop_array = aufs_iop;
+ }
+ pr_info("reset to %pf and %pf\n",
+ sb->s_d_op, sbi->si_iop_array);
+ }
+
+ di_write_unlock(root);
+ err = au_refresh_d(sb, do_idop);
+ e = au_refresh_i(sb, do_idop);
+ if (unlikely(e && !err))
+ err = e;
+ /* aufs_write_lock() calls ..._child() */
+ di_write_lock_child(root);
+
+ au_cpup_attr_all(inode, /*force*/1);
+
+ if (unlikely(err))
+ AuIOErr("refresh failed, ignored, %d\n", err);
+}
+
+/* stop extra interpretation of errno in mount(8), and strange error messages */
+static int cvt_err(int err)
+{
+ AuTraceErr(err);
+
+ switch (err) {
+ case -ENOENT:
+ case -ENOTDIR:
+ case -EEXIST:
+ case -EIO:
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static int aufs_remount_fs(struct super_block *sb, int *flags, char *data)
+{
+ int err, do_dx;
+ unsigned int mntflags;
+ struct au_opts opts = {
+ .opt = NULL
+ };
+ struct dentry *root;
+ struct inode *inode;
+ struct au_sbinfo *sbinfo;
+
+ err = 0;
+ root = sb->s_root;
+ if (!data || !*data) {
+ err = si_write_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+ if (!err) {
+ di_write_lock_child(root);
+ err = au_opts_verify(sb, *flags, /*pending*/0);
+ aufs_write_unlock(root);
+ }
+ goto out;
+ }
+
+ err = -ENOMEM;
+ opts.opt = (void *)__get_free_page(GFP_NOFS);
+ if (unlikely(!opts.opt))
+ goto out;
+ opts.max_opt = PAGE_SIZE / sizeof(*opts.opt);
+ opts.flags = AuOpts_REMOUNT;
+ opts.sb_flags = *flags;
+
+ /* parse it before aufs lock */
+ err = au_opts_parse(sb, data, &opts);
+ if (unlikely(err))
+ goto out_opts;
+
+ sbinfo = au_sbi(sb);
+ inode = d_inode(root);
+ inode_lock(inode);
+ err = si_write_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+ if (unlikely(err))
+ goto out_mtx;
+ di_write_lock_child(root);
+
+ /* au_opts_remount() may return an error */
+ err = au_opts_remount(sb, &opts);
+ au_opts_free(&opts);
+
+ if (au_ftest_opts(opts.flags, REFRESH))
+ au_remount_refresh(sb, au_ftest_opts(opts.flags, REFRESH_IDOP));
+
+ if (au_ftest_opts(opts.flags, REFRESH_DYAOP)) {
+ mntflags = au_mntflags(sb);
+ do_dx = !!au_opt_test(mntflags, DIO);
+ au_dy_arefresh(do_dx);
+ }
+
+ au_fhsm_wrote_all(sb, /*force*/1); /* ?? */
+ aufs_write_unlock(root);
+
+out_mtx:
+ inode_unlock(inode);
+out_opts:
+ free_page((unsigned long)opts.opt);
+out:
+ err = cvt_err(err);
+ AuTraceErr(err);
+ return err;
+}
+
+static const struct super_operations aufs_sop = {
+ .alloc_inode = aufs_alloc_inode,
+ .destroy_inode = aufs_destroy_inode,
+ /* always deleting, no clearing */
+ .drop_inode = generic_delete_inode,
+ .show_options = aufs_show_options,
+ .statfs = aufs_statfs,
+ .put_super = aufs_put_super,
+ .sync_fs = aufs_sync_fs,
+ .remount_fs = aufs_remount_fs
+};
+
+/* ---------------------------------------------------------------------- */
+
+static int alloc_root(struct super_block *sb)
+{
+ int err;
+ struct inode *inode;
+ struct dentry *root;
+
+ err = -ENOMEM;
+ inode = au_iget_locked(sb, AUFS_ROOT_INO);
+ err = PTR_ERR(inode);
+ if (IS_ERR(inode))
+ goto out;
+
+ inode->i_op = aufs_iop + AuIop_DIR; /* with getattr by default */
+ inode->i_fop = &aufs_dir_fop;
+ inode->i_mode = S_IFDIR;
+ set_nlink(inode, 2);
+ unlock_new_inode(inode);
+
+	root = d_make_root(inode);
+	err = -ENOMEM;
+	if (unlikely(!root))
+		goto out;
+ err = PTR_ERR(root);
+ if (IS_ERR(root))
+ goto out;
+
+ err = au_di_init(root);
+ if (!err) {
+ sb->s_root = root;
+ return 0; /* success */
+ }
+ dput(root);
+
+out:
+ return err;
+}
+
+static int aufs_fill_super(struct super_block *sb, void *raw_data,
+ int silent __maybe_unused)
+{
+ int err;
+ struct au_opts opts = {
+ .opt = NULL
+ };
+ struct au_sbinfo *sbinfo;
+ struct dentry *root;
+ struct inode *inode;
+ char *arg = raw_data;
+
+ if (unlikely(!arg || !*arg)) {
+ err = -EINVAL;
+ pr_err("no arg\n");
+ goto out;
+ }
+
+ err = -ENOMEM;
+ opts.opt = (void *)__get_free_page(GFP_NOFS);
+ if (unlikely(!opts.opt))
+ goto out;
+ opts.max_opt = PAGE_SIZE / sizeof(*opts.opt);
+ opts.sb_flags = sb->s_flags;
+
+ err = au_si_alloc(sb);
+ if (unlikely(err))
+ goto out_opts;
+ sbinfo = au_sbi(sb);
+
+ /* all timestamps always follow the ones on the branch */
+ sb->s_flags |= MS_NOATIME | MS_NODIRATIME;
+ sb->s_op = &aufs_sop;
+ sb->s_d_op = &aufs_dop;
+ sb->s_magic = AUFS_SUPER_MAGIC;
+ sb->s_maxbytes = 0;
+ sb->s_stack_depth = 1;
+ au_export_init(sb);
+ /* au_xattr_init(sb); */
+
+ err = alloc_root(sb);
+ if (unlikely(err)) {
+ si_write_unlock(sb);
+ goto out_info;
+ }
+ root = sb->s_root;
+ inode = d_inode(root);
+
+ /*
+	 * actually the options could be parsed here regardless of the aufs
+	 * lock, but at remount time parsing must be done before taking the
+	 * aufs lock, so we follow the same rule here.
+ */
+ ii_write_lock_parent(inode);
+ aufs_write_unlock(root);
+ err = au_opts_parse(sb, arg, &opts);
+ if (unlikely(err))
+ goto out_root;
+
+ /* lock vfs_inode first, then aufs. */
+ inode_lock(inode);
+ aufs_write_lock(root);
+ err = au_opts_mount(sb, &opts);
+ au_opts_free(&opts);
+ if (!err && au_ftest_si(sbinfo, NO_DREVAL)) {
+ sb->s_d_op = &aufs_dop_noreval;
+ pr_info("%pf\n", sb->s_d_op);
+ au_refresh_dop(root, /*force_reval*/0);
+ sbinfo->si_iop_array = aufs_iop_nogetattr;
+ au_refresh_iop(inode, /*force_getattr*/0);
+ }
+ aufs_write_unlock(root);
+ inode_unlock(inode);
+ if (!err)
+ goto out_opts; /* success */
+
+out_root:
+ dput(root);
+ sb->s_root = NULL;
+out_info:
+ dbgaufs_si_fin(sbinfo);
+ kobject_put(&sbinfo->si_kobj);
+ sb->s_fs_info = NULL;
+out_opts:
+ free_page((unsigned long)opts.opt);
+out:
+ AuTraceErr(err);
+ err = cvt_err(err);
+ AuTraceErr(err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct dentry *aufs_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name __maybe_unused,
+ void *raw_data)
+{
+ struct dentry *root;
+ struct super_block *sb;
+
+ /* all timestamps always follow the ones on the branch */
+ /* mnt->mnt_flags |= MNT_NOATIME | MNT_NODIRATIME; */
+ root = mount_nodev(fs_type, flags, raw_data, aufs_fill_super);
+ if (IS_ERR(root))
+ goto out;
+
+ sb = root->d_sb;
+ si_write_lock(sb, !AuLock_FLUSH);
+ sysaufs_brs_add(sb, 0);
+ si_write_unlock(sb);
+ au_sbilist_add(sb);
+
+out:
+ return root;
+}
+
+static void aufs_kill_sb(struct super_block *sb)
+{
+ struct au_sbinfo *sbinfo;
+
+ sbinfo = au_sbi(sb);
+ if (sbinfo) {
+ au_sbilist_del(sb);
+ aufs_write_lock(sb->s_root);
+ au_fhsm_fin(sb);
+ if (sbinfo->si_wbr_create_ops->fin)
+ sbinfo->si_wbr_create_ops->fin(sb);
+ if (au_opt_test(sbinfo->si_mntflags, UDBA_HNOTIFY)) {
+ au_opt_set_udba(sbinfo->si_mntflags, UDBA_NONE);
+ au_remount_refresh(sb, /*do_idop*/0);
+ }
+ if (au_opt_test(sbinfo->si_mntflags, PLINK))
+ au_plink_put(sb, /*verbose*/1);
+ au_xino_clr(sb);
+ sbinfo->si_sb = NULL;
+ aufs_write_unlock(sb->s_root);
+ au_nwt_flush(&sbinfo->si_nowait);
+ }
+ kill_anon_super(sb);
+}
+
+struct file_system_type aufs_fs_type = {
+ .name = AUFS_FSTYPE,
+ /* a race between rename and others */
+ .fs_flags = FS_RENAME_DOES_D_MOVE,
+ .mount = aufs_mount,
+ .kill_sb = aufs_kill_sb,
+ /* no need to __module_get() and module_put(). */
+ .owner = THIS_MODULE,
+};
diff --git a/fs/aufs/super.h b/fs/aufs/super.h
new file mode 100644
index 000000000..ba2855ad6
--- /dev/null
+++ b/fs/aufs/super.h
@@ -0,0 +1,619 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * super_block operations
+ */
+
+#ifndef __AUFS_SUPER_H__
+#define __AUFS_SUPER_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+#include <linux/kobject.h>
+#include "rwsem.h"
+#include "spl.h"
+#include "wkq.h"
+
+/* policies to select one among multiple writable branches */
+struct au_wbr_copyup_operations {
+ int (*copyup)(struct dentry *dentry);
+};
+
+#define AuWbr_DIR 1 /* target is a dir */
+#define AuWbr_PARENT (1 << 1) /* always require a parent */
+
+#define au_ftest_wbr(flags, name) ((flags) & AuWbr_##name)
+#define au_fset_wbr(flags, name) { (flags) |= AuWbr_##name; }
+#define au_fclr_wbr(flags, name) { (flags) &= ~AuWbr_##name; }
+
+struct au_wbr_create_operations {
+ int (*create)(struct dentry *dentry, unsigned int flags);
+ int (*init)(struct super_block *sb);
+ int (*fin)(struct super_block *sb);
+};
+
+struct au_wbr_mfs {
+ struct mutex mfs_lock; /* protect this structure */
+ unsigned long mfs_jiffy;
+ unsigned long mfs_expire;
+ aufs_bindex_t mfs_bindex;
+
+ unsigned long long mfsrr_bytes;
+ unsigned long long mfsrr_watermark;
+};
+
+struct pseudo_link {
+ union {
+ struct hlist_node hlist;
+ struct rcu_head rcu;
+ };
+ struct inode *inode;
+};
+
+#define AuPlink_NHASH 100
+static inline int au_plink_hash(ino_t ino)
+{
+ return ino % AuPlink_NHASH;
+}
+
+/* File-based Hierarchical Storage Management */
+struct au_fhsm {
+#ifdef CONFIG_AUFS_FHSM
+ /* allow only one process who can receive the notification */
+ spinlock_t fhsm_spin;
+ pid_t fhsm_pid;
+ wait_queue_head_t fhsm_wqh;
+ atomic_t fhsm_readable;
+
+ /* these are protected by si_rwsem */
+ unsigned long fhsm_expire;
+ aufs_bindex_t fhsm_bottom;
+#endif
+};
+
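+/*
+ * per-pid bitmap used to detect recursive si locking (cf. si_pid_set() and
+ * si_pid_clr() in super.h), kept in chunks of AU_PIDSTEP bits.
+ */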
+#define AU_PIDSTEP (int)(BITS_TO_LONGS(PID_MAX_DEFAULT) * BITS_PER_LONG)
+#define AU_NPIDMAP (int)DIV_ROUND_UP(PID_MAX_LIMIT, AU_PIDSTEP)
+struct au_si_pid {
+ unsigned long *pid_bitmap[AU_NPIDMAP];
+ struct mutex pid_mtx;
+};
+
+struct au_branch;
+struct au_sbinfo {
+ /* nowait tasks in the system-wide workqueue */
+ struct au_nowait_tasks si_nowait;
+
+ /*
+	 * tried sb->s_umount, but it failed due to the lock dependency with
+	 * i_mutex.  a dedicated rwsem for au_sbinfo is necessary.
+ */
+ struct au_rwsem si_rwsem;
+
+ /* prevent recursive locking in deleting inode */
+ struct au_si_pid au_si_pid;
+
+ /*
+	 * dirty approach to protect sb->s_inodes and ->s_files (gone) from
+ * remount.
+ */
+ atomic_long_t si_ninodes, si_nfiles;
+
+ /* branch management */
+ unsigned int si_generation;
+
+ /* see AuSi_ flags */
+ unsigned char au_si_status;
+
+ aufs_bindex_t si_bend;
+
+ /* dirty trick to keep br_id plus */
+ unsigned int si_last_br_id :
+ sizeof(aufs_bindex_t) * BITS_PER_BYTE - 1;
+ struct au_branch **si_branch;
+
+ /* policy to select a writable branch */
+ unsigned char si_wbr_copyup;
+ unsigned char si_wbr_create;
+ struct au_wbr_copyup_operations *si_wbr_copyup_ops;
+ struct au_wbr_create_operations *si_wbr_create_ops;
+
+ /* round robin */
+ atomic_t si_wbr_rr_next;
+
+ /* most free space */
+ struct au_wbr_mfs si_wbr_mfs;
+
+ /* File-based Hierarchical Storage Management */
+ struct au_fhsm si_fhsm;
+
+ /* mount flags */
+ /* include/asm-ia64/siginfo.h defines a macro named si_flags */
+ unsigned int si_mntflags;
+
+ /* external inode number (bitmap and translation table) */
+ vfs_readf_t si_xread;
+ vfs_writef_t si_xwrite;
+ struct file *si_xib;
+ struct mutex si_xib_mtx; /* protect xib members */
+ unsigned long *si_xib_buf;
+ unsigned long si_xib_last_pindex;
+ int si_xib_next_bit;
+ aufs_bindex_t si_xino_brid;
+ unsigned long si_xino_jiffy;
+ unsigned long si_xino_expire;
+ /* reserved for future use */
+ /* unsigned long long si_xib_limit; */ /* Max xib file size */
+
+#ifdef CONFIG_AUFS_EXPORT
+ /* i_generation */
+ struct file *si_xigen;
+ atomic_t si_xigen_next;
+#endif
+
+	/* dirty trick to support atomic_open */
+ struct au_sphlhead si_aopen;
+
+ /* vdir parameters */
+ unsigned long si_rdcache; /* max cache time in jiffies */
+ unsigned int si_rdblk; /* deblk size */
+ unsigned int si_rdhash; /* hash size */
+
+ /*
+	 * If the number of whiteouts is larger than si_dirwh, leave all of
+	 * them after au_whtmp_ren to reduce the cost of rmdir(2).
+	 * A future fsck.aufs or a kernel thread will remove them later.
+ * Otherwise, remove all whiteouts and the dir in rmdir(2).
+ */
+ unsigned int si_dirwh;
+
+ /* pseudo_link list */
+ struct au_sphlhead si_plink[AuPlink_NHASH];
+ wait_queue_head_t si_plink_wq;
+ spinlock_t si_plink_maint_lock;
+ pid_t si_plink_maint_pid;
+
+ /* file list */
+ struct au_sphlhead si_files;
+
+ /* with/without getattr, brother of sb->s_d_op */
+ struct inode_operations *si_iop_array;
+
+ /*
+ * sysfs and lifetime management.
+	 * this is not a small structure and it may waste memory when sysfs is
+	 * disabled, particularly when many aufs-es are mounted.
+	 * but enabling sysfs is the common case.
+ */
+ struct kobject si_kobj;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *si_dbgaufs;
+ struct dentry *si_dbgaufs_plink;
+ struct dentry *si_dbgaufs_xib;
+#ifdef CONFIG_AUFS_EXPORT
+ struct dentry *si_dbgaufs_xigen;
+#endif
+#endif
+
+#ifdef CONFIG_AUFS_SBILIST
+ struct list_head si_list;
+#endif
+
+ /* dirty, necessary for unmounting, sysfs and sysrq */
+ struct super_block *si_sb;
+};
+
+/* sbinfo status flags */
+/*
+ * set true when refresh_dirs() failed at remount time.
+ * then try refreshing dirs at access time again.
+ * if it is false, refreshing dirs at access time is unnecessary.
+ */
+#define AuSi_FAILED_REFRESH_DIR 1
+#define AuSi_FHSM (1 << 1) /* fhsm is active now */
+#define AuSi_NO_DREVAL (1 << 2) /* disable all d_revalidate */
+
+#ifndef CONFIG_AUFS_FHSM
+#undef AuSi_FHSM
+#define AuSi_FHSM 0
+#endif
+
+static inline unsigned char au_do_ftest_si(struct au_sbinfo *sbi,
+ unsigned int flag)
+{
+ AuRwMustAnyLock(&sbi->si_rwsem);
+ return sbi->au_si_status & flag;
+}
+#define au_ftest_si(sbinfo, name) au_do_ftest_si(sbinfo, AuSi_##name)
+#define au_fset_si(sbinfo, name) do { \
+ AuRwMustWriteLock(&(sbinfo)->si_rwsem); \
+ (sbinfo)->au_si_status |= AuSi_##name; \
+} while (0)
+#define au_fclr_si(sbinfo, name) do { \
+ AuRwMustWriteLock(&(sbinfo)->si_rwsem); \
+ (sbinfo)->au_si_status &= ~AuSi_##name; \
+} while (0)
+
+/* ---------------------------------------------------------------------- */
+
+/* policy to select one among writable branches */
+#define AuWbrCopyup(sbinfo, ...) \
+ ((sbinfo)->si_wbr_copyup_ops->copyup(__VA_ARGS__))
+#define AuWbrCreate(sbinfo, ...) \
+ ((sbinfo)->si_wbr_create_ops->create(__VA_ARGS__))
+
+/* flags for si_read_lock()/aufs_read_lock()/di_read_lock() */
+#define AuLock_DW 1 /* write-lock dentry */
+#define AuLock_IR (1 << 1) /* read-lock inode */
+#define AuLock_IW (1 << 2) /* write-lock inode */
+#define AuLock_FLUSH (1 << 3) /* wait for 'nowait' tasks */
+#define AuLock_DIRS (1 << 4) /* target is a pair of dirs */
+#define AuLock_NOPLM (1 << 5) /* return err in plm mode */
+#define AuLock_NOPLMW (1 << 6) /* wait for plm mode ends */
+#define AuLock_GEN (1 << 7) /* test digen/iigen */
+#define au_ftest_lock(flags, name) ((flags) & AuLock_##name)
+#define au_fset_lock(flags, name) \
+ do { (flags) |= AuLock_##name; } while (0)
+#define au_fclr_lock(flags, name) \
+ do { (flags) &= ~AuLock_##name; } while (0)
+
+/* ---------------------------------------------------------------------- */
+
+/* super.c */
+extern struct file_system_type aufs_fs_type;
+struct inode *au_iget_locked(struct super_block *sb, ino_t ino);
+typedef unsigned long long (*au_arraycb_t)(struct super_block *sb, void *array,
+ unsigned long long max, void *arg);
+void *au_array_alloc(unsigned long long *hint, au_arraycb_t cb,
+ struct super_block *sb, void *arg);
+struct inode **au_iarray_alloc(struct super_block *sb, unsigned long long *max);
+void au_iarray_free(struct inode **a, unsigned long long max);
+
+/* sbinfo.c */
+void au_si_free(struct kobject *kobj);
+int au_si_alloc(struct super_block *sb);
+int au_sbr_realloc(struct au_sbinfo *sbinfo, int nbr);
+
+unsigned int au_sigen_inc(struct super_block *sb);
+aufs_bindex_t au_new_br_id(struct super_block *sb);
+
+int si_read_lock(struct super_block *sb, int flags);
+int si_write_lock(struct super_block *sb, int flags);
+int aufs_read_lock(struct dentry *dentry, int flags);
+void aufs_read_unlock(struct dentry *dentry, int flags);
+void aufs_write_lock(struct dentry *dentry);
+void aufs_write_unlock(struct dentry *dentry);
+int aufs_read_and_write_lock2(struct dentry *d1, struct dentry *d2, int flags);
+void aufs_read_and_write_unlock2(struct dentry *d1, struct dentry *d2);
+
+/* wbr_policy.c */
+extern struct au_wbr_copyup_operations au_wbr_copyup_ops[];
+extern struct au_wbr_create_operations au_wbr_create_ops[];
+int au_cpdown_dirs(struct dentry *dentry, aufs_bindex_t bdst);
+int au_wbr_nonopq(struct dentry *dentry, aufs_bindex_t bindex);
+int au_wbr_do_copyup_bu(struct dentry *dentry, aufs_bindex_t bstart);
+
+/* mvdown.c */
+int au_mvdown(struct dentry *dentry, struct aufs_mvdown __user *arg);
+
+#ifdef CONFIG_AUFS_FHSM
+/* fhsm.c */
+
+static inline pid_t au_fhsm_pid(struct au_fhsm *fhsm)
+{
+ pid_t pid;
+
+ spin_lock(&fhsm->fhsm_spin);
+ pid = fhsm->fhsm_pid;
+ spin_unlock(&fhsm->fhsm_spin);
+
+ return pid;
+}
+
+void au_fhsm_wrote(struct super_block *sb, aufs_bindex_t bindex, int force);
+void au_fhsm_wrote_all(struct super_block *sb, int force);
+int au_fhsm_fd(struct super_block *sb, int oflags);
+int au_fhsm_br_alloc(struct au_branch *br);
+void au_fhsm_set_bottom(struct super_block *sb, aufs_bindex_t bindex);
+void au_fhsm_fin(struct super_block *sb);
+void au_fhsm_init(struct au_sbinfo *sbinfo);
+void au_fhsm_set(struct au_sbinfo *sbinfo, unsigned int sec);
+void au_fhsm_show(struct seq_file *seq, struct au_sbinfo *sbinfo);
+#else
+AuStubVoid(au_fhsm_wrote, struct super_block *sb, aufs_bindex_t bindex,
+ int force)
+AuStubVoid(au_fhsm_wrote_all, struct super_block *sb, int force)
+AuStub(int, au_fhsm_fd, return -EOPNOTSUPP, struct super_block *sb, int oflags)
+AuStub(pid_t, au_fhsm_pid, return 0, struct au_fhsm *fhsm)
+AuStubInt0(au_fhsm_br_alloc, struct au_branch *br)
+AuStubVoid(au_fhsm_set_bottom, struct super_block *sb, aufs_bindex_t bindex)
+AuStubVoid(au_fhsm_fin, struct super_block *sb)
+AuStubVoid(au_fhsm_init, struct au_sbinfo *sbinfo)
+AuStubVoid(au_fhsm_set, struct au_sbinfo *sbinfo, unsigned int sec)
+AuStubVoid(au_fhsm_show, struct seq_file *seq, struct au_sbinfo *sbinfo)
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct au_sbinfo *au_sbi(struct super_block *sb)
+{
+ return sb->s_fs_info;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_EXPORT
+int au_test_nfsd(void);
+void au_export_init(struct super_block *sb);
+void au_xigen_inc(struct inode *inode);
+int au_xigen_new(struct inode *inode);
+int au_xigen_set(struct super_block *sb, struct file *base);
+void au_xigen_clr(struct super_block *sb);
+
+static inline int au_busy_or_stale(void)
+{
+ if (!au_test_nfsd())
+ return -EBUSY;
+ return -ESTALE;
+}
+#else
+AuStubInt0(au_test_nfsd, void)
+AuStubVoid(au_export_init, struct super_block *sb)
+AuStubVoid(au_xigen_inc, struct inode *inode)
+AuStubInt0(au_xigen_new, struct inode *inode)
+AuStubInt0(au_xigen_set, struct super_block *sb, struct file *base)
+AuStubVoid(au_xigen_clr, struct super_block *sb)
+AuStub(int, au_busy_or_stale, return -EBUSY, void)
+#endif /* CONFIG_AUFS_EXPORT */
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_SBILIST
+/* module.c */
+extern struct au_splhead au_sbilist;
+
+static inline void au_sbilist_init(void)
+{
+ au_spl_init(&au_sbilist);
+}
+
+static inline void au_sbilist_add(struct super_block *sb)
+{
+ au_spl_add(&au_sbi(sb)->si_list, &au_sbilist);
+}
+
+static inline void au_sbilist_del(struct super_block *sb)
+{
+ au_spl_del(&au_sbi(sb)->si_list, &au_sbilist);
+}
+
+#ifdef CONFIG_AUFS_MAGIC_SYSRQ
+static inline void au_sbilist_lock(void)
+{
+ spin_lock(&au_sbilist.spin);
+}
+
+static inline void au_sbilist_unlock(void)
+{
+ spin_unlock(&au_sbilist.spin);
+}
+#define AuGFP_SBILIST GFP_ATOMIC
+#else
+AuStubVoid(au_sbilist_lock, void)
+AuStubVoid(au_sbilist_unlock, void)
+#define AuGFP_SBILIST GFP_NOFS
+#endif /* CONFIG_AUFS_MAGIC_SYSRQ */
+#else
+AuStubVoid(au_sbilist_init, void)
+AuStubVoid(au_sbilist_add, struct super_block *sb)
+AuStubVoid(au_sbilist_del, struct super_block *sb)
+AuStubVoid(au_sbilist_lock, void)
+AuStubVoid(au_sbilist_unlock, void)
+#define AuGFP_SBILIST GFP_NOFS
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+static inline void dbgaufs_si_null(struct au_sbinfo *sbinfo)
+{
+ /*
+ * This function is a dynamic '__init' function actually,
+ * so the tiny check for si_rwsem is unnecessary.
+ */
+ /* AuRwMustWriteLock(&sbinfo->si_rwsem); */
+#ifdef CONFIG_DEBUG_FS
+ sbinfo->si_dbgaufs = NULL;
+ sbinfo->si_dbgaufs_plink = NULL;
+ sbinfo->si_dbgaufs_xib = NULL;
+#ifdef CONFIG_AUFS_EXPORT
+ sbinfo->si_dbgaufs_xigen = NULL;
+#endif
+#endif
+}
+
+/* ---------------------------------------------------------------------- */
+
+static inline void si_pid_idx_bit(int *idx, pid_t *bit)
+{
+ /* the origin of pid is 1, but the bitmap's is 0 */
+ *bit = current->pid - 1;
+ *idx = *bit / AU_PIDSTEP;
+ *bit %= AU_PIDSTEP;
+}
+
+static inline int si_pid_test(struct super_block *sb)
+{
+ pid_t bit;
+ int idx;
+ unsigned long *bitmap;
+
+ si_pid_idx_bit(&idx, &bit);
+ bitmap = au_sbi(sb)->au_si_pid.pid_bitmap[idx];
+ if (bitmap)
+ return test_bit(bit, bitmap);
+ return 0;
+}
+
+static inline void si_pid_clr(struct super_block *sb)
+{
+ pid_t bit;
+ int idx;
+ unsigned long *bitmap;
+
+ si_pid_idx_bit(&idx, &bit);
+ bitmap = au_sbi(sb)->au_si_pid.pid_bitmap[idx];
+ BUG_ON(!bitmap);
+ AuDebugOn(!test_bit(bit, bitmap));
+ clear_bit(bit, bitmap);
+ /* smp_mb(); */
+}
+
+void si_pid_set(struct super_block *sb);
+
+/* ---------------------------------------------------------------------- */
+
+/* lock superblock. mainly for entry point functions */
+/*
+ * __si_read_lock, __si_write_lock,
+ * __si_read_unlock, __si_write_unlock, __si_downgrade_lock
+ */
+AuSimpleRwsemFuncs(__si, struct super_block *sb, &au_sbi(sb)->si_rwsem);
+
+#define SiMustNoWaiters(sb) AuRwMustNoWaiters(&au_sbi(sb)->si_rwsem)
+#define SiMustAnyLock(sb) AuRwMustAnyLock(&au_sbi(sb)->si_rwsem)
+#define SiMustWriteLock(sb) AuRwMustWriteLock(&au_sbi(sb)->si_rwsem)
+
+static inline void si_noflush_read_lock(struct super_block *sb)
+{
+ __si_read_lock(sb);
+ si_pid_set(sb);
+}
+
+static inline int si_noflush_read_trylock(struct super_block *sb)
+{
+ int locked;
+
+ locked = __si_read_trylock(sb);
+ if (locked)
+ si_pid_set(sb);
+ return locked;
+}
+
+static inline void si_noflush_write_lock(struct super_block *sb)
+{
+ __si_write_lock(sb);
+ si_pid_set(sb);
+}
+
+static inline int si_noflush_write_trylock(struct super_block *sb)
+{
+ int locked;
+
+ locked = __si_write_trylock(sb);
+ if (locked)
+ si_pid_set(sb);
+ return locked;
+}
+
+#if 0 /* reserved */
+static inline int si_read_trylock(struct super_block *sb, int flags)
+{
+ if (au_ftest_lock(flags, FLUSH))
+ au_nwt_flush(&au_sbi(sb)->si_nowait);
+ return si_noflush_read_trylock(sb);
+}
+#endif
+
+static inline void si_read_unlock(struct super_block *sb)
+{
+ si_pid_clr(sb);
+ __si_read_unlock(sb);
+}
+
+#if 0 /* reserved */
+static inline int si_write_trylock(struct super_block *sb, int flags)
+{
+ if (au_ftest_lock(flags, FLUSH))
+ au_nwt_flush(&au_sbi(sb)->si_nowait);
+ return si_noflush_write_trylock(sb);
+}
+#endif
+
+static inline void si_write_unlock(struct super_block *sb)
+{
+ si_pid_clr(sb);
+ __si_write_unlock(sb);
+}
+
+#if 0 /* reserved */
+static inline void si_downgrade_lock(struct super_block *sb)
+{
+ __si_downgrade_lock(sb);
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+static inline aufs_bindex_t au_sbend(struct super_block *sb)
+{
+ SiMustAnyLock(sb);
+ return au_sbi(sb)->si_bend;
+}
+
+static inline unsigned int au_mntflags(struct super_block *sb)
+{
+ SiMustAnyLock(sb);
+ return au_sbi(sb)->si_mntflags;
+}
+
+static inline unsigned int au_sigen(struct super_block *sb)
+{
+ SiMustAnyLock(sb);
+ return au_sbi(sb)->si_generation;
+}
+
+static inline void au_ninodes_inc(struct super_block *sb)
+{
+ atomic_long_inc(&au_sbi(sb)->si_ninodes);
+}
+
+static inline void au_ninodes_dec(struct super_block *sb)
+{
+ AuDebugOn(!atomic_long_read(&au_sbi(sb)->si_ninodes));
+ atomic_long_dec(&au_sbi(sb)->si_ninodes);
+}
+
+static inline void au_nfiles_inc(struct super_block *sb)
+{
+ atomic_long_inc(&au_sbi(sb)->si_nfiles);
+}
+
+static inline void au_nfiles_dec(struct super_block *sb)
+{
+ AuDebugOn(!atomic_long_read(&au_sbi(sb)->si_nfiles));
+ atomic_long_dec(&au_sbi(sb)->si_nfiles);
+}
+
+static inline struct au_branch *au_sbr(struct super_block *sb,
+ aufs_bindex_t bindex)
+{
+ SiMustAnyLock(sb);
+ return au_sbi(sb)->si_branch[0 + bindex];
+}
+
+static inline void au_xino_brid_set(struct super_block *sb, aufs_bindex_t brid)
+{
+ SiMustWriteLock(sb);
+ au_sbi(sb)->si_xino_brid = brid;
+}
+
+static inline aufs_bindex_t au_xino_brid(struct super_block *sb)
+{
+ SiMustAnyLock(sb);
+ return au_sbi(sb)->si_xino_brid;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_SUPER_H__ */
diff --git a/fs/aufs/sysaufs.c b/fs/aufs/sysaufs.c
new file mode 100644
index 000000000..8ec10fb31
--- /dev/null
+++ b/fs/aufs/sysaufs.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * sysfs interface and lifetime management
+ * they are necessary regardless of whether sysfs is disabled.
+ */
+
+#include <linux/random.h>
+#include "aufs.h"
+
+unsigned long sysaufs_si_mask;
+struct kset *sysaufs_kset;
+
+#define AuSiAttr(_name) { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .show = sysaufs_si_##_name, \
+}
+
+static struct sysaufs_si_attr sysaufs_si_attr_xi_path = AuSiAttr(xi_path);
+struct attribute *sysaufs_si_attrs[] = {
+ &sysaufs_si_attr_xi_path.attr,
+ NULL,
+};
+
+static const struct sysfs_ops au_sbi_ops = {
+ .show = sysaufs_si_show
+};
+
+static struct kobj_type au_sbi_ktype = {
+ .release = au_si_free,
+ .sysfs_ops = &au_sbi_ops,
+ .default_attrs = sysaufs_si_attrs
+};
+
+/* ---------------------------------------------------------------------- */
+
+int sysaufs_si_init(struct au_sbinfo *sbinfo)
+{
+ int err;
+
+ sbinfo->si_kobj.kset = sysaufs_kset;
+ /* cf. sysaufs_name() */
+ err = kobject_init_and_add
+ (&sbinfo->si_kobj, &au_sbi_ktype, /*&sysaufs_kset->kobj*/NULL,
+ SysaufsSiNamePrefix "%lx", sysaufs_si_id(sbinfo));
+
+ dbgaufs_si_null(sbinfo);
+ if (!err) {
+ err = dbgaufs_si_init(sbinfo);
+ if (unlikely(err))
+ kobject_put(&sbinfo->si_kobj);
+ }
+ return err;
+}
+
+void sysaufs_fin(void)
+{
+ dbgaufs_fin();
+ sysfs_remove_group(&sysaufs_kset->kobj, sysaufs_attr_group);
+ kset_unregister(sysaufs_kset);
+}
+
+int __init sysaufs_init(void)
+{
+ int err;
+
+ do {
+ get_random_bytes(&sysaufs_si_mask, sizeof(sysaufs_si_mask));
+ } while (!sysaufs_si_mask);
+
+ err = -EINVAL;
+ sysaufs_kset = kset_create_and_add(AUFS_NAME, NULL, fs_kobj);
+ if (unlikely(!sysaufs_kset))
+ goto out;
+ err = PTR_ERR(sysaufs_kset);
+ if (IS_ERR(sysaufs_kset))
+ goto out;
+ err = sysfs_create_group(&sysaufs_kset->kobj, sysaufs_attr_group);
+ if (unlikely(err)) {
+ kset_unregister(sysaufs_kset);
+ goto out;
+ }
+
+ err = dbgaufs_init();
+ if (unlikely(err))
+ sysaufs_fin();
+out:
+ return err;
+}
diff --git a/fs/aufs/sysaufs.h b/fs/aufs/sysaufs.h
new file mode 100644
index 000000000..1f799835e
--- /dev/null
+++ b/fs/aufs/sysaufs.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * sysfs interface and mount lifetime management
+ */
+
+#ifndef __SYSAUFS_H__
+#define __SYSAUFS_H__
+
+#ifdef __KERNEL__
+
+#include <linux/sysfs.h>
+#include "module.h"
+
+struct super_block;
+struct au_sbinfo;
+
+struct sysaufs_si_attr {
+ struct attribute attr;
+ int (*show)(struct seq_file *seq, struct super_block *sb);
+};
+
+/* ---------------------------------------------------------------------- */
+
+/* sysaufs.c */
+extern unsigned long sysaufs_si_mask;
+extern struct kset *sysaufs_kset;
+extern struct attribute *sysaufs_si_attrs[];
+int sysaufs_si_init(struct au_sbinfo *sbinfo);
+int __init sysaufs_init(void);
+void sysaufs_fin(void);
+
+/* ---------------------------------------------------------------------- */
+
+/* some people don't like to expose a raw kernel pointer */
+static inline unsigned long sysaufs_si_id(struct au_sbinfo *sbinfo)
+{
+ return sysaufs_si_mask ^ (unsigned long)sbinfo;
+}
+
+#define SysaufsSiNamePrefix "si_"
+#define SysaufsSiNameLen (sizeof(SysaufsSiNamePrefix) + 16)
+static inline void sysaufs_name(struct au_sbinfo *sbinfo, char *name)
+{
+ snprintf(name, SysaufsSiNameLen, SysaufsSiNamePrefix "%lx",
+ sysaufs_si_id(sbinfo));
+}
+
+struct au_branch;
+#ifdef CONFIG_SYSFS
+/* sysfs.c */
+extern struct attribute_group *sysaufs_attr_group;
+
+int sysaufs_si_xi_path(struct seq_file *seq, struct super_block *sb);
+ssize_t sysaufs_si_show(struct kobject *kobj, struct attribute *attr,
+ char *buf);
+long au_brinfo_ioctl(struct file *file, unsigned long arg);
+#ifdef CONFIG_COMPAT
+long au_brinfo_compat_ioctl(struct file *file, unsigned long arg);
+#endif
+
+void sysaufs_br_init(struct au_branch *br);
+void sysaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex);
+void sysaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex);
+
+#define sysaufs_brs_init() do {} while (0)
+
+#else
+#define sysaufs_attr_group NULL
+
+AuStubInt0(sysaufs_si_xi_path, struct seq_file *seq, struct super_block *sb)
+AuStub(ssize_t, sysaufs_si_show, return 0, struct kobject *kobj,
+ struct attribute *attr, char *buf)
+AuStubVoid(sysaufs_br_init, struct au_branch *br)
+AuStubVoid(sysaufs_brs_add, struct super_block *sb, aufs_bindex_t bindex)
+AuStubVoid(sysaufs_brs_del, struct super_block *sb, aufs_bindex_t bindex)
+
+static inline void sysaufs_brs_init(void)
+{
+ sysaufs_brs = 0;
+}
+
+#endif /* CONFIG_SYSFS */
+
+#endif /* __KERNEL__ */
+#endif /* __SYSAUFS_H__ */
diff --git a/fs/aufs/sysfs.c b/fs/aufs/sysfs.c
new file mode 100644
index 000000000..ed42f53d0
--- /dev/null
+++ b/fs/aufs/sysfs.c
@@ -0,0 +1,340 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * sysfs interface
+ */
+
+#include <linux/compat.h>
+#include <linux/seq_file.h>
+#include "aufs.h"
+
+static struct attribute *au_attr[] = {
+ NULL, /* need to NULL terminate the list of attributes */
+};
+
+static struct attribute_group sysaufs_attr_group_body = {
+ .attrs = au_attr
+};
+
+struct attribute_group *sysaufs_attr_group = &sysaufs_attr_group_body;
+
+/* ---------------------------------------------------------------------- */
+
+int sysaufs_si_xi_path(struct seq_file *seq, struct super_block *sb)
+{
+ int err;
+
+ SiMustAnyLock(sb);
+
+ err = 0;
+ if (au_opt_test(au_mntflags(sb), XINO)) {
+ err = au_xino_path(seq, au_sbi(sb)->si_xib);
+ seq_putc(seq, '\n');
+ }
+ return err;
+}
+
+/*
+ * the lifetime of a branch is independent of its entry under sysfs.
+ * sysfs handles the lifetime of the entry, and never calls ->show() after it is
+ * unlinked.
+ */
+static int sysaufs_si_br(struct seq_file *seq, struct super_block *sb,
+ aufs_bindex_t bindex, int idx)
+{
+ int err;
+ struct path path;
+ struct dentry *root;
+ struct au_branch *br;
+ au_br_perm_str_t perm;
+
+ AuDbg("b%d\n", bindex);
+
+ err = 0;
+ root = sb->s_root;
+ di_read_lock_parent(root, !AuLock_IR);
+ br = au_sbr(sb, bindex);
+
+ switch (idx) {
+ case AuBrSysfs_BR:
+ path.mnt = au_br_mnt(br);
+ path.dentry = au_h_dptr(root, bindex);
+ err = au_seq_path(seq, &path);
+ if (!err) {
+ au_optstr_br_perm(&perm, br->br_perm);
+ seq_printf(seq, "=%s\n", perm.a);
+ }
+ break;
+ case AuBrSysfs_BRID:
+ seq_printf(seq, "%d\n", br->br_id);
+ break;
+ }
+ di_read_unlock(root, !AuLock_IR);
+ if (unlikely(err || seq_has_overflowed(seq)))
+ err = -E2BIG;
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
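+/*
+ * build a minimal seq_file over the given buffer so that the ordinary
+ * seq_*() helpers can be reused for the sysfs show methods.
+ */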
+static struct seq_file *au_seq(char *p, ssize_t len)
+{
+ struct seq_file *seq;
+
+ seq = kzalloc(sizeof(*seq), GFP_NOFS);
+ if (seq) {
+ /* mutex_init(&seq.lock); */
+ seq->buf = p;
+ seq->size = len;
+ return seq; /* success */
+ }
+
+ seq = ERR_PTR(-ENOMEM);
+ return seq;
+}
+
+#define SysaufsBr_PREFIX "br"
+#define SysaufsBrid_PREFIX "brid"
+
+/* todo: file size may exceed PAGE_SIZE */
+ssize_t sysaufs_si_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ ssize_t err;
+ int idx;
+ long l;
+ aufs_bindex_t bend;
+ struct au_sbinfo *sbinfo;
+ struct super_block *sb;
+ struct seq_file *seq;
+ char *name;
+ struct attribute **cattr;
+
+ sbinfo = container_of(kobj, struct au_sbinfo, si_kobj);
+ sb = sbinfo->si_sb;
+
+ /*
+ * prevent a race condition between sysfs and aufs.
+ * for instance, sysfs_file_read() calls sysfs_get_active_two() which
+ * prohibits maintaining the sysfs entries.
+	 * here we acquire the read lock after sysfs_get_active_two().
+ * on the other hand, the remount process may maintain the sysfs/aufs
+ * entries after acquiring write lock.
+ * it can cause a deadlock.
+	 * so we simply give up processing the read here.
+ */
+ err = -EBUSY;
+ if (unlikely(!si_noflush_read_trylock(sb)))
+ goto out;
+
+ seq = au_seq(buf, PAGE_SIZE);
+ err = PTR_ERR(seq);
+ if (IS_ERR(seq))
+ goto out_unlock;
+
+ name = (void *)attr->name;
+ cattr = sysaufs_si_attrs;
+ while (*cattr) {
+ if (!strcmp(name, (*cattr)->name)) {
+ err = container_of(*cattr, struct sysaufs_si_attr, attr)
+ ->show(seq, sb);
+ goto out_seq;
+ }
+ cattr++;
+ }
+
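+	/* test "brid" before "br" since the latter is a prefix of the former */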
+ if (!strncmp(name, SysaufsBrid_PREFIX,
+ sizeof(SysaufsBrid_PREFIX) - 1)) {
+ idx = AuBrSysfs_BRID;
+ name += sizeof(SysaufsBrid_PREFIX) - 1;
+ } else if (!strncmp(name, SysaufsBr_PREFIX,
+ sizeof(SysaufsBr_PREFIX) - 1)) {
+ idx = AuBrSysfs_BR;
+ name += sizeof(SysaufsBr_PREFIX) - 1;
+ } else
+ BUG();
+
+ err = kstrtol(name, 10, &l);
+ if (!err) {
+ bend = au_sbend(sb);
+ if (l <= bend)
+ err = sysaufs_si_br(seq, sb, (aufs_bindex_t)l, idx);
+ else
+ err = -ENOENT;
+ }
+
+out_seq:
+ if (!err) {
+ err = seq->count;
+ /* sysfs limit */
+ if (unlikely(err == PAGE_SIZE))
+ err = -EFBIG;
+ }
+ kfree(seq);
+out_unlock:
+ si_read_unlock(sb);
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_brinfo(struct super_block *sb, union aufs_brinfo __user *arg)
+{
+ int err;
+ int16_t brid;
+ aufs_bindex_t bindex, bend;
+ size_t sz;
+ char *buf;
+ struct seq_file *seq;
+ struct au_branch *br;
+
+ si_read_lock(sb, AuLock_FLUSH);
+ bend = au_sbend(sb);
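+	/* a NULL arg means the caller only wants the number of branches */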
+ err = bend + 1;
+ if (!arg)
+ goto out;
+
+ err = -ENOMEM;
+ buf = (void *)__get_free_page(GFP_NOFS);
+ if (unlikely(!buf))
+ goto out;
+
+ seq = au_seq(buf, PAGE_SIZE);
+ err = PTR_ERR(seq);
+ if (IS_ERR(seq))
+ goto out_buf;
+
+ sz = sizeof(*arg) - offsetof(union aufs_brinfo, path);
+ for (bindex = 0; bindex <= bend; bindex++, arg++) {
+ err = !access_ok(VERIFY_WRITE, arg, sizeof(*arg));
+ if (unlikely(err))
+ break;
+
+ br = au_sbr(sb, bindex);
+ brid = br->br_id;
+ BUILD_BUG_ON(sizeof(brid) != sizeof(arg->id));
+ err = __put_user(brid, &arg->id);
+ if (unlikely(err))
+ break;
+
+ BUILD_BUG_ON(sizeof(br->br_perm) != sizeof(arg->perm));
+ err = __put_user(br->br_perm, &arg->perm);
+ if (unlikely(err))
+ break;
+
+ err = au_seq_path(seq, &br->br_path);
+ if (unlikely(err))
+ break;
+ seq_putc(seq, '\0');
+ if (!seq_has_overflowed(seq)) {
+ err = copy_to_user(arg->path, seq->buf, seq->count);
+ seq->count = 0;
+ if (unlikely(err))
+ break;
+ } else {
+ err = -E2BIG;
+ goto out_seq;
+ }
+ }
+ if (unlikely(err))
+ err = -EFAULT;
+
+out_seq:
+ kfree(seq);
+out_buf:
+ free_page((unsigned long)buf);
+out:
+ si_read_unlock(sb);
+ return err;
+}
+
+long au_brinfo_ioctl(struct file *file, unsigned long arg)
+{
+ return au_brinfo(file->f_path.dentry->d_sb, (void __user *)arg);
+}
+
+#ifdef CONFIG_COMPAT
+long au_brinfo_compat_ioctl(struct file *file, unsigned long arg)
+{
+ return au_brinfo(file->f_path.dentry->d_sb, compat_ptr(arg));
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+void sysaufs_br_init(struct au_branch *br)
+{
+ int i;
+ struct au_brsysfs *br_sysfs;
+ struct attribute *attr;
+
+ br_sysfs = br->br_sysfs;
+ for (i = 0; i < ARRAY_SIZE(br->br_sysfs); i++) {
+ attr = &br_sysfs->attr;
+ sysfs_attr_init(attr);
+ attr->name = br_sysfs->name;
+ attr->mode = S_IRUGO;
+ br_sysfs++;
+ }
+}
+
+void sysaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex)
+{
+ struct au_branch *br;
+ struct kobject *kobj;
+ struct au_brsysfs *br_sysfs;
+ int i;
+ aufs_bindex_t bend;
+
+ dbgaufs_brs_del(sb, bindex);
+
+ if (!sysaufs_brs)
+ return;
+
+ kobj = &au_sbi(sb)->si_kobj;
+ bend = au_sbend(sb);
+ for (; bindex <= bend; bindex++) {
+ br = au_sbr(sb, bindex);
+ br_sysfs = br->br_sysfs;
+ for (i = 0; i < ARRAY_SIZE(br->br_sysfs); i++) {
+ sysfs_remove_file(kobj, &br_sysfs->attr);
+ br_sysfs++;
+ }
+ }
+}
+
+void sysaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex)
+{
+ int err, i;
+ aufs_bindex_t bend;
+ struct kobject *kobj;
+ struct au_branch *br;
+ struct au_brsysfs *br_sysfs;
+
+ dbgaufs_brs_add(sb, bindex);
+
+ if (!sysaufs_brs)
+ return;
+
+ kobj = &au_sbi(sb)->si_kobj;
+ bend = au_sbend(sb);
+ for (; bindex <= bend; bindex++) {
+ br = au_sbr(sb, bindex);
+ br_sysfs = br->br_sysfs;
+ snprintf(br_sysfs[AuBrSysfs_BR].name, sizeof(br_sysfs->name),
+ SysaufsBr_PREFIX "%d", bindex);
+ snprintf(br_sysfs[AuBrSysfs_BRID].name, sizeof(br_sysfs->name),
+ SysaufsBrid_PREFIX "%d", bindex);
+ for (i = 0; i < ARRAY_SIZE(br->br_sysfs); i++) {
+ err = sysfs_create_file(kobj, &br_sysfs->attr);
+ if (unlikely(err))
+ pr_warn("failed %s under sysfs(%d)\n",
+ br_sysfs->name, err);
+ br_sysfs++;
+ }
+ }
+}
diff --git a/fs/aufs/sysrq.c b/fs/aufs/sysrq.c
new file mode 100644
index 000000000..7921ed716
--- /dev/null
+++ b/fs/aufs/sysrq.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * magic sysrq handler
+ */
+
+/* #include <linux/sysrq.h> */
+#include <linux/writeback.h>
+#include "aufs.h"
+
+/* ---------------------------------------------------------------------- */
+
+static void sysrq_sb(struct super_block *sb)
+{
+ char *plevel;
+ struct au_sbinfo *sbinfo;
+ struct file *file;
+ struct au_sphlhead *files;
+ struct au_finfo *finfo;
+
+ plevel = au_plevel;
+ au_plevel = KERN_WARNING;
+
+ /* since we define pr_fmt, call printk directly */
+#define pr(str) printk(KERN_WARNING AUFS_NAME ": " str)
+
+ sbinfo = au_sbi(sb);
+ printk(KERN_WARNING "si=%lx\n", sysaufs_si_id(sbinfo));
+ pr("superblock\n");
+ au_dpri_sb(sb);
+
+#if 0
+ pr("root dentry\n");
+ au_dpri_dentry(sb->s_root);
+ pr("root inode\n");
+ au_dpri_inode(d_inode(sb->s_root));
+#endif
+
+#if 0
+ do {
+ int err, i, j, ndentry;
+ struct au_dcsub_pages dpages;
+ struct au_dpage *dpage;
+
+ err = au_dpages_init(&dpages, GFP_ATOMIC);
+ if (unlikely(err))
+ break;
+ err = au_dcsub_pages(&dpages, sb->s_root, NULL, NULL);
+ if (!err)
+ for (i = 0; i < dpages.ndpage; i++) {
+ dpage = dpages.dpages + i;
+ ndentry = dpage->ndentry;
+ for (j = 0; j < ndentry; j++)
+ au_dpri_dentry(dpage->dentries[j]);
+ }
+ au_dpages_free(&dpages);
+ } while (0);
+#endif
+
+#if 1
+ {
+ struct inode *i;
+
+ pr("isolated inode\n");
+ spin_lock(&sb->s_inode_list_lock);
+ list_for_each_entry(i, &sb->s_inodes, i_sb_list) {
+ spin_lock(&i->i_lock);
+ if (1 || hlist_empty(&i->i_dentry))
+ au_dpri_inode(i);
+ spin_unlock(&i->i_lock);
+ }
+ spin_unlock(&sb->s_inode_list_lock);
+ }
+#endif
+ pr("files\n");
+ files = &au_sbi(sb)->si_files;
+ spin_lock(&files->spin);
+ hlist_for_each_entry(finfo, &files->head, fi_hlist) {
+ umode_t mode;
+
+ file = finfo->fi_file;
+ mode = file_inode(file)->i_mode;
+ if (!special_file(mode))
+ au_dpri_file(file);
+ }
+ spin_unlock(&files->spin);
+ pr("done\n");
+
+#undef pr
+ au_plevel = plevel;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* module parameter */
+static char *aufs_sysrq_key = "a";
+module_param_named(sysrq, aufs_sysrq_key, charp, S_IRUGO);
+MODULE_PARM_DESC(sysrq, "MagicSysRq key for " AUFS_NAME);
+
+static void au_sysrq(int key __maybe_unused)
+{
+ struct au_sbinfo *sbinfo;
+
+ lockdep_off();
+ au_sbilist_lock();
+ list_for_each_entry(sbinfo, &au_sbilist.head, si_list)
+ sysrq_sb(sbinfo->si_sb);
+ au_sbilist_unlock();
+ lockdep_on();
+}
+
+static struct sysrq_key_op au_sysrq_op = {
+ .handler = au_sysrq,
+ .help_msg = "Aufs",
+ .action_msg = "Aufs",
+ .enable_mask = SYSRQ_ENABLE_DUMP
+};
+
+/* ---------------------------------------------------------------------- */
+
+int __init au_sysrq_init(void)
+{
+ int err;
+ char key;
+
+ err = -1;
+ key = *aufs_sysrq_key;
+ if ('a' <= key && key <= 'z')
+ err = register_sysrq_key(key, &au_sysrq_op);
+ if (unlikely(err))
+ pr_err("err %d, sysrq=%c\n", err, key);
+ return err;
+}
+
+void au_sysrq_fin(void)
+{
+ int err;
+
+ err = unregister_sysrq_key(*aufs_sysrq_key, &au_sysrq_op);
+ if (unlikely(err))
+ pr_err("err %d (ignored)\n", err);
+}
diff --git a/fs/aufs/vdir.c b/fs/aufs/vdir.c
new file mode 100644
index 000000000..f64cc2b7a
--- /dev/null
+++ b/fs/aufs/vdir.c
@@ -0,0 +1,875 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * virtual or vertical directory
+ */
+
+#include "aufs.h"
+
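+/* size of one variable-length dir entry, aligned to ino_t for the next entry */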
+static unsigned int calc_size(int nlen)
+{
+ return ALIGN(sizeof(struct au_vdir_de) + nlen, sizeof(ino_t));
+}
+
+static int set_deblk_end(union au_vdir_deblk_p *p,
+ union au_vdir_deblk_p *deblk_end)
+{
+ if (calc_size(0) <= deblk_end->deblk - p->deblk) {
+ p->de->de_str.len = 0;
+ /* smp_mb(); */
+ return 0;
+ }
+ return -1; /* error */
+}
+
+/* returns true or false */
+static int is_deblk_end(union au_vdir_deblk_p *p,
+ union au_vdir_deblk_p *deblk_end)
+{
+ if (calc_size(0) <= deblk_end->deblk - p->deblk)
+ return !p->de->de_str.len;
+ return 1;
+}
+
+static unsigned char *last_deblk(struct au_vdir *vdir)
+{
+ return vdir->vd_deblk[vdir->vd_nblk - 1];
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* estimate the appropriate size for name hash table */
+unsigned int au_rdhash_est(loff_t sz)
+{
+ unsigned int n;
+
+ n = UINT_MAX;
+ sz >>= 10;
+ if (sz < n)
+ n = sz;
+ if (sz < AUFS_RDHASH_DEF)
+ n = AUFS_RDHASH_DEF;
+ /* pr_info("n %u\n", n); */
+ return n;
+}
+
+/*
+ * the allocated memory has to be freed by
+ * au_nhash_wh_free() or au_nhash_de_free().
+ */
+int au_nhash_alloc(struct au_nhash *nhash, unsigned int num_hash, gfp_t gfp)
+{
+ struct hlist_head *head;
+ unsigned int u;
+ size_t sz;
+
+ sz = sizeof(*nhash->nh_head) * num_hash;
+ head = kmalloc(sz, gfp);
+ if (head) {
+ nhash->nh_num = num_hash;
+ nhash->nh_head = head;
+ for (u = 0; u < num_hash; u++)
+ INIT_HLIST_HEAD(head++);
+ return 0; /* success */
+ }
+
+ return -ENOMEM;
+}
+
+static void nhash_count(struct hlist_head *head)
+{
+#if 0
+ unsigned long n;
+ struct hlist_node *pos;
+
+ n = 0;
+ hlist_for_each(pos, head)
+ n++;
+ pr_info("%lu\n", n);
+#endif
+}
+
+static void au_nhash_wh_do_free(struct hlist_head *head)
+{
+ struct au_vdir_wh *pos;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_safe(pos, node, head, wh_hash)
+ kfree(pos);
+}
+
+static void au_nhash_de_do_free(struct hlist_head *head)
+{
+ struct au_vdir_dehstr *pos;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_safe(pos, node, head, hash)
+ au_cache_free_vdir_dehstr(pos);
+}
+
+static void au_nhash_do_free(struct au_nhash *nhash,
+ void (*free)(struct hlist_head *head))
+{
+ unsigned int n;
+ struct hlist_head *head;
+
+ n = nhash->nh_num;
+ if (!n)
+ return;
+
+ head = nhash->nh_head;
+ while (n-- > 0) {
+ nhash_count(head);
+ free(head++);
+ }
+ kfree(nhash->nh_head);
+}
+
+void au_nhash_wh_free(struct au_nhash *whlist)
+{
+ au_nhash_do_free(whlist, au_nhash_wh_do_free);
+}
+
+static void au_nhash_de_free(struct au_nhash *delist)
+{
+ au_nhash_do_free(delist, au_nhash_de_do_free);
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_nhash_test_longer_wh(struct au_nhash *whlist, aufs_bindex_t btgt,
+ int limit)
+{
+ int num;
+ unsigned int u, n;
+ struct hlist_head *head;
+ struct au_vdir_wh *pos;
+
+ num = 0;
+ n = whlist->nh_num;
+ head = whlist->nh_head;
+ for (u = 0; u < n; u++, head++)
+ hlist_for_each_entry(pos, head, wh_hash)
+ if (pos->wh_bindex == btgt && ++num > limit)
+ return 1;
+ return 0;
+}
+
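+/* cheap additive hash over the name, folded into the hash table size */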
+static struct hlist_head *au_name_hash(struct au_nhash *nhash,
+ unsigned char *name,
+ unsigned int len)
+{
+ unsigned int v;
+ /* const unsigned int magic_bit = 12; */
+
+ AuDebugOn(!nhash->nh_num || !nhash->nh_head);
+
+ v = 0;
+ while (len--)
+ v += *name++;
+ /* v = hash_long(v, magic_bit); */
+ v %= nhash->nh_num;
+ return nhash->nh_head + v;
+}
+
+static int au_nhash_test_name(struct au_vdir_destr *str, const char *name,
+ int nlen)
+{
+ return str->len == nlen && !memcmp(str->name, name, nlen);
+}
+
+/* returns found or not */
+int au_nhash_test_known_wh(struct au_nhash *whlist, char *name, int nlen)
+{
+ struct hlist_head *head;
+ struct au_vdir_wh *pos;
+ struct au_vdir_destr *str;
+
+ head = au_name_hash(whlist, name, nlen);
+ hlist_for_each_entry(pos, head, wh_hash) {
+ str = &pos->wh_str;
+ AuDbg("%.*s\n", str->len, str->name);
+ if (au_nhash_test_name(str, name, nlen))
+ return 1;
+ }
+ return 0;
+}
+
+/* returns found(true) or not */
+static int test_known(struct au_nhash *delist, char *name, int nlen)
+{
+ struct hlist_head *head;
+ struct au_vdir_dehstr *pos;
+ struct au_vdir_destr *str;
+
+ head = au_name_hash(delist, name, nlen);
+ hlist_for_each_entry(pos, head, hash) {
+ str = pos->str;
+ AuDbg("%.*s\n", str->len, str->name);
+ if (au_nhash_test_name(str, name, nlen))
+ return 1;
+ }
+ return 0;
+}
+
+static void au_shwh_init_wh(struct au_vdir_wh *wh, ino_t ino,
+ unsigned char d_type)
+{
+#ifdef CONFIG_AUFS_SHWH
+ wh->wh_ino = ino;
+ wh->wh_type = d_type;
+#endif
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_nhash_append_wh(struct au_nhash *whlist, char *name, int nlen, ino_t ino,
+ unsigned int d_type, aufs_bindex_t bindex,
+ unsigned char shwh)
+{
+ int err;
+ struct au_vdir_destr *str;
+ struct au_vdir_wh *wh;
+
+ AuDbg("%.*s\n", nlen, name);
+ AuDebugOn(!whlist->nh_num || !whlist->nh_head);
+
+ err = -ENOMEM;
+ wh = kmalloc(sizeof(*wh) + nlen, GFP_NOFS);
+ if (unlikely(!wh))
+ goto out;
+
+ err = 0;
+ wh->wh_bindex = bindex;
+ if (shwh)
+ au_shwh_init_wh(wh, ino, d_type);
+ str = &wh->wh_str;
+ str->len = nlen;
+ memcpy(str->name, name, nlen);
+ hlist_add_head(&wh->wh_hash, au_name_hash(whlist, name, nlen));
+ /* smp_mb(); */
+
+out:
+ return err;
+}
+
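+/* grow the vdir by one directory-entry block (deblk) of vd_deblk_sz bytes */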
+static int append_deblk(struct au_vdir *vdir)
+{
+ int err;
+ unsigned long ul;
+ const unsigned int deblk_sz = vdir->vd_deblk_sz;
+ union au_vdir_deblk_p p, deblk_end;
+ unsigned char **o;
+
+ err = -ENOMEM;
+ o = krealloc(vdir->vd_deblk, sizeof(*o) * (vdir->vd_nblk + 1),
+ GFP_NOFS);
+ if (unlikely(!o))
+ goto out;
+
+ vdir->vd_deblk = o;
+ p.deblk = kmalloc(deblk_sz, GFP_NOFS);
+ if (p.deblk) {
+ ul = vdir->vd_nblk++;
+ vdir->vd_deblk[ul] = p.deblk;
+ vdir->vd_last.ul = ul;
+ vdir->vd_last.p.deblk = p.deblk;
+ deblk_end.deblk = p.deblk + deblk_sz;
+ err = set_deblk_end(&p, &deblk_end);
+ }
+
+out:
+ return err;
+}
+
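+/* append one entry to the last deblk, adding a new deblk when it is full */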
+static int append_de(struct au_vdir *vdir, char *name, int nlen, ino_t ino,
+ unsigned int d_type, struct au_nhash *delist)
+{
+ int err;
+ unsigned int sz;
+ const unsigned int deblk_sz = vdir->vd_deblk_sz;
+ union au_vdir_deblk_p p, *room, deblk_end;
+ struct au_vdir_dehstr *dehstr;
+
+ p.deblk = last_deblk(vdir);
+ deblk_end.deblk = p.deblk + deblk_sz;
+ room = &vdir->vd_last.p;
+ AuDebugOn(room->deblk < p.deblk || deblk_end.deblk <= room->deblk
+ || !is_deblk_end(room, &deblk_end));
+
+ sz = calc_size(nlen);
+ if (unlikely(sz > deblk_end.deblk - room->deblk)) {
+ err = append_deblk(vdir);
+ if (unlikely(err))
+ goto out;
+
+ p.deblk = last_deblk(vdir);
+ deblk_end.deblk = p.deblk + deblk_sz;
+ /* smp_mb(); */
+ AuDebugOn(room->deblk != p.deblk);
+ }
+
+ err = -ENOMEM;
+ dehstr = au_cache_alloc_vdir_dehstr();
+ if (unlikely(!dehstr))
+ goto out;
+
+ dehstr->str = &room->de->de_str;
+ hlist_add_head(&dehstr->hash, au_name_hash(delist, name, nlen));
+ room->de->de_ino = ino;
+ room->de->de_type = d_type;
+ room->de->de_str.len = nlen;
+ memcpy(room->de->de_str.name, name, nlen);
+
+ err = 0;
+ room->deblk += sz;
+ if (unlikely(set_deblk_end(room, &deblk_end)))
+ err = append_deblk(vdir);
+ /* smp_mb(); */
+
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+void au_vdir_free(struct au_vdir *vdir)
+{
+ unsigned char **deblk;
+
+ deblk = vdir->vd_deblk;
+ while (vdir->vd_nblk--)
+ kfree(*deblk++);
+ kfree(vdir->vd_deblk);
+ au_cache_free_vdir(vdir);
+}
+
+static struct au_vdir *alloc_vdir(struct file *file)
+{
+ struct au_vdir *vdir;
+ struct super_block *sb;
+ int err;
+
+ sb = file->f_path.dentry->d_sb;
+ SiMustAnyLock(sb);
+
+ err = -ENOMEM;
+ vdir = au_cache_alloc_vdir();
+ if (unlikely(!vdir))
+ goto out;
+
+ vdir->vd_deblk = kzalloc(sizeof(*vdir->vd_deblk), GFP_NOFS);
+ if (unlikely(!vdir->vd_deblk))
+ goto out_free;
+
+ vdir->vd_deblk_sz = au_sbi(sb)->si_rdblk;
+ if (!vdir->vd_deblk_sz) {
+ /* estimate the appropriate size for deblk */
+ vdir->vd_deblk_sz = au_dir_size(file, /*dentry*/NULL);
+ /* pr_info("vd_deblk_sz %u\n", vdir->vd_deblk_sz); */
+ }
+ vdir->vd_nblk = 0;
+ vdir->vd_version = 0;
+ vdir->vd_jiffy = 0;
+ err = append_deblk(vdir);
+ if (!err)
+ return vdir; /* success */
+
+ kfree(vdir->vd_deblk);
+
+out_free:
+ au_cache_free_vdir(vdir);
+out:
+ vdir = ERR_PTR(err);
+ return vdir;
+}
+
+static int reinit_vdir(struct au_vdir *vdir)
+{
+ int err;
+ union au_vdir_deblk_p p, deblk_end;
+
+ while (vdir->vd_nblk > 1) {
+ kfree(vdir->vd_deblk[vdir->vd_nblk - 1]);
+ /* vdir->vd_deblk[vdir->vd_nblk - 1] = NULL; */
+ vdir->vd_nblk--;
+ }
+ p.deblk = vdir->vd_deblk[0];
+ deblk_end.deblk = p.deblk + vdir->vd_deblk_sz;
+ err = set_deblk_end(&p, &deblk_end);
+ /* keep vd_deblk_sz */
+ vdir->vd_last.ul = 0;
+ vdir->vd_last.p.deblk = vdir->vd_deblk[0];
+ vdir->vd_version = 0;
+ vdir->vd_jiffy = 0;
+ /* smp_mb(); */
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define AuFillVdir_CALLED 1
+#define AuFillVdir_WHABLE (1 << 1)
+#define AuFillVdir_SHWH (1 << 2)
+#define au_ftest_fillvdir(flags, name) ((flags) & AuFillVdir_##name)
+#define au_fset_fillvdir(flags, name) \
+ do { (flags) |= AuFillVdir_##name; } while (0)
+#define au_fclr_fillvdir(flags, name) \
+ do { (flags) &= ~AuFillVdir_##name; } while (0)
+
+#ifndef CONFIG_AUFS_SHWH
+#undef AuFillVdir_SHWH
+#define AuFillVdir_SHWH 0
+#endif
+
+struct fillvdir_arg {
+ struct dir_context ctx;
+ struct file *file;
+ struct au_vdir *vdir;
+ struct au_nhash delist;
+ struct au_nhash whlist;
+ aufs_bindex_t bindex;
+ unsigned int flags;
+ int err;
+};
+
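+/* iterate_dir() callback: merge a single lower entry, tracking whiteouts */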
+static int fillvdir(struct dir_context *ctx, const char *__name, int nlen,
+ loff_t offset __maybe_unused, u64 h_ino,
+ unsigned int d_type)
+{
+ struct fillvdir_arg *arg = container_of(ctx, struct fillvdir_arg, ctx);
+ char *name = (void *)__name;
+ struct super_block *sb;
+ ino_t ino;
+ const unsigned char shwh = !!au_ftest_fillvdir(arg->flags, SHWH);
+
+ arg->err = 0;
+ sb = arg->file->f_path.dentry->d_sb;
+ au_fset_fillvdir(arg->flags, CALLED);
+ /* smp_mb(); */
+ if (nlen <= AUFS_WH_PFX_LEN
+ || memcmp(name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) {
+ if (test_known(&arg->delist, name, nlen)
+ || au_nhash_test_known_wh(&arg->whlist, name, nlen))
+ goto out; /* already exists or whiteouted */
+
+ arg->err = au_ino(sb, arg->bindex, h_ino, d_type, &ino);
+ if (!arg->err) {
+ if (unlikely(nlen > AUFS_MAX_NAMELEN))
+ d_type = DT_UNKNOWN;
+ arg->err = append_de(arg->vdir, name, nlen, ino,
+ d_type, &arg->delist);
+ }
+ } else if (au_ftest_fillvdir(arg->flags, WHABLE)) {
+ name += AUFS_WH_PFX_LEN;
+ nlen -= AUFS_WH_PFX_LEN;
+ if (au_nhash_test_known_wh(&arg->whlist, name, nlen))
+ goto out; /* already whiteouted */
+
+ if (shwh)
+ arg->err = au_wh_ino(sb, arg->bindex, h_ino, d_type,
+ &ino);
+ if (!arg->err) {
+ if (nlen <= AUFS_MAX_NAMELEN + AUFS_WH_PFX_LEN)
+ d_type = DT_UNKNOWN;
+ arg->err = au_nhash_append_wh
+ (&arg->whlist, name, nlen, ino, d_type,
+ arg->bindex, shwh);
+ }
+ }
+
+out:
+ if (!arg->err)
+ arg->vdir->vd_jiffy = jiffies;
+ /* smp_mb(); */
+ AuTraceErr(arg->err);
+ return arg->err;
+}
+
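+/* shwh mode: re-append collected whiteouts as visible ".wh." entries */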
+static int au_handle_shwh(struct super_block *sb, struct au_vdir *vdir,
+ struct au_nhash *whlist, struct au_nhash *delist)
+{
+#ifdef CONFIG_AUFS_SHWH
+ int err;
+ unsigned int nh, u;
+ struct hlist_head *head;
+ struct au_vdir_wh *pos;
+ struct hlist_node *n;
+ char *p, *o;
+ struct au_vdir_destr *destr;
+
+ AuDebugOn(!au_opt_test(au_mntflags(sb), SHWH));
+
+ err = -ENOMEM;
+ o = p = (void *)__get_free_page(GFP_NOFS);
+ if (unlikely(!p))
+ goto out;
+
+ err = 0;
+ nh = whlist->nh_num;
+ memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN);
+ p += AUFS_WH_PFX_LEN;
+ for (u = 0; u < nh; u++) {
+ head = whlist->nh_head + u;
+ hlist_for_each_entry_safe(pos, n, head, wh_hash) {
+ destr = &pos->wh_str;
+ memcpy(p, destr->name, destr->len);
+ err = append_de(vdir, o, destr->len + AUFS_WH_PFX_LEN,
+ pos->wh_ino, pos->wh_type, delist);
+ if (unlikely(err))
+ break;
+ }
+ }
+
+ free_page((unsigned long)o);
+
+out:
+ AuTraceErr(err);
+ return err;
+#else
+ return 0;
+#endif
+}
+
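+/* read all lower dirs of the file and merge the entries into arg->vdir */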
+static int au_do_read_vdir(struct fillvdir_arg *arg)
+{
+ int err;
+ unsigned int rdhash;
+ loff_t offset;
+ aufs_bindex_t bend, bindex, bstart;
+ unsigned char shwh;
+ struct file *hf, *file;
+ struct super_block *sb;
+
+ file = arg->file;
+ sb = file->f_path.dentry->d_sb;
+ SiMustAnyLock(sb);
+
+ rdhash = au_sbi(sb)->si_rdhash;
+ if (!rdhash)
+ rdhash = au_rdhash_est(au_dir_size(file, /*dentry*/NULL));
+ err = au_nhash_alloc(&arg->delist, rdhash, GFP_NOFS);
+ if (unlikely(err))
+ goto out;
+ err = au_nhash_alloc(&arg->whlist, rdhash, GFP_NOFS);
+ if (unlikely(err))
+ goto out_delist;
+
+ err = 0;
+ arg->flags = 0;
+ shwh = 0;
+ if (au_opt_test(au_mntflags(sb), SHWH)) {
+ shwh = 1;
+ au_fset_fillvdir(arg->flags, SHWH);
+ }
+ bstart = au_fbstart(file);
+ bend = au_fbend_dir(file);
+ for (bindex = bstart; !err && bindex <= bend; bindex++) {
+ hf = au_hf_dir(file, bindex);
+ if (!hf)
+ continue;
+
+ offset = vfsub_llseek(hf, 0, SEEK_SET);
+ err = offset;
+ if (unlikely(offset))
+ break;
+
+ arg->bindex = bindex;
+ au_fclr_fillvdir(arg->flags, WHABLE);
+ if (shwh
+ || (bindex != bend
+ && au_br_whable(au_sbr_perm(sb, bindex))))
+ au_fset_fillvdir(arg->flags, WHABLE);
+ do {
+ arg->err = 0;
+ au_fclr_fillvdir(arg->flags, CALLED);
+ /* smp_mb(); */
+ err = vfsub_iterate_dir(hf, &arg->ctx);
+ if (err >= 0)
+ err = arg->err;
+ } while (!err && au_ftest_fillvdir(arg->flags, CALLED));
+
+ /*
+ * dir_relax() may be good for concurrency, but aufs should not
+ * use it since it will cause a lockdep problem.
+ */
+ }
+
+ if (!err && shwh)
+ err = au_handle_shwh(sb, arg->vdir, &arg->whlist, &arg->delist);
+
+ au_nhash_wh_free(&arg->whlist);
+
+out_delist:
+ au_nhash_de_free(&arg->delist);
+out:
+ return err;
+}
+
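+/* (re)build the inode's vdir when missing, stale or expired (si_rdcache) */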
+static int read_vdir(struct file *file, int may_read)
+{
+ int err;
+ unsigned long expire;
+ unsigned char do_read;
+ struct fillvdir_arg arg = {
+ .ctx = {
+ .actor = fillvdir
+ }
+ };
+ struct inode *inode;
+ struct au_vdir *vdir, *allocated;
+
+ err = 0;
+ inode = file_inode(file);
+ IMustLock(inode);
+ SiMustAnyLock(inode->i_sb);
+
+ allocated = NULL;
+ do_read = 0;
+ expire = au_sbi(inode->i_sb)->si_rdcache;
+ vdir = au_ivdir(inode);
+ if (!vdir) {
+ do_read = 1;
+ vdir = alloc_vdir(file);
+ err = PTR_ERR(vdir);
+ if (IS_ERR(vdir))
+ goto out;
+ err = 0;
+ allocated = vdir;
+ } else if (may_read
+ && (inode->i_version != vdir->vd_version
+ || time_after(jiffies, vdir->vd_jiffy + expire))) {
+ do_read = 1;
+ err = reinit_vdir(vdir);
+ if (unlikely(err))
+ goto out;
+ }
+
+ if (!do_read)
+ return 0; /* success */
+
+ arg.file = file;
+ arg.vdir = vdir;
+ err = au_do_read_vdir(&arg);
+ if (!err) {
+ /* file->f_pos = 0; */ /* todo: ctx->pos? */
+ vdir->vd_version = inode->i_version;
+ vdir->vd_last.ul = 0;
+ vdir->vd_last.p.deblk = vdir->vd_deblk[0];
+ if (allocated)
+ au_set_ivdir(inode, allocated);
+ } else if (allocated)
+ au_vdir_free(allocated);
+
+out:
+ return err;
+}
+
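+/* copy the inode's vdir into the per-file cache; tgt must be freshly set up */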
+static int copy_vdir(struct au_vdir *tgt, struct au_vdir *src)
+{
+ int err, rerr;
+ unsigned long ul, n;
+ const unsigned int deblk_sz = src->vd_deblk_sz;
+
+ AuDebugOn(tgt->vd_nblk != 1);
+
+ err = -ENOMEM;
+ if (tgt->vd_nblk < src->vd_nblk) {
+ unsigned char **p;
+
+ p = krealloc(tgt->vd_deblk, sizeof(*p) * src->vd_nblk,
+ GFP_NOFS);
+ if (unlikely(!p))
+ goto out;
+ tgt->vd_deblk = p;
+ }
+
+ if (tgt->vd_deblk_sz != deblk_sz) {
+ unsigned char *p;
+
+ tgt->vd_deblk_sz = deblk_sz;
+ p = krealloc(tgt->vd_deblk[0], deblk_sz, GFP_NOFS);
+ if (unlikely(!p))
+ goto out;
+ tgt->vd_deblk[0] = p;
+ }
+ memcpy(tgt->vd_deblk[0], src->vd_deblk[0], deblk_sz);
+ tgt->vd_version = src->vd_version;
+ tgt->vd_jiffy = src->vd_jiffy;
+
+ n = src->vd_nblk;
+ for (ul = 1; ul < n; ul++) {
+ tgt->vd_deblk[ul] = kmemdup(src->vd_deblk[ul], deblk_sz,
+ GFP_NOFS);
+ if (unlikely(!tgt->vd_deblk[ul]))
+ goto out;
+ tgt->vd_nblk++;
+ }
+ tgt->vd_nblk = n;
+ tgt->vd_last.ul = src->vd_last.ul;
+ tgt->vd_last.p.deblk = tgt->vd_deblk[tgt->vd_last.ul];
+ tgt->vd_last.p.deblk += src->vd_last.p.deblk
+ - src->vd_deblk[src->vd_last.ul];
+ /* smp_mb(); */
+ return 0; /* success */
+
+out:
+ rerr = reinit_vdir(tgt);
+ BUG_ON(rerr);
+ return err;
+}
+
+int au_vdir_init(struct file *file)
+{
+ int err;
+ struct inode *inode;
+ struct au_vdir *vdir_cache, *allocated;
+
+ /* test file->f_pos here instead of ctx->pos */
+ err = read_vdir(file, !file->f_pos);
+ if (unlikely(err))
+ goto out;
+
+ allocated = NULL;
+ vdir_cache = au_fvdir_cache(file);
+ if (!vdir_cache) {
+ vdir_cache = alloc_vdir(file);
+ err = PTR_ERR(vdir_cache);
+ if (IS_ERR(vdir_cache))
+ goto out;
+ allocated = vdir_cache;
+ } else if (!file->f_pos && vdir_cache->vd_version != file->f_version) {
+ /* test file->f_pos here instead of ctx->pos */
+ err = reinit_vdir(vdir_cache);
+ if (unlikely(err))
+ goto out;
+ } else
+ return 0; /* success */
+
+ inode = file_inode(file);
+ err = copy_vdir(vdir_cache, au_ivdir(inode));
+ if (!err) {
+ file->f_version = inode->i_version;
+ if (allocated)
+ au_set_fvdir_cache(file, allocated);
+ } else if (allocated)
+ au_vdir_free(allocated);
+
+out:
+ return err;
+}
+
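+/* byte offset of vd_last: full deblks plus the offset within the last one */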
+static loff_t calc_offset(struct au_vdir *vdir)
+{
+ loff_t offset;
+ union au_vdir_deblk_p p;
+
+ p.deblk = vdir->vd_deblk[vdir->vd_last.ul];
+ offset = vdir->vd_last.p.deblk - p.deblk;
+ offset += vdir->vd_deblk_sz * vdir->vd_last.ul;
+ return offset;
+}
+
+/* returns true or false */
+static int seek_vdir(struct file *file, struct dir_context *ctx)
+{
+ int valid;
+ unsigned int deblk_sz;
+ unsigned long ul, n;
+ loff_t offset;
+ union au_vdir_deblk_p p, deblk_end;
+ struct au_vdir *vdir_cache;
+
+ valid = 1;
+ vdir_cache = au_fvdir_cache(file);
+ offset = calc_offset(vdir_cache);
+ AuDbg("offset %lld\n", offset);
+ if (ctx->pos == offset)
+ goto out;
+
+ vdir_cache->vd_last.ul = 0;
+ vdir_cache->vd_last.p.deblk = vdir_cache->vd_deblk[0];
+ if (!ctx->pos)
+ goto out;
+
+ valid = 0;
+ deblk_sz = vdir_cache->vd_deblk_sz;
+ ul = div64_u64(ctx->pos, deblk_sz);
+ AuDbg("ul %lu\n", ul);
+ if (ul >= vdir_cache->vd_nblk)
+ goto out;
+
+ n = vdir_cache->vd_nblk;
+ for (; ul < n; ul++) {
+ p.deblk = vdir_cache->vd_deblk[ul];
+ deblk_end.deblk = p.deblk + deblk_sz;
+ offset = ul;
+ offset *= deblk_sz;
+ while (!is_deblk_end(&p, &deblk_end) && offset < ctx->pos) {
+ unsigned int l;
+
+ l = calc_size(p.de->de_str.len);
+ offset += l;
+ p.deblk += l;
+ }
+ if (!is_deblk_end(&p, &deblk_end)) {
+ valid = 1;
+ vdir_cache->vd_last.ul = ul;
+ vdir_cache->vd_last.p = p;
+ break;
+ }
+ }
+
+out:
+ /* smp_mb(); */
+ AuTraceErr(!valid);
+ return valid;
+}
+
+int au_vdir_fill_de(struct file *file, struct dir_context *ctx)
+{
+ unsigned int l, deblk_sz;
+ union au_vdir_deblk_p deblk_end;
+ struct au_vdir *vdir_cache;
+ struct au_vdir_de *de;
+
+ vdir_cache = au_fvdir_cache(file);
+ if (!seek_vdir(file, ctx))
+ return 0;
+
+ deblk_sz = vdir_cache->vd_deblk_sz;
+ while (1) {
+ deblk_end.deblk = vdir_cache->vd_deblk[vdir_cache->vd_last.ul];
+ deblk_end.deblk += deblk_sz;
+ while (!is_deblk_end(&vdir_cache->vd_last.p, &deblk_end)) {
+ de = vdir_cache->vd_last.p.de;
+ AuDbg("%.*s, off%lld, i%lu, dt%d\n",
+ de->de_str.len, de->de_str.name, ctx->pos,
+ (unsigned long)de->de_ino, de->de_type);
+ if (unlikely(!dir_emit(ctx, de->de_str.name,
+ de->de_str.len, de->de_ino,
+ de->de_type))) {
+ /* todo: ignore the error caused by udba? */
+ /* return err; */
+ return 0;
+ }
+
+ l = calc_size(de->de_str.len);
+ vdir_cache->vd_last.p.deblk += l;
+ ctx->pos += l;
+ }
+ if (vdir_cache->vd_last.ul < vdir_cache->vd_nblk - 1) {
+ vdir_cache->vd_last.ul++;
+ vdir_cache->vd_last.p.deblk
+ = vdir_cache->vd_deblk[vdir_cache->vd_last.ul];
+ ctx->pos = deblk_sz * vdir_cache->vd_last.ul;
+ continue;
+ }
+ break;
+ }
+
+ /* smp_mb(); */
+ return 0;
+}
diff --git a/fs/aufs/vfsub.c b/fs/aufs/vfsub.c
new file mode 100644
index 000000000..29e0bbc32
--- /dev/null
+++ b/fs/aufs/vfsub.c
@@ -0,0 +1,871 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * sub-routines for VFS
+ */
+
+#include <linux/namei.h>
+#include <linux/nsproxy.h>
+#include <linux/security.h>
+#include <linux/splice.h>
+#include "../fs/mount.h"
+#include "aufs.h"
+
+#ifdef CONFIG_AUFS_BR_FUSE
+int vfsub_test_mntns(struct vfsmount *mnt, struct super_block *h_sb)
+{
+ struct nsproxy *ns;
+
+ if (!au_test_fuse(h_sb) || !au_userns)
+ return 0;
+
+ ns = current->nsproxy;
+ /* no {get,put}_nsproxy(ns) */
+ return real_mount(mnt)->mnt_ns == ns->mnt_ns ? 0 : -EACCES;
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+int vfsub_update_h_iattr(struct path *h_path, int *did)
+{
+ int err;
+ struct kstat st;
+ struct super_block *h_sb;
+
+ /* for remote fs, leave work for its getattr or d_revalidate */
+ /* for bad i_attr fs, handle them in aufs_getattr() */
+ /* still some fs may acquire i_mutex. we need to skip them */
+ err = 0;
+ if (!did)
+ did = &err;
+ h_sb = h_path->dentry->d_sb;
+ *did = (!au_test_fs_remote(h_sb) && au_test_fs_refresh_iattr(h_sb));
+ if (*did)
+ err = vfs_getattr(h_path, &st);
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct file *vfsub_dentry_open(struct path *path, int flags)
+{
+ struct file *file;
+
+ file = dentry_open(path, flags /* | __FMODE_NONOTIFY */,
+ current_cred());
+ if (!IS_ERR_OR_NULL(file)
+ && (file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
+ i_readcount_inc(d_inode(path->dentry));
+
+ return file;
+}
+
+struct file *vfsub_filp_open(const char *path, int oflags, int mode)
+{
+ struct file *file;
+
+ lockdep_off();
+ file = filp_open(path,
+ oflags /* | __FMODE_NONOTIFY */,
+ mode);
+ lockdep_on();
+ if (IS_ERR(file))
+ goto out;
+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
+
+out:
+ return file;
+}
+
+/*
+ * Ideally this function should call VFS:do_last() in order to keep all of its
+ * checks. But it is very hard for aufs to regenerate several VFS internal
+ * structures such as nameidata. This is a second (or third) best approach.
+ * cf. linux/fs/namei.c:do_last(), lookup_open() and atomic_open().
+ */
+int vfsub_atomic_open(struct inode *dir, struct dentry *dentry,
+ struct vfsub_aopen_args *args, struct au_branch *br)
+{
+ int err;
+ struct file *file = args->file;
+ /* copied from linux/fs/namei.c:atomic_open() */
+ struct dentry *const DENTRY_NOT_SET = (void *)-1UL;
+
+ IMustLock(dir);
+ AuDebugOn(!dir->i_op->atomic_open);
+
+ err = au_br_test_oflag(args->open_flag, br);
+ if (unlikely(err))
+ goto out;
+
+ args->file->f_path.dentry = DENTRY_NOT_SET;
+ args->file->f_path.mnt = au_br_mnt(br);
+ err = dir->i_op->atomic_open(dir, dentry, file, args->open_flag,
+ args->create_mode, args->opened);
+ if (err >= 0) {
+ /* some filesystems don't set FILE_CREATED even when they succeed? */
+ if (*args->opened & FILE_CREATED)
+ fsnotify_create(dir, dentry);
+ } else
+ goto out;
+
+ if (!err) {
+ /* todo: call VFS:may_open() here */
+ err = open_check_o_direct(file);
+ /* todo: ima_file_check() too? */
+ if (!err && (args->open_flag & __FMODE_EXEC))
+ err = deny_write_access(file);
+ if (unlikely(err))
+ /* note that the file has been created and is still open */
+ goto out;
+ }
+
+ atomic_inc(&br->br_count);
+ fsnotify_open(file);
+
+out:
+ return err;
+}
+
+int vfsub_kern_path(const char *name, unsigned int flags, struct path *path)
+{
+ int err;
+
+ err = kern_path(name, flags, path);
+ if (!err && d_is_positive(path->dentry))
+ vfsub_update_h_iattr(path, /*did*/NULL); /*ignore*/
+ return err;
+}
+
+struct dentry *vfsub_lookup_one_len_unlocked(const char *name,
+ struct dentry *parent, int len)
+{
+ struct path path = {
+ .mnt = NULL
+ };
+
+ path.dentry = lookup_one_len_unlocked(name, parent, len);
+ if (IS_ERR(path.dentry))
+ goto out;
+ if (d_is_positive(path.dentry))
+ vfsub_update_h_iattr(&path, /*did*/NULL); /*ignore*/
+
+out:
+ AuTraceErrPtr(path.dentry);
+ return path.dentry;
+}
+
+struct dentry *vfsub_lookup_one_len(const char *name, struct dentry *parent,
+ int len)
+{
+ struct path path = {
+ .mnt = NULL
+ };
+
+ /* VFS checks it too, but by WARN_ON_ONCE() */
+ IMustLock(d_inode(parent));
+
+ path.dentry = lookup_one_len(name, parent, len);
+ if (IS_ERR(path.dentry))
+ goto out;
+ if (d_is_positive(path.dentry))
+ vfsub_update_h_iattr(&path, /*did*/NULL); /*ignore*/
+
+out:
+ AuTraceErrPtr(path.dentry);
+ return path.dentry;
+}
+
+void vfsub_call_lkup_one(void *args)
+{
+ struct vfsub_lkup_one_args *a = args;
+ *a->errp = vfsub_lkup_one(a->name, a->parent);
+}
+
+/* ---------------------------------------------------------------------- */
+
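+/* lock_rename() both parents with lockdep suppressed, then suspend hnotify */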
+struct dentry *vfsub_lock_rename(struct dentry *d1, struct au_hinode *hdir1,
+ struct dentry *d2, struct au_hinode *hdir2)
+{
+ struct dentry *d;
+
+ lockdep_off();
+ d = lock_rename(d1, d2);
+ lockdep_on();
+ au_hn_suspend(hdir1);
+ if (hdir1 != hdir2)
+ au_hn_suspend(hdir2);
+
+ return d;
+}
+
+void vfsub_unlock_rename(struct dentry *d1, struct au_hinode *hdir1,
+ struct dentry *d2, struct au_hinode *hdir2)
+{
+ au_hn_resume(hdir1);
+ if (hdir1 != hdir2)
+ au_hn_resume(hdir2);
+ lockdep_off();
+ unlock_rename(d1, d2);
+ lockdep_on();
+}
+
+/* ---------------------------------------------------------------------- */
+
+int vfsub_create(struct inode *dir, struct path *path, int mode, bool want_excl)
+{
+ int err;
+ struct dentry *d;
+
+ IMustLock(dir);
+
+ d = path->dentry;
+ path->dentry = d->d_parent;
+ err = security_path_mknod(path, d, mode, 0);
+ path->dentry = d;
+ if (unlikely(err))
+ goto out;
+
+ lockdep_off();
+ err = vfs_create(dir, path->dentry, mode, want_excl);
+ lockdep_on();
+ if (!err) {
+ struct path tmp = *path;
+ int did;
+
+ vfsub_update_h_iattr(&tmp, &did);
+ if (did) {
+ tmp.dentry = path->dentry->d_parent;
+ vfsub_update_h_iattr(&tmp, /*did*/NULL);
+ }
+ /*ignore*/
+ }
+
+out:
+ return err;
+}
+
+int vfsub_symlink(struct inode *dir, struct path *path, const char *symname)
+{
+ int err;
+ struct dentry *d;
+
+ IMustLock(dir);
+
+ d = path->dentry;
+ path->dentry = d->d_parent;
+ err = security_path_symlink(path, d, symname);
+ path->dentry = d;
+ if (unlikely(err))
+ goto out;
+
+ lockdep_off();
+ err = vfs_symlink(dir, path->dentry, symname);
+ lockdep_on();
+ if (!err) {
+ struct path tmp = *path;
+ int did;
+
+ vfsub_update_h_iattr(&tmp, &did);
+ if (did) {
+ tmp.dentry = path->dentry->d_parent;
+ vfsub_update_h_iattr(&tmp, /*did*/NULL);
+ }
+ /*ignore*/
+ }
+
+out:
+ return err;
+}
+
+int vfsub_mknod(struct inode *dir, struct path *path, int mode, dev_t dev)
+{
+ int err;
+ struct dentry *d;
+
+ IMustLock(dir);
+
+ d = path->dentry;
+ path->dentry = d->d_parent;
+ err = security_path_mknod(path, d, mode, new_encode_dev(dev));
+ path->dentry = d;
+ if (unlikely(err))
+ goto out;
+
+ lockdep_off();
+ err = vfs_mknod(dir, path->dentry, mode, dev);
+ lockdep_on();
+ if (!err) {
+ struct path tmp = *path;
+ int did;
+
+ vfsub_update_h_iattr(&tmp, &did);
+ if (did) {
+ tmp.dentry = path->dentry->d_parent;
+ vfsub_update_h_iattr(&tmp, /*did*/NULL);
+ }
+ /*ignore*/
+ }
+
+out:
+ return err;
+}
+
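+/* for branch fs types without their own nlink limit, apply a rough cap */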
+static int au_test_nlink(struct inode *inode)
+{
+ const unsigned int link_max = UINT_MAX >> 1; /* rough margin */
+
+ if (!au_test_fs_no_limit_nlink(inode->i_sb)
+ || inode->i_nlink < link_max)
+ return 0;
+ return -EMLINK;
+}
+
+int vfsub_link(struct dentry *src_dentry, struct inode *dir, struct path *path,
+ struct inode **delegated_inode)
+{
+ int err;
+ struct dentry *d;
+
+ IMustLock(dir);
+
+ err = au_test_nlink(d_inode(src_dentry));
+ if (unlikely(err))
+ return err;
+
+ /* we don't call may_linkat() */
+ d = path->dentry;
+ path->dentry = d->d_parent;
+ err = security_path_link(src_dentry, path, d);
+ path->dentry = d;
+ if (unlikely(err))
+ goto out;
+
+ lockdep_off();
+ err = vfs_link(src_dentry, dir, path->dentry, delegated_inode);
+ lockdep_on();
+ if (!err) {
+ struct path tmp = *path;
+ int did;
+
+ /* fuse may have a different in-memory inode for the same inode number */
+ vfsub_update_h_iattr(&tmp, &did);
+ if (did) {
+ tmp.dentry = path->dentry->d_parent;
+ vfsub_update_h_iattr(&tmp, /*did*/NULL);
+ tmp.dentry = src_dentry;
+ vfsub_update_h_iattr(&tmp, /*did*/NULL);
+ }
+ /*ignore*/
+ }
+
+out:
+ return err;
+}
+
+int vfsub_rename(struct inode *src_dir, struct dentry *src_dentry,
+ struct inode *dir, struct path *path,
+ struct inode **delegated_inode)
+{
+ int err;
+ struct path tmp = {
+ .mnt = path->mnt
+ };
+ struct dentry *d;
+
+ IMustLock(dir);
+ IMustLock(src_dir);
+
+ d = path->dentry;
+ path->dentry = d->d_parent;
+ tmp.dentry = src_dentry->d_parent;
+ err = security_path_rename(&tmp, src_dentry, path, d, /*flags*/0);
+ path->dentry = d;
+ if (unlikely(err))
+ goto out;
+
+ lockdep_off();
+ err = vfs_rename(src_dir, src_dentry, dir, path->dentry,
+ delegated_inode, /*flags*/0);
+ lockdep_on();
+ if (!err) {
+ int did;
+
+ tmp.dentry = d->d_parent;
+ vfsub_update_h_iattr(&tmp, &did);
+ if (did) {
+ tmp.dentry = src_dentry;
+ vfsub_update_h_iattr(&tmp, /*did*/NULL);
+ tmp.dentry = src_dentry->d_parent;
+ vfsub_update_h_iattr(&tmp, /*did*/NULL);
+ }
+ /*ignore*/
+ }
+
+out:
+ return err;
+}
+
+int vfsub_mkdir(struct inode *dir, struct path *path, int mode)
+{
+ int err;
+ struct dentry *d;
+
+ IMustLock(dir);
+
+ d = path->dentry;
+ path->dentry = d->d_parent;
+ err = security_path_mkdir(path, d, mode);
+ path->dentry = d;
+ if (unlikely(err))
+ goto out;
+
+ lockdep_off();
+ err = vfs_mkdir(dir, path->dentry, mode);
+ lockdep_on();
+ if (!err) {
+ struct path tmp = *path;
+ int did;
+
+ vfsub_update_h_iattr(&tmp, &did);
+ if (did) {
+ tmp.dentry = path->dentry->d_parent;
+ vfsub_update_h_iattr(&tmp, /*did*/NULL);
+ }
+ /*ignore*/
+ }
+
+out:
+ return err;
+}
+
+int vfsub_rmdir(struct inode *dir, struct path *path)
+{
+ int err;
+ struct dentry *d;
+
+ IMustLock(dir);
+
+ d = path->dentry;
+ path->dentry = d->d_parent;
+ err = security_path_rmdir(path, d);
+ path->dentry = d;
+ if (unlikely(err))
+ goto out;
+
+ lockdep_off();
+ err = vfs_rmdir(dir, path->dentry);
+ lockdep_on();
+ if (!err) {
+ struct path tmp = {
+ .dentry = path->dentry->d_parent,
+ .mnt = path->mnt
+ };
+
+ vfsub_update_h_iattr(&tmp, /*did*/NULL); /*ignore*/
+ }
+
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* todo: support mmap_sem? */
+ssize_t vfsub_read_u(struct file *file, char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ ssize_t err;
+
+ lockdep_off();
+ err = vfs_read(file, ubuf, count, ppos);
+ lockdep_on();
+ if (err >= 0)
+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
+ return err;
+}
+
+/* todo: kernel_read()? */
+ssize_t vfsub_read_k(struct file *file, void *kbuf, size_t count,
+ loff_t *ppos)
+{
+ ssize_t err;
+ mm_segment_t oldfs;
+ union {
+ void *k;
+ char __user *u;
+ } buf;
+
+ buf.k = kbuf;
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = vfsub_read_u(file, buf.u, count, ppos);
+ set_fs(oldfs);
+ return err;
+}
+
+ssize_t vfsub_write_u(struct file *file, const char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ ssize_t err;
+
+ lockdep_off();
+ err = vfs_write(file, ubuf, count, ppos);
+ lockdep_on();
+ if (err >= 0)
+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
+ return err;
+}
+
+ssize_t vfsub_write_k(struct file *file, void *kbuf, size_t count, loff_t *ppos)
+{
+ ssize_t err;
+ mm_segment_t oldfs;
+ union {
+ void *k;
+ const char __user *u;
+ } buf;
+
+ buf.k = kbuf;
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = vfsub_write_u(file, buf.u, count, ppos);
+ set_fs(oldfs);
+ return err;
+}
+
+int vfsub_flush(struct file *file, fl_owner_t id)
+{
+ int err;
+
+ err = 0;
+ if (file->f_op->flush) {
+ if (!au_test_nfs(file->f_path.dentry->d_sb))
+ err = file->f_op->flush(file, id);
+ else {
+ lockdep_off();
+ err = file->f_op->flush(file, id);
+ lockdep_on();
+ }
+ if (!err)
+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL);
+ /*ignore*/
+ }
+ return err;
+}
+
+int vfsub_iterate_dir(struct file *file, struct dir_context *ctx)
+{
+ int err;
+
+ AuDbg("%pD, ctx{%pf, %llu}\n", file, ctx->actor, ctx->pos);
+
+ lockdep_off();
+ err = iterate_dir(file, ctx);
+ lockdep_on();
+ if (err >= 0)
+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/
+ return err;
+}
+
+long vfsub_splice_to(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
+{
+ long err;
+
+ lockdep_off();
+ err = do_splice_to(in, ppos, pipe, len, flags);
+ lockdep_on();
+ file_accessed(in);
+ if (err >= 0)
+ vfsub_update_h_iattr(&in->f_path, /*did*/NULL); /*ignore*/
+ return err;
+}
+
+long vfsub_splice_from(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags)
+{
+ long err;
+
+ lockdep_off();
+ err = do_splice_from(pipe, out, ppos, len, flags);
+ lockdep_on();
+ if (err >= 0)
+ vfsub_update_h_iattr(&out->f_path, /*did*/NULL); /*ignore*/
+ return err;
+}
+
+int vfsub_fsync(struct file *file, struct path *path, int datasync)
+{
+ int err;
+
+ /* file can be NULL */
+ lockdep_off();
+ err = vfs_fsync(file, datasync);
+ lockdep_on();
+ if (!err) {
+ if (!path) {
+ AuDebugOn(!file);
+ path = &file->f_path;
+ }
+ vfsub_update_h_iattr(path, /*did*/NULL); /*ignore*/
+ }
+ return err;
+}
+
+/* cf. open.c:do_sys_truncate() and do_sys_ftruncate() */
+int vfsub_trunc(struct path *h_path, loff_t length, unsigned int attr,
+ struct file *h_file)
+{
+ int err;
+ struct inode *h_inode;
+ struct super_block *h_sb;
+
+ if (!h_file) {
+ err = vfsub_truncate(h_path, length);
+ goto out;
+ }
+
+ h_inode = d_inode(h_path->dentry);
+ h_sb = h_inode->i_sb;
+ lockdep_off();
+ sb_start_write(h_sb);
+ lockdep_on();
+ err = locks_verify_truncate(h_inode, h_file, length);
+ if (!err)
+ err = security_path_truncate(h_path);
+ if (!err) {
+ lockdep_off();
+ err = do_truncate(h_path->dentry, length, attr, h_file);
+ lockdep_on();
+ }
+ lockdep_off();
+ sb_end_write(h_sb);
+ lockdep_on();
+
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
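+/* mkdir/rmdir via the workqueue (sio) when the caller lacks perm on the dir */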
+struct au_vfsub_mkdir_args {
+ int *errp;
+ struct inode *dir;
+ struct path *path;
+ int mode;
+};
+
+static void au_call_vfsub_mkdir(void *args)
+{
+ struct au_vfsub_mkdir_args *a = args;
+ *a->errp = vfsub_mkdir(a->dir, a->path, a->mode);
+}
+
+int vfsub_sio_mkdir(struct inode *dir, struct path *path, int mode)
+{
+ int err, do_sio, wkq_err;
+
+ do_sio = au_test_h_perm_sio(dir, MAY_EXEC | MAY_WRITE);
+ if (!do_sio) {
+ lockdep_off();
+ err = vfsub_mkdir(dir, path, mode);
+ lockdep_on();
+ } else {
+ struct au_vfsub_mkdir_args args = {
+ .errp = &err,
+ .dir = dir,
+ .path = path,
+ .mode = mode
+ };
+ wkq_err = au_wkq_wait(au_call_vfsub_mkdir, &args);
+ if (unlikely(wkq_err))
+ err = wkq_err;
+ }
+
+ return err;
+}
+
+struct au_vfsub_rmdir_args {
+ int *errp;
+ struct inode *dir;
+ struct path *path;
+};
+
+static void au_call_vfsub_rmdir(void *args)
+{
+ struct au_vfsub_rmdir_args *a = args;
+ *a->errp = vfsub_rmdir(a->dir, a->path);
+}
+
+int vfsub_sio_rmdir(struct inode *dir, struct path *path)
+{
+ int err, do_sio, wkq_err;
+
+ do_sio = au_test_h_perm_sio(dir, MAY_EXEC | MAY_WRITE);
+ if (!do_sio) {
+ lockdep_off();
+ err = vfsub_rmdir(dir, path);
+ lockdep_on();
+ } else {
+ struct au_vfsub_rmdir_args args = {
+ .errp = &err,
+ .dir = dir,
+ .path = path
+ };
+ wkq_err = au_wkq_wait(au_call_vfsub_rmdir, &args);
+ if (unlikely(wkq_err))
+ err = wkq_err;
+ }
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct notify_change_args {
+ int *errp;
+ struct path *path;
+ struct iattr *ia;
+ struct inode **delegated_inode;
+};
+
+static void call_notify_change(void *args)
+{
+ struct notify_change_args *a = args;
+ struct inode *h_inode;
+
+ h_inode = d_inode(a->path->dentry);
+ IMustLock(h_inode);
+
+ *a->errp = -EPERM;
+ if (!IS_IMMUTABLE(h_inode) && !IS_APPEND(h_inode)) {
+ lockdep_off();
+ *a->errp = notify_change(a->path->dentry, a->ia,
+ a->delegated_inode);
+ lockdep_on();
+ if (!*a->errp)
+ vfsub_update_h_iattr(a->path, /*did*/NULL); /*ignore*/
+ }
+ AuTraceErr(*a->errp);
+}
+
+int vfsub_notify_change(struct path *path, struct iattr *ia,
+ struct inode **delegated_inode)
+{
+ int err;
+ struct notify_change_args args = {
+ .errp = &err,
+ .path = path,
+ .ia = ia,
+ .delegated_inode = delegated_inode
+ };
+
+ call_notify_change(&args);
+
+ return err;
+}
+
+int vfsub_sio_notify_change(struct path *path, struct iattr *ia,
+ struct inode **delegated_inode)
+{
+ int err, wkq_err;
+ struct notify_change_args args = {
+ .errp = &err,
+ .path = path,
+ .ia = ia,
+ .delegated_inode = delegated_inode
+ };
+
+ wkq_err = au_wkq_wait(call_notify_change, &args);
+ if (unlikely(wkq_err))
+ err = wkq_err;
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct unlink_args {
+ int *errp;
+ struct inode *dir;
+ struct path *path;
+ struct inode **delegated_inode;
+};
+
+static void call_unlink(void *args)
+{
+ struct unlink_args *a = args;
+ struct dentry *d = a->path->dentry;
+ struct inode *h_inode;
+ const int stop_sillyrename = (au_test_nfs(d->d_sb)
+ && au_dcount(d) == 1);
+
+ IMustLock(a->dir);
+
+ a->path->dentry = d->d_parent;
+ *a->errp = security_path_unlink(a->path, d);
+ a->path->dentry = d;
+ if (unlikely(*a->errp))
+ return;
+
+ if (!stop_sillyrename)
+ dget(d);
+ h_inode = NULL;
+ if (d_is_positive(d)) {
+ h_inode = d_inode(d);
+ ihold(h_inode);
+ }
+
+ lockdep_off();
+ *a->errp = vfs_unlink(a->dir, d, a->delegated_inode);
+ lockdep_on();
+ if (!*a->errp) {
+ struct path tmp = {
+ .dentry = d->d_parent,
+ .mnt = a->path->mnt
+ };
+ vfsub_update_h_iattr(&tmp, /*did*/NULL); /*ignore*/
+ }
+
+ if (!stop_sillyrename)
+ dput(d);
+ if (h_inode)
+ iput(h_inode);
+
+ AuTraceErr(*a->errp);
+}
+
+/*
+ * @dir: must be locked.
+ * @path: contains the target dentry.
+ */
+int vfsub_unlink(struct inode *dir, struct path *path,
+ struct inode **delegated_inode, int force)
+{
+ int err;
+ struct unlink_args args = {
+ .errp = &err,
+ .dir = dir,
+ .path = path,
+ .delegated_inode = delegated_inode
+ };
+
+ if (!force)
+ call_unlink(&args);
+ else {
+ int wkq_err;
+
+ wkq_err = au_wkq_wait(call_unlink, &args);
+ if (unlikely(wkq_err))
+ err = wkq_err;
+ }
+
+ return err;
+}
diff --git a/fs/aufs/vfsub.h b/fs/aufs/vfsub.h
new file mode 100644
index 000000000..4e08d33c2
--- /dev/null
+++ b/fs/aufs/vfsub.h
@@ -0,0 +1,297 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * sub-routines for VFS
+ */
+
+#ifndef __AUFS_VFSUB_H__
+#define __AUFS_VFSUB_H__
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/posix_acl.h>
+#include <linux/xattr.h>
+#include "debug.h"
+
+/* copied from linux/fs/internal.h */
+/* todo: BAD approach!! */
+extern void __mnt_drop_write(struct vfsmount *);
+extern int open_check_o_direct(struct file *f);
+
+/* ---------------------------------------------------------------------- */
+
+/* lock subclass for lower inode */
+/* default MAX_LOCKDEP_SUBCLASSES(8) is not enough */
+/* reduce? gave up. */
+enum {
+ AuLsc_I_Begin = I_MUTEX_PARENT2, /* 5 */
+ AuLsc_I_PARENT, /* lower inode, parent first */
+ AuLsc_I_PARENT2, /* copyup dirs */
+ AuLsc_I_PARENT3, /* copyup wh */
+ AuLsc_I_CHILD,
+ AuLsc_I_CHILD2,
+ AuLsc_I_End
+};
+
+/* to make debugging easier, do not make these inline functions */
+#define MtxMustLock(mtx) AuDebugOn(!mutex_is_locked(mtx))
+#define IMustLock(i) AuDebugOn(!inode_is_locked(i))
+
+/* ---------------------------------------------------------------------- */
+
+static inline void vfsub_drop_nlink(struct inode *inode)
+{
+ AuDebugOn(!inode->i_nlink);
+ drop_nlink(inode);
+}
+
+static inline void vfsub_dead_dir(struct inode *inode)
+{
+ AuDebugOn(!S_ISDIR(inode->i_mode));
+ inode->i_flags |= S_DEAD;
+ clear_nlink(inode);
+}
+
+static inline int vfsub_native_ro(struct inode *inode)
+{
+ return (inode->i_sb->s_flags & MS_RDONLY)
+ || IS_RDONLY(inode)
+ /* || IS_APPEND(inode) */
+ || IS_IMMUTABLE(inode);
+}
+
+#ifdef CONFIG_AUFS_BR_FUSE
+int vfsub_test_mntns(struct vfsmount *mnt, struct super_block *h_sb);
+#else
+AuStubInt0(vfsub_test_mntns, struct vfsmount *mnt, struct super_block *h_sb);
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+int vfsub_update_h_iattr(struct path *h_path, int *did);
+struct file *vfsub_dentry_open(struct path *path, int flags);
+struct file *vfsub_filp_open(const char *path, int oflags, int mode);
+struct vfsub_aopen_args {
+ struct file *file;
+ unsigned int open_flag;
+ umode_t create_mode;
+ int *opened;
+};
+struct au_branch;
+int vfsub_atomic_open(struct inode *dir, struct dentry *dentry,
+ struct vfsub_aopen_args *args, struct au_branch *br);
+int vfsub_kern_path(const char *name, unsigned int flags, struct path *path);
+
+struct dentry *vfsub_lookup_one_len_unlocked(const char *name,
+ struct dentry *parent, int len);
+struct dentry *vfsub_lookup_one_len(const char *name, struct dentry *parent,
+ int len);
+
+struct vfsub_lkup_one_args {
+ struct dentry **errp;
+ struct qstr *name;
+ struct dentry *parent;
+};
+
+static inline struct dentry *vfsub_lkup_one(struct qstr *name,
+ struct dentry *parent)
+{
+ return vfsub_lookup_one_len(name->name, parent, name->len);
+}
+
+void vfsub_call_lkup_one(void *args);
+
+/* ---------------------------------------------------------------------- */
+
+static inline int vfsub_mnt_want_write(struct vfsmount *mnt)
+{
+ int err;
+
+ lockdep_off();
+ err = mnt_want_write(mnt);
+ lockdep_on();
+ return err;
+}
+
+static inline void vfsub_mnt_drop_write(struct vfsmount *mnt)
+{
+ lockdep_off();
+ mnt_drop_write(mnt);
+ lockdep_on();
+}
+
+#if 0 /* reserved */
+static inline void vfsub_mnt_drop_write_file(struct file *file)
+{
+ lockdep_off();
+ mnt_drop_write_file(file);
+ lockdep_on();
+}
+#endif
+
+/* ---------------------------------------------------------------------- */
+
+struct au_hinode;
+struct dentry *vfsub_lock_rename(struct dentry *d1, struct au_hinode *hdir1,
+ struct dentry *d2, struct au_hinode *hdir2);
+void vfsub_unlock_rename(struct dentry *d1, struct au_hinode *hdir1,
+ struct dentry *d2, struct au_hinode *hdir2);
+
+int vfsub_create(struct inode *dir, struct path *path, int mode,
+ bool want_excl);
+int vfsub_symlink(struct inode *dir, struct path *path,
+ const char *symname);
+int vfsub_mknod(struct inode *dir, struct path *path, int mode, dev_t dev);
+int vfsub_link(struct dentry *src_dentry, struct inode *dir,
+ struct path *path, struct inode **delegated_inode);
+int vfsub_rename(struct inode *src_hdir, struct dentry *src_dentry,
+ struct inode *hdir, struct path *path,
+ struct inode **delegated_inode);
+int vfsub_mkdir(struct inode *dir, struct path *path, int mode);
+int vfsub_rmdir(struct inode *dir, struct path *path);
+
+/* ---------------------------------------------------------------------- */
+
+ssize_t vfsub_read_u(struct file *file, char __user *ubuf, size_t count,
+ loff_t *ppos);
+ssize_t vfsub_read_k(struct file *file, void *kbuf, size_t count,
+ loff_t *ppos);
+ssize_t vfsub_write_u(struct file *file, const char __user *ubuf, size_t count,
+ loff_t *ppos);
+ssize_t vfsub_write_k(struct file *file, void *kbuf, size_t count,
+ loff_t *ppos);
+int vfsub_flush(struct file *file, fl_owner_t id);
+int vfsub_iterate_dir(struct file *file, struct dir_context *ctx);
+
+static inline loff_t vfsub_f_size_read(struct file *file)
+{
+ return i_size_read(file_inode(file));
+}
+
+static inline unsigned int vfsub_file_flags(struct file *file)
+{
+ unsigned int flags;
+
+ spin_lock(&file->f_lock);
+ flags = file->f_flags;
+ spin_unlock(&file->f_lock);
+
+ return flags;
+}
+
+#if 0 /* reserved */
+static inline void vfsub_file_accessed(struct file *h_file)
+{
+ file_accessed(h_file);
+ vfsub_update_h_iattr(&h_file->f_path, /*did*/NULL); /*ignore*/
+}
+#endif
+
+#if 0 /* reserved */
+static inline void vfsub_touch_atime(struct vfsmount *h_mnt,
+ struct dentry *h_dentry)
+{
+ struct path h_path = {
+ .dentry = h_dentry,
+ .mnt = h_mnt
+ };
+ touch_atime(&h_path);
+ vfsub_update_h_iattr(&h_path, /*did*/NULL); /*ignore*/
+}
+#endif
+
+static inline int vfsub_update_time(struct inode *h_inode, struct timespec *ts,
+ int flags)
+{
+ return generic_update_time(h_inode, ts, flags);
+ /* no vfsub_update_h_iattr() since we don't have struct path */
+}
+
+#ifdef CONFIG_FS_POSIX_ACL
+static inline int vfsub_acl_chmod(struct inode *h_inode, umode_t h_mode)
+{
+ int err;
+
+ err = posix_acl_chmod(h_inode, h_mode);
+ if (err == -EOPNOTSUPP)
+ err = 0;
+ return err;
+}
+#else
+AuStubInt0(vfsub_acl_chmod, struct inode *h_inode, umode_t h_mode);
+#endif
+
+long vfsub_splice_to(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags);
+long vfsub_splice_from(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags);
+
+static inline long vfsub_truncate(struct path *path, loff_t length)
+{
+ long err;
+
+ lockdep_off();
+ err = vfs_truncate(path, length);
+ lockdep_on();
+ return err;
+}
+
+int vfsub_trunc(struct path *h_path, loff_t length, unsigned int attr,
+ struct file *h_file);
+int vfsub_fsync(struct file *file, struct path *path, int datasync);
+
+/* ---------------------------------------------------------------------- */
+
+static inline loff_t vfsub_llseek(struct file *file, loff_t offset, int origin)
+{
+ loff_t err;
+
+ lockdep_off();
+ err = vfs_llseek(file, offset, origin);
+ lockdep_on();
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int vfsub_sio_mkdir(struct inode *dir, struct path *path, int mode);
+int vfsub_sio_rmdir(struct inode *dir, struct path *path);
+int vfsub_sio_notify_change(struct path *path, struct iattr *ia,
+ struct inode **delegated_inode);
+int vfsub_notify_change(struct path *path, struct iattr *ia,
+ struct inode **delegated_inode);
+int vfsub_unlink(struct inode *dir, struct path *path,
+ struct inode **delegated_inode, int force);
+
+/* ---------------------------------------------------------------------- */
+
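+/* xattr helpers; lockdep is suppressed as in the other vfsub wrappers */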
+static inline int vfsub_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags)
+{
+ int err;
+
+ lockdep_off();
+ err = vfs_setxattr(dentry, name, value, size, flags);
+ lockdep_on();
+
+ return err;
+}
+
+static inline int vfsub_removexattr(struct dentry *dentry, const char *name)
+{
+ int err;
+
+ lockdep_off();
+ err = vfs_removexattr(dentry, name);
+ lockdep_on();
+
+ return err;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_VFSUB_H__ */
diff --git a/fs/aufs/wbr_policy.c b/fs/aufs/wbr_policy.c
new file mode 100644
index 000000000..994316302
--- /dev/null
+++ b/fs/aufs/wbr_policy.c
@@ -0,0 +1,752 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * policies for selecting one among multiple writable branches
+ */
+
+#include <linux/statfs.h>
+#include "aufs.h"
+
+/* subset of cpup_attr() */
+static noinline_for_stack
+int au_cpdown_attr(struct path *h_path, struct dentry *h_src)
+{
+ int err, sbits;
+ struct iattr ia;
+ struct inode *h_isrc;
+
+ h_isrc = d_inode(h_src);
+ ia.ia_valid = ATTR_FORCE | ATTR_MODE | ATTR_UID | ATTR_GID;
+ ia.ia_mode = h_isrc->i_mode;
+ ia.ia_uid = h_isrc->i_uid;
+ ia.ia_gid = h_isrc->i_gid;
+ sbits = !!(ia.ia_mode & (S_ISUID | S_ISGID));
+ au_cpup_attr_flags(d_inode(h_path->dentry), h_isrc->i_flags);
+ /* no delegation since it is just created */
+ err = vfsub_sio_notify_change(h_path, &ia, /*delegated*/NULL);
+
+ /* is this nfs only? */
+ if (!err && sbits && au_test_nfs(h_path->dentry->d_sb)) {
+ ia.ia_valid = ATTR_FORCE | ATTR_MODE;
+ ia.ia_mode = h_isrc->i_mode;
+ err = vfsub_sio_notify_change(h_path, &ia, /*delegated*/NULL);
+ }
+
+ return err;
+}
+
+#define AuCpdown_PARENT_OPQ 1
+#define AuCpdown_WHED (1 << 1)
+#define AuCpdown_MADE_DIR (1 << 2)
+#define AuCpdown_DIROPQ (1 << 3)
+#define au_ftest_cpdown(flags, name) ((flags) & AuCpdown_##name)
+#define au_fset_cpdown(flags, name) \
+ do { (flags) |= AuCpdown_##name; } while (0)
+#define au_fclr_cpdown(flags, name) \
+ do { (flags) &= ~AuCpdown_##name; } while (0)
+
+static int au_cpdown_dir_opq(struct dentry *dentry, aufs_bindex_t bdst,
+ unsigned int *flags)
+{
+ int err;
+ struct dentry *opq_dentry;
+
+ opq_dentry = au_diropq_create(dentry, bdst);
+ err = PTR_ERR(opq_dentry);
+ if (IS_ERR(opq_dentry))
+ goto out;
+ dput(opq_dentry);
+ au_fset_cpdown(*flags, DIROPQ);
+
+out:
+ return err;
+}
+
+static int au_cpdown_dir_wh(struct dentry *dentry, struct dentry *h_parent,
+ struct inode *dir, aufs_bindex_t bdst)
+{
+ int err;
+ struct path h_path;
+ struct au_branch *br;
+
+ br = au_sbr(dentry->d_sb, bdst);
+ h_path.dentry = au_wh_lkup(h_parent, &dentry->d_name, br);
+ err = PTR_ERR(h_path.dentry);
+ if (IS_ERR(h_path.dentry))
+ goto out;
+
+ err = 0;
+ if (d_is_positive(h_path.dentry)) {
+ h_path.mnt = au_br_mnt(br);
+ err = au_wh_unlink_dentry(au_h_iptr(dir, bdst), &h_path,
+ dentry);
+ }
+ dput(h_path.dentry);
+
+out:
+ return err;
+}
+
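+/* copy-down one dir: create it on the branch, handling attrs, diropq and wh */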
+static int au_cpdown_dir(struct dentry *dentry, aufs_bindex_t bdst,
+ struct au_pin *pin,
+ struct dentry *h_parent, void *arg)
+{
+ int err, rerr;
+ aufs_bindex_t bopq, bstart;
+ struct path h_path;
+ struct dentry *parent;
+ struct inode *h_dir, *h_inode, *inode, *dir;
+ unsigned int *flags = arg;
+
+ bstart = au_dbstart(dentry);
+ /* dentry is di-locked */
+ parent = dget_parent(dentry);
+ dir = d_inode(parent);
+ h_dir = d_inode(h_parent);
+ AuDebugOn(h_dir != au_h_iptr(dir, bdst));
+ IMustLock(h_dir);
+
+ err = au_lkup_neg(dentry, bdst, /*wh*/0);
+ if (unlikely(err < 0))
+ goto out;
+ h_path.dentry = au_h_dptr(dentry, bdst);
+ h_path.mnt = au_sbr_mnt(dentry->d_sb, bdst);
+ err = vfsub_sio_mkdir(au_h_iptr(dir, bdst), &h_path,
+ S_IRWXU | S_IRUGO | S_IXUGO);
+ if (unlikely(err))
+ goto out_put;
+ au_fset_cpdown(*flags, MADE_DIR);
+
+ bopq = au_dbdiropq(dentry);
+ au_fclr_cpdown(*flags, WHED);
+ au_fclr_cpdown(*flags, DIROPQ);
+ if (au_dbwh(dentry) == bdst)
+ au_fset_cpdown(*flags, WHED);
+ if (!au_ftest_cpdown(*flags, PARENT_OPQ) && bopq <= bdst)
+ au_fset_cpdown(*flags, PARENT_OPQ);
+ h_inode = d_inode(h_path.dentry);
+ inode_lock_nested(h_inode, AuLsc_I_CHILD);
+ if (au_ftest_cpdown(*flags, WHED)) {
+ err = au_cpdown_dir_opq(dentry, bdst, flags);
+ if (unlikely(err)) {
+ inode_unlock(h_inode);
+ goto out_dir;
+ }
+ }
+
+ err = au_cpdown_attr(&h_path, au_h_dptr(dentry, bstart));
+ inode_unlock(h_inode);
+ if (unlikely(err))
+ goto out_opq;
+
+ if (au_ftest_cpdown(*flags, WHED)) {
+ err = au_cpdown_dir_wh(dentry, h_parent, dir, bdst);
+ if (unlikely(err))
+ goto out_opq;
+ }
+
+ inode = d_inode(dentry);
+ if (au_ibend(inode) < bdst)
+ au_set_ibend(inode, bdst);
+ au_set_h_iptr(inode, bdst, au_igrab(h_inode),
+ au_hi_flags(inode, /*isdir*/1));
+ au_fhsm_wrote(dentry->d_sb, bdst, /*force*/0);
+ goto out; /* success */
+
+ /* revert */
+out_opq:
+ if (au_ftest_cpdown(*flags, DIROPQ)) {
+ inode_lock_nested(h_inode, AuLsc_I_CHILD);
+ rerr = au_diropq_remove(dentry, bdst);
+ inode_unlock(h_inode);
+ if (unlikely(rerr)) {
+ AuIOErr("failed removing diropq for %pd b%d (%d)\n",
+ dentry, bdst, rerr);
+ err = -EIO;
+ goto out;
+ }
+ }
+out_dir:
+ if (au_ftest_cpdown(*flags, MADE_DIR)) {
+ rerr = vfsub_sio_rmdir(au_h_iptr(dir, bdst), &h_path);
+ if (unlikely(rerr)) {
+ AuIOErr("failed removing %pd b%d (%d)\n",
+ dentry, bdst, rerr);
+ err = -EIO;
+ }
+ }
+out_put:
+ au_set_h_dptr(dentry, bdst, NULL);
+ if (au_dbend(dentry) == bdst)
+ au_update_dbend(dentry);
+out:
+ dput(parent);
+ return err;
+}
+
+int au_cpdown_dirs(struct dentry *dentry, aufs_bindex_t bdst)
+{
+ int err;
+ unsigned int flags;
+
+ flags = 0;
+ err = au_cp_dirs(dentry, bdst, au_cpdown_dir, &flags);
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* policies for create */
+
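+/* never create below an ancestor's diropq: returns min(bindex, their diropq) */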
+int au_wbr_nonopq(struct dentry *dentry, aufs_bindex_t bindex)
+{
+ int err, i, j, ndentry;
+ aufs_bindex_t bopq;
+ struct au_dcsub_pages dpages;
+ struct au_dpage *dpage;
+ struct dentry **dentries, *parent, *d;
+
+ err = au_dpages_init(&dpages, GFP_NOFS);
+ if (unlikely(err))
+ goto out;
+ parent = dget_parent(dentry);
+ err = au_dcsub_pages_rev_aufs(&dpages, parent, /*do_include*/0);
+ if (unlikely(err))
+ goto out_free;
+
+ err = bindex;
+ for (i = 0; i < dpages.ndpage; i++) {
+ dpage = dpages.dpages + i;
+ dentries = dpage->dentries;
+ ndentry = dpage->ndentry;
+ for (j = 0; j < ndentry; j++) {
+ d = dentries[j];
+ di_read_lock_parent2(d, !AuLock_IR);
+ bopq = au_dbdiropq(d);
+ di_read_unlock(d, !AuLock_IR);
+ if (bopq >= 0 && bopq < err)
+ err = bopq;
+ }
+ }
+
+out_free:
+ dput(parent);
+ au_dpages_free(&dpages);
+out:
+ return err;
+}
+
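+/* search from bindex upwards (towards index 0) for a writable branch */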
+static int au_wbr_bu(struct super_block *sb, aufs_bindex_t bindex)
+{
+ for (; bindex >= 0; bindex--)
+ if (!au_br_rdonly(au_sbr(sb, bindex)))
+ return bindex;
+ return -EROFS;
+}
+
+/* top down parent */
+static int au_wbr_create_tdp(struct dentry *dentry,
+ unsigned int flags __maybe_unused)
+{
+ int err;
+ aufs_bindex_t bstart, bindex;
+ struct super_block *sb;
+ struct dentry *parent, *h_parent;
+
+ sb = dentry->d_sb;
+ bstart = au_dbstart(dentry);
+ err = bstart;
+ if (!au_br_rdonly(au_sbr(sb, bstart)))
+ goto out;
+
+ err = -EROFS;
+ parent = dget_parent(dentry);
+ for (bindex = au_dbstart(parent); bindex < bstart; bindex++) {
+ h_parent = au_h_dptr(parent, bindex);
+ if (!h_parent || d_is_negative(h_parent))
+ continue;
+
+ if (!au_br_rdonly(au_sbr(sb, bindex))) {
+ err = bindex;
+ break;
+ }
+ }
+ dput(parent);
+
+ /* bottom up here */
+ if (unlikely(err < 0)) {
+ err = au_wbr_bu(sb, bstart - 1);
+ if (err >= 0)
+ err = au_wbr_nonopq(dentry, err);
+ }
+
+out:
+ AuDbg("b%d\n", err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* an exception for the policy other than tdp */
+static int au_wbr_create_exp(struct dentry *dentry)
+{
+ int err;
+ aufs_bindex_t bwh, bdiropq;
+ struct dentry *parent;
+
+ err = -1;
+ bwh = au_dbwh(dentry);
+ parent = dget_parent(dentry);
+ bdiropq = au_dbdiropq(parent);
+ if (bwh >= 0) {
+ if (bdiropq >= 0)
+ err = min(bdiropq, bwh);
+ else
+ err = bwh;
+ AuDbg("%d\n", err);
+ } else if (bdiropq >= 0) {
+ err = bdiropq;
+ AuDbg("%d\n", err);
+ }
+ dput(parent);
+
+ if (err >= 0)
+ err = au_wbr_nonopq(dentry, err);
+
+ if (err >= 0 && au_br_rdonly(au_sbr(dentry->d_sb, err)))
+ err = -1;
+
+ AuDbg("%d\n", err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* round robin */
+static int au_wbr_create_init_rr(struct super_block *sb)
+{
+ int err;
+
+ err = au_wbr_bu(sb, au_sbend(sb));
+ atomic_set(&au_sbi(sb)->si_wbr_rr_next, -err); /* less important */
+ /* smp_mb(); */
+
+ AuDbg("b%d\n", err);
+ return err;
+}
+
+static int au_wbr_create_rr(struct dentry *dentry, unsigned int flags)
+{
+ int err, nbr;
+ unsigned int u;
+ aufs_bindex_t bindex, bend;
+ struct super_block *sb;
+ atomic_t *next;
+
+ err = au_wbr_create_exp(dentry);
+ if (err >= 0)
+ goto out;
+
+ sb = dentry->d_sb;
+ next = &au_sbi(sb)->si_wbr_rr_next;
+ bend = au_sbend(sb);
+ nbr = bend + 1;
+ for (bindex = 0; bindex <= bend; bindex++) {
+ if (!au_ftest_wbr(flags, DIR)) {
+ err = atomic_dec_return(next) + 1;
+ /* modulo for 0 is meaningless */
+ if (unlikely(!err))
+ err = atomic_dec_return(next) + 1;
+ } else
+ err = atomic_read(next);
+ AuDbg("%d\n", err);
+ u = err;
+ err = u % nbr;
+ AuDbg("%d\n", err);
+ if (!au_br_rdonly(au_sbr(sb, err)))
+ break;
+ err = -EROFS;
+ }
+
+ if (err >= 0)
+ err = au_wbr_nonopq(dentry, err);
+
+out:
+ AuDbg("%d\n", err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* most free space */
+static void au_mfs(struct dentry *dentry, struct dentry *parent)
+{
+ struct super_block *sb;
+ struct au_branch *br;
+ struct au_wbr_mfs *mfs;
+ struct dentry *h_parent;
+ aufs_bindex_t bindex, bend;
+ int err;
+ unsigned long long b, bavail;
+ struct path h_path;
+ /* reduce the stack usage */
+ struct kstatfs *st;
+
+ st = kmalloc(sizeof(*st), GFP_NOFS);
+ if (unlikely(!st)) {
+ AuWarn1("failed updating mfs(%d), ignored\n", -ENOMEM);
+ return;
+ }
+
+ bavail = 0;
+ sb = dentry->d_sb;
+ mfs = &au_sbi(sb)->si_wbr_mfs;
+ MtxMustLock(&mfs->mfs_lock);
+ mfs->mfs_bindex = -EROFS;
+ mfs->mfsrr_bytes = 0;
+ if (!parent) {
+ bindex = 0;
+ bend = au_sbend(sb);
+ } else {
+ bindex = au_dbstart(parent);
+ bend = au_dbtaildir(parent);
+ }
+
+ for (; bindex <= bend; bindex++) {
+ if (parent) {
+ h_parent = au_h_dptr(parent, bindex);
+ if (!h_parent || d_is_negative(h_parent))
+ continue;
+ }
+ br = au_sbr(sb, bindex);
+ if (au_br_rdonly(br))
+ continue;
+
+ /* sb->s_root for NFS is unreliable */
+ h_path.mnt = au_br_mnt(br);
+ h_path.dentry = h_path.mnt->mnt_root;
+ err = vfs_statfs(&h_path, st);
+ if (unlikely(err)) {
+ AuWarn1("failed statfs, b%d, %d\n", bindex, err);
+ continue;
+ }
+
+ /* when the available sizes are equal, select the lower one */
+ BUILD_BUG_ON(sizeof(b) < sizeof(st->f_bavail)
+ || sizeof(b) < sizeof(st->f_bsize));
+ b = st->f_bavail * st->f_bsize;
+ br->br_wbr->wbr_bytes = b;
+ if (b >= bavail) {
+ bavail = b;
+ mfs->mfs_bindex = bindex;
+ mfs->mfs_jiffy = jiffies;
+ }
+ }
+
+ mfs->mfsrr_bytes = bavail;
+ AuDbg("b%d\n", mfs->mfs_bindex);
+ kfree(st);
+}
+
+static int au_wbr_create_mfs(struct dentry *dentry, unsigned int flags)
+{
+ int err;
+ struct dentry *parent;
+ struct super_block *sb;
+ struct au_wbr_mfs *mfs;
+
+ err = au_wbr_create_exp(dentry);
+ if (err >= 0)
+ goto out;
+
+ sb = dentry->d_sb;
+ parent = NULL;
+ if (au_ftest_wbr(flags, PARENT))
+ parent = dget_parent(dentry);
+ mfs = &au_sbi(sb)->si_wbr_mfs;
+ mutex_lock(&mfs->mfs_lock);
+ if (time_after(jiffies, mfs->mfs_jiffy + mfs->mfs_expire)
+ || mfs->mfs_bindex < 0
+ || au_br_rdonly(au_sbr(sb, mfs->mfs_bindex)))
+ au_mfs(dentry, parent);
+ mutex_unlock(&mfs->mfs_lock);
+ err = mfs->mfs_bindex;
+ dput(parent);
+
+ if (err >= 0)
+ err = au_wbr_nonopq(dentry, err);
+
+out:
+ AuDbg("b%d\n", err);
+ return err;
+}
+
+static int au_wbr_create_init_mfs(struct super_block *sb)
+{
+ struct au_wbr_mfs *mfs;
+
+ mfs = &au_sbi(sb)->si_wbr_mfs;
+ mutex_init(&mfs->mfs_lock);
+ mfs->mfs_jiffy = 0;
+ mfs->mfs_bindex = -EROFS;
+
+ return 0;
+}
+
+static int au_wbr_create_fin_mfs(struct super_block *sb __maybe_unused)
+{
+ mutex_destroy(&au_sbi(sb)->si_wbr_mfs.mfs_lock);
+ return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* most free space and then round robin */
+static int au_wbr_create_mfsrr(struct dentry *dentry, unsigned int flags)
+{
+ int err;
+ struct au_wbr_mfs *mfs;
+
+ err = au_wbr_create_mfs(dentry, flags);
+ if (err >= 0) {
+ mfs = &au_sbi(dentry->d_sb)->si_wbr_mfs;
+ mutex_lock(&mfs->mfs_lock);
+ if (mfs->mfsrr_bytes < mfs->mfsrr_watermark)
+ err = au_wbr_create_rr(dentry, flags);
+ mutex_unlock(&mfs->mfs_lock);
+ }
+
+ AuDbg("b%d\n", err);
+ return err;
+}
+
+static int au_wbr_create_init_mfsrr(struct super_block *sb)
+{
+ int err;
+
+ au_wbr_create_init_mfs(sb); /* ignore */
+ err = au_wbr_create_init_rr(sb);
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* top down parent and most free space */
+static int au_wbr_create_pmfs(struct dentry *dentry, unsigned int flags)
+{
+ int err, e2;
+ unsigned long long b;
+ aufs_bindex_t bindex, bstart, bend;
+ struct super_block *sb;
+ struct dentry *parent, *h_parent;
+ struct au_branch *br;
+
+ err = au_wbr_create_tdp(dentry, flags);
+ if (unlikely(err < 0))
+ goto out;
+ parent = dget_parent(dentry);
+ bstart = au_dbstart(parent);
+ bend = au_dbtaildir(parent);
+ if (bstart == bend)
+ goto out_parent; /* success */
+
+ e2 = au_wbr_create_mfs(dentry, flags);
+ if (e2 < 0)
+ goto out_parent; /* success */
+
+ /* when the available sizes are equal, select the upper one */
+ sb = dentry->d_sb;
+ br = au_sbr(sb, err);
+ b = br->br_wbr->wbr_bytes;
+ AuDbg("b%d, %llu\n", err, b);
+
+ for (bindex = bstart; bindex <= bend; bindex++) {
+ h_parent = au_h_dptr(parent, bindex);
+ if (!h_parent || d_is_negative(h_parent))
+ continue;
+
+ br = au_sbr(sb, bindex);
+ if (!au_br_rdonly(br) && br->br_wbr->wbr_bytes > b) {
+ b = br->br_wbr->wbr_bytes;
+ err = bindex;
+ AuDbg("b%d, %llu\n", err, b);
+ }
+ }
+
+ if (err >= 0)
+ err = au_wbr_nonopq(dentry, err);
+
+out_parent:
+ dput(parent);
+out:
+ AuDbg("b%d\n", err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * - top down parent
+ * - most free space with parent
+ * - most free space round-robin, regardless of the parent
+ */
+static int au_wbr_create_pmfsrr(struct dentry *dentry, unsigned int flags)
+{
+ int err;
+ unsigned long long watermark;
+ struct super_block *sb;
+ struct au_branch *br;
+ struct au_wbr_mfs *mfs;
+
+ err = au_wbr_create_pmfs(dentry, flags | AuWbr_PARENT);
+ if (unlikely(err < 0))
+ goto out;
+
+ sb = dentry->d_sb;
+ br = au_sbr(sb, err);
+ mfs = &au_sbi(sb)->si_wbr_mfs;
+ mutex_lock(&mfs->mfs_lock);
+ watermark = mfs->mfsrr_watermark;
+ mutex_unlock(&mfs->mfs_lock);
+ if (br->br_wbr->wbr_bytes < watermark)
+ /* regardless of the parent dir */
+ err = au_wbr_create_mfsrr(dentry, flags);
+
+out:
+ AuDbg("b%d\n", err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* policies for copyup */
+
+/* top down parent */
+static int au_wbr_copyup_tdp(struct dentry *dentry)
+{
+ return au_wbr_create_tdp(dentry, /*flags, anything is ok*/0);
+}
+
+/* bottom up parent */
+static int au_wbr_copyup_bup(struct dentry *dentry)
+{
+ int err;
+ aufs_bindex_t bindex, bstart;
+ struct dentry *parent, *h_parent;
+ struct super_block *sb;
+
+ err = -EROFS;
+ sb = dentry->d_sb;
+ parent = dget_parent(dentry);
+ bstart = au_dbstart(parent);
+ for (bindex = au_dbstart(dentry); bindex >= bstart; bindex--) {
+ h_parent = au_h_dptr(parent, bindex);
+ if (!h_parent || d_is_negative(h_parent))
+ continue;
+
+ if (!au_br_rdonly(au_sbr(sb, bindex))) {
+ err = bindex;
+ break;
+ }
+ }
+ dput(parent);
+
+ /* bottom up here */
+ if (unlikely(err < 0))
+ err = au_wbr_bu(sb, bstart - 1);
+
+ AuDbg("b%d\n", err);
+ return err;
+}
+
+/* bottom up */
+int au_wbr_do_copyup_bu(struct dentry *dentry, aufs_bindex_t bstart)
+{
+ int err;
+
+ err = au_wbr_bu(dentry->d_sb, bstart);
+ AuDbg("b%d\n", err);
+ if (err > bstart)
+ err = au_wbr_nonopq(dentry, err);
+
+ AuDbg("b%d\n", err);
+ return err;
+}
+
+static int au_wbr_copyup_bu(struct dentry *dentry)
+{
+ int err;
+ aufs_bindex_t bstart;
+
+ bstart = au_dbstart(dentry);
+ err = au_wbr_do_copyup_bu(dentry, bstart);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_wbr_copyup_operations au_wbr_copyup_ops[] = {
+ [AuWbrCopyup_TDP] = {
+ .copyup = au_wbr_copyup_tdp
+ },
+ [AuWbrCopyup_BUP] = {
+ .copyup = au_wbr_copyup_bup
+ },
+ [AuWbrCopyup_BU] = {
+ .copyup = au_wbr_copyup_bu
+ }
+};
+
+struct au_wbr_create_operations au_wbr_create_ops[] = {
+ [AuWbrCreate_TDP] = {
+ .create = au_wbr_create_tdp
+ },
+ [AuWbrCreate_RR] = {
+ .create = au_wbr_create_rr,
+ .init = au_wbr_create_init_rr
+ },
+ [AuWbrCreate_MFS] = {
+ .create = au_wbr_create_mfs,
+ .init = au_wbr_create_init_mfs,
+ .fin = au_wbr_create_fin_mfs
+ },
+ [AuWbrCreate_MFSV] = {
+ .create = au_wbr_create_mfs,
+ .init = au_wbr_create_init_mfs,
+ .fin = au_wbr_create_fin_mfs
+ },
+ [AuWbrCreate_MFSRR] = {
+ .create = au_wbr_create_mfsrr,
+ .init = au_wbr_create_init_mfsrr,
+ .fin = au_wbr_create_fin_mfs
+ },
+ [AuWbrCreate_MFSRRV] = {
+ .create = au_wbr_create_mfsrr,
+ .init = au_wbr_create_init_mfsrr,
+ .fin = au_wbr_create_fin_mfs
+ },
+ [AuWbrCreate_PMFS] = {
+ .create = au_wbr_create_pmfs,
+ .init = au_wbr_create_init_mfs,
+ .fin = au_wbr_create_fin_mfs
+ },
+ [AuWbrCreate_PMFSV] = {
+ .create = au_wbr_create_pmfs,
+ .init = au_wbr_create_init_mfs,
+ .fin = au_wbr_create_fin_mfs
+ },
+ [AuWbrCreate_PMFSRR] = {
+ .create = au_wbr_create_pmfsrr,
+ .init = au_wbr_create_init_mfsrr,
+ .fin = au_wbr_create_fin_mfs
+ },
+ [AuWbrCreate_PMFSRRV] = {
+ .create = au_wbr_create_pmfsrr,
+ .init = au_wbr_create_init_mfsrr,
+ .fin = au_wbr_create_fin_mfs
+ }
+};
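+
+/*
+ * the two tables above are indexed by the parsed copyup/create policies,
+ * presumably chosen at mount time via the "cpup=" and "create=" options.
+ * note that the *V variants share the same callbacks as their non-V
+ * counterparts; the difference lies only in how their arguments are
+ * handled elsewhere.
+ */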
diff --git a/fs/aufs/whout.c b/fs/aufs/whout.c
new file mode 100644
index 000000000..fdda85eef
--- /dev/null
+++ b/fs/aufs/whout.c
@@ -0,0 +1,1047 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * whiteout for logical deletion and opaque directory
+ */
+
+#include "aufs.h"
+
+#define WH_MASK S_IRUGO
+
+/*
+ * If a directory contains this file, then it is opaque.  We start with the
+ * .wh. prefix so that it is blocked by lookup.
+ */
+static struct qstr diropq_name = QSTR_INIT(AUFS_WH_DIROPQ,
+ sizeof(AUFS_WH_DIROPQ) - 1);
+
+/*
+ * generate the whiteout name, which is NOT NUL-terminated.
+ * @wh: whiteout qstr to be filled
+ * @name: original qstr (d_name)
+ * returns zero on success, otherwise an error.
+ * on success, wh->name should be freed by kfree().
+ */
+int au_wh_name_alloc(struct qstr *wh, const struct qstr *name)
+{
+ char *p;
+
+ if (unlikely(name->len > PATH_MAX - AUFS_WH_PFX_LEN))
+ return -ENAMETOOLONG;
+
+ wh->len = name->len + AUFS_WH_PFX_LEN;
+ p = kmalloc(wh->len, GFP_NOFS);
+ wh->name = p;
+ if (p) {
+ memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN);
+ memcpy(p + AUFS_WH_PFX_LEN, name->name, name->len);
+ /* smp_mb(); */
+ return 0;
+ }
+ return -ENOMEM;
+}
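+
+/*
+ * a minimal sketch, assuming AUFS_WH_PFX is the usual ".wh." prefix: the
+ * original name "foo" yields the whiteout name ".wh.foo" (with no trailing
+ * NUL, as noted above).
+ */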
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * test if the @wh_name exists under @h_parent.
+ * @try_sio specifies whether super-io is necessary.
+ */
+int au_wh_test(struct dentry *h_parent, struct qstr *wh_name, int try_sio)
+{
+ int err;
+ struct dentry *wh_dentry;
+
+ if (!try_sio)
+ wh_dentry = vfsub_lkup_one(wh_name, h_parent);
+ else
+ wh_dentry = au_sio_lkup_one(wh_name, h_parent);
+ err = PTR_ERR(wh_dentry);
+ if (IS_ERR(wh_dentry)) {
+ if (err == -ENAMETOOLONG)
+ err = 0;
+ goto out;
+ }
+
+ err = 0;
+ if (d_is_negative(wh_dentry))
+ goto out_wh; /* success */
+
+ err = 1;
+ if (d_is_reg(wh_dentry))
+ goto out_wh; /* success */
+
+ err = -EIO;
+ AuIOErr("%pd Invalid whiteout entry type 0%o.\n",
+ wh_dentry, d_inode(wh_dentry)->i_mode);
+
+out_wh:
+ dput(wh_dentry);
+out:
+ return err;
+}
+
+/*
+ * test whether @h_dentry is marked opaque or not.
+ */
+int au_diropq_test(struct dentry *h_dentry)
+{
+ int err;
+ struct inode *h_dir;
+
+ h_dir = d_inode(h_dentry);
+ err = au_wh_test(h_dentry, &diropq_name,
+ au_test_h_perm_sio(h_dir, MAY_EXEC));
+ return err;
+}
+
+/*
+ * returns a negative dentry whose name is unique and temporary.
+ */
+struct dentry *au_whtmp_lkup(struct dentry *h_parent, struct au_branch *br,
+ struct qstr *prefix)
+{
+ struct dentry *dentry;
+ int i;
+ char defname[NAME_MAX - AUFS_MAX_NAMELEN + DNAME_INLINE_LEN + 1],
+ *name, *p;
+ /* strict atomic_t is unnecessary here */
+ static unsigned short cnt;
+ struct qstr qs;
+
+ BUILD_BUG_ON(sizeof(cnt) * 2 > AUFS_WH_TMP_LEN);
+
+ name = defname;
+ qs.len = sizeof(defname) - DNAME_INLINE_LEN + prefix->len - 1;
+ if (unlikely(prefix->len > DNAME_INLINE_LEN)) {
+ dentry = ERR_PTR(-ENAMETOOLONG);
+ if (unlikely(qs.len > NAME_MAX))
+ goto out;
+ dentry = ERR_PTR(-ENOMEM);
+ name = kmalloc(qs.len + 1, GFP_NOFS);
+ if (unlikely(!name))
+ goto out;
+ }
+
+ /* doubly whiteout-ed */
+ memcpy(name, AUFS_WH_PFX AUFS_WH_PFX, AUFS_WH_PFX_LEN * 2);
+ p = name + AUFS_WH_PFX_LEN * 2;
+ memcpy(p, prefix->name, prefix->len);
+ p += prefix->len;
+ *p++ = '.';
+ AuDebugOn(name + qs.len + 1 - p <= AUFS_WH_TMP_LEN);
+
+ qs.name = name;
+ for (i = 0; i < 3; i++) {
+ sprintf(p, "%.*x", AUFS_WH_TMP_LEN, cnt++);
+ dentry = au_sio_lkup_one(&qs, h_parent);
+ if (IS_ERR(dentry) || d_is_negative(dentry))
+ goto out_name;
+ dput(dentry);
+ }
+ /* pr_warn("could not get random name\n"); */
+ dentry = ERR_PTR(-EEXIST);
+ AuDbg("%.*s\n", AuLNPair(&qs));
+ BUG();
+
+out_name:
+ if (name != defname)
+ kfree(name);
+out:
+ AuTraceErrPtr(dentry);
+ return dentry;
+}
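+
+/*
+ * rough shape of the generated name: the doubled whiteout prefix, the
+ * original name, a dot and a hex counter, i.e. ".wh..wh.<name>.<hex>";
+ * up to three candidates are probed before the lookup gives up.
+ */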
+
+/*
+ * rename the @h_dentry on @br to the whiteouted temporary name.
+ */
+int au_whtmp_ren(struct dentry *h_dentry, struct au_branch *br)
+{
+ int err;
+ struct path h_path = {
+ .mnt = au_br_mnt(br)
+ };
+ struct inode *h_dir, *delegated;
+ struct dentry *h_parent;
+
+ h_parent = h_dentry->d_parent; /* dir inode is locked */
+ h_dir = d_inode(h_parent);
+ IMustLock(h_dir);
+
+ h_path.dentry = au_whtmp_lkup(h_parent, br, &h_dentry->d_name);
+ err = PTR_ERR(h_path.dentry);
+ if (IS_ERR(h_path.dentry))
+ goto out;
+
+ /* under the same dir, no need to lock_rename() */
+ delegated = NULL;
+ err = vfsub_rename(h_dir, h_dentry, h_dir, &h_path, &delegated);
+ AuTraceErr(err);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal rename\n");
+ iput(delegated);
+ }
+ dput(h_path.dentry);
+
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * functions for removing a whiteout
+ */
+
+static int do_unlink_wh(struct inode *h_dir, struct path *h_path)
+{
+ int err, force;
+ struct inode *delegated;
+
+ /*
+ * forces superio when the dir has a sticky bit.
+ * this may be a violation of unix fs semantics.
+ */
+ force = (h_dir->i_mode & S_ISVTX)
+ && !uid_eq(current_fsuid(), d_inode(h_path->dentry)->i_uid);
+ delegated = NULL;
+ err = vfsub_unlink(h_dir, h_path, &delegated, force);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal unlink\n");
+ iput(delegated);
+ }
+ return err;
+}
+
+int au_wh_unlink_dentry(struct inode *h_dir, struct path *h_path,
+ struct dentry *dentry)
+{
+ int err;
+
+ err = do_unlink_wh(h_dir, h_path);
+ if (!err && dentry)
+ au_set_dbwh(dentry, -1);
+
+ return err;
+}
+
+static int unlink_wh_name(struct dentry *h_parent, struct qstr *wh,
+ struct au_branch *br)
+{
+ int err;
+ struct path h_path = {
+ .mnt = au_br_mnt(br)
+ };
+
+ err = 0;
+ h_path.dentry = vfsub_lkup_one(wh, h_parent);
+ if (IS_ERR(h_path.dentry))
+ err = PTR_ERR(h_path.dentry);
+ else {
+ if (d_is_reg(h_path.dentry))
+ err = do_unlink_wh(d_inode(h_parent), &h_path);
+ dput(h_path.dentry);
+ }
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * initialize/clean whiteout for a branch
+ */
+
+static void au_wh_clean(struct inode *h_dir, struct path *whpath,
+ const int isdir)
+{
+ int err;
+ struct inode *delegated;
+
+ if (d_is_negative(whpath->dentry))
+ return;
+
+ if (isdir)
+ err = vfsub_rmdir(h_dir, whpath);
+ else {
+ delegated = NULL;
+ err = vfsub_unlink(h_dir, whpath, &delegated, /*force*/0);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal unlink\n");
+ iput(delegated);
+ }
+ }
+ if (unlikely(err))
+ pr_warn("failed removing %pd (%d), ignored.\n",
+ whpath->dentry, err);
+}
+
+static int test_linkable(struct dentry *h_root)
+{
+ struct inode *h_dir = d_inode(h_root);
+
+ if (h_dir->i_op->link)
+ return 0;
+
+ pr_err("%pd (%s) doesn't support link(2), use noplink and rw+nolwh\n",
+ h_root, au_sbtype(h_root->d_sb));
+ return -ENOSYS;
+}
+
+/* todo: should this mkdir be done in /sbin/mount.aufs helper? */
+static int au_whdir(struct inode *h_dir, struct path *path)
+{
+ int err;
+
+ err = -EEXIST;
+ if (d_is_negative(path->dentry)) {
+ int mode = S_IRWXU;
+
+ if (au_test_nfs(path->dentry->d_sb))
+ mode |= S_IXUGO;
+ err = vfsub_mkdir(h_dir, path, mode);
+ } else if (d_is_dir(path->dentry))
+ err = 0;
+ else
+ pr_err("unknown %pd exists\n", path->dentry);
+
+ return err;
+}
+
+struct au_wh_base {
+ const struct qstr *name;
+ struct dentry *dentry;
+};
+
+static void au_wh_init_ro(struct inode *h_dir, struct au_wh_base base[],
+ struct path *h_path)
+{
+ h_path->dentry = base[AuBrWh_BASE].dentry;
+ au_wh_clean(h_dir, h_path, /*isdir*/0);
+ h_path->dentry = base[AuBrWh_PLINK].dentry;
+ au_wh_clean(h_dir, h_path, /*isdir*/1);
+ h_path->dentry = base[AuBrWh_ORPH].dentry;
+ au_wh_clean(h_dir, h_path, /*isdir*/1);
+}
+
+/*
+ * returns tri-state,
+ * minus: error, caller should print the message
+ * zero: success
+ * plus: error, caller should NOT print the message
+ */
+static int au_wh_init_rw_nolink(struct dentry *h_root, struct au_wbr *wbr,
+ int do_plink, struct au_wh_base base[],
+ struct path *h_path)
+{
+ int err;
+ struct inode *h_dir;
+
+ h_dir = d_inode(h_root);
+ h_path->dentry = base[AuBrWh_BASE].dentry;
+ au_wh_clean(h_dir, h_path, /*isdir*/0);
+ h_path->dentry = base[AuBrWh_PLINK].dentry;
+ if (do_plink) {
+ err = test_linkable(h_root);
+ if (unlikely(err)) {
+ err = 1;
+ goto out;
+ }
+
+ err = au_whdir(h_dir, h_path);
+ if (unlikely(err))
+ goto out;
+ wbr->wbr_plink = dget(base[AuBrWh_PLINK].dentry);
+ } else
+ au_wh_clean(h_dir, h_path, /*isdir*/1);
+ h_path->dentry = base[AuBrWh_ORPH].dentry;
+ err = au_whdir(h_dir, h_path);
+ if (unlikely(err))
+ goto out;
+ wbr->wbr_orph = dget(base[AuBrWh_ORPH].dentry);
+
+out:
+ return err;
+}
+
+/*
+ * for the moment, aufs supports branch filesystems which do not support
+ * link(2). in testing on FAT, which does not fully support i_op->setattr()
+ * either, copyup failed. in the end, such a filesystem will not be used as
+ * a writable branch.
+ *
+ * returns tri-state, see above.
+ */
+static int au_wh_init_rw(struct dentry *h_root, struct au_wbr *wbr,
+ int do_plink, struct au_wh_base base[],
+ struct path *h_path)
+{
+ int err;
+ struct inode *h_dir;
+
+ WbrWhMustWriteLock(wbr);
+
+ err = test_linkable(h_root);
+ if (unlikely(err)) {
+ err = 1;
+ goto out;
+ }
+
+ /*
+ * todo: should this create be done in /sbin/mount.aufs helper?
+ */
+ err = -EEXIST;
+ h_dir = d_inode(h_root);
+ if (d_is_negative(base[AuBrWh_BASE].dentry)) {
+ h_path->dentry = base[AuBrWh_BASE].dentry;
+ err = vfsub_create(h_dir, h_path, WH_MASK, /*want_excl*/true);
+ } else if (d_is_reg(base[AuBrWh_BASE].dentry))
+ err = 0;
+ else
+ pr_err("unknown %pd2 exists\n", base[AuBrWh_BASE].dentry);
+ if (unlikely(err))
+ goto out;
+
+ h_path->dentry = base[AuBrWh_PLINK].dentry;
+ if (do_plink) {
+ err = au_whdir(h_dir, h_path);
+ if (unlikely(err))
+ goto out;
+ wbr->wbr_plink = dget(base[AuBrWh_PLINK].dentry);
+ } else
+ au_wh_clean(h_dir, h_path, /*isdir*/1);
+ wbr->wbr_whbase = dget(base[AuBrWh_BASE].dentry);
+
+ h_path->dentry = base[AuBrWh_ORPH].dentry;
+ err = au_whdir(h_dir, h_path);
+ if (unlikely(err))
+ goto out;
+ wbr->wbr_orph = dget(base[AuBrWh_ORPH].dentry);
+
+out:
+ return err;
+}
+
+/*
+ * initialize the whiteout base file/dir for @br.
+ */
+int au_wh_init(struct au_branch *br, struct super_block *sb)
+{
+ int err, i;
+ const unsigned char do_plink
+ = !!au_opt_test(au_mntflags(sb), PLINK);
+ struct inode *h_dir;
+ struct path path = br->br_path;
+ struct dentry *h_root = path.dentry;
+ struct au_wbr *wbr = br->br_wbr;
+ static const struct qstr base_name[] = {
+ [AuBrWh_BASE] = QSTR_INIT(AUFS_BASE_NAME,
+ sizeof(AUFS_BASE_NAME) - 1),
+ [AuBrWh_PLINK] = QSTR_INIT(AUFS_PLINKDIR_NAME,
+ sizeof(AUFS_PLINKDIR_NAME) - 1),
+ [AuBrWh_ORPH] = QSTR_INIT(AUFS_ORPHDIR_NAME,
+ sizeof(AUFS_ORPHDIR_NAME) - 1)
+ };
+ struct au_wh_base base[] = {
+ [AuBrWh_BASE] = {
+ .name = base_name + AuBrWh_BASE,
+ .dentry = NULL
+ },
+ [AuBrWh_PLINK] = {
+ .name = base_name + AuBrWh_PLINK,
+ .dentry = NULL
+ },
+ [AuBrWh_ORPH] = {
+ .name = base_name + AuBrWh_ORPH,
+ .dentry = NULL
+ }
+ };
+
+ if (wbr)
+ WbrWhMustWriteLock(wbr);
+
+ for (i = 0; i < AuBrWh_Last; i++) {
+ /* doubly whiteouted */
+ struct dentry *d;
+
+ d = au_wh_lkup(h_root, (void *)base[i].name, br);
+ err = PTR_ERR(d);
+ if (IS_ERR(d))
+ goto out;
+
+ base[i].dentry = d;
+ AuDebugOn(wbr
+ && wbr->wbr_wh[i]
+ && wbr->wbr_wh[i] != base[i].dentry);
+ }
+
+ if (wbr)
+ for (i = 0; i < AuBrWh_Last; i++) {
+ dput(wbr->wbr_wh[i]);
+ wbr->wbr_wh[i] = NULL;
+ }
+
+ err = 0;
+ if (!au_br_writable(br->br_perm)) {
+ h_dir = d_inode(h_root);
+ au_wh_init_ro(h_dir, base, &path);
+ } else if (!au_br_wh_linkable(br->br_perm)) {
+ err = au_wh_init_rw_nolink(h_root, wbr, do_plink, base, &path);
+ if (err > 0)
+ goto out;
+ else if (err)
+ goto out_err;
+ } else {
+ err = au_wh_init_rw(h_root, wbr, do_plink, base, &path);
+ if (err > 0)
+ goto out;
+ else if (err)
+ goto out_err;
+ }
+ goto out; /* success */
+
+out_err:
+ pr_err("an error(%d) on the writable branch %pd(%s)\n",
+ err, h_root, au_sbtype(h_root->d_sb));
+out:
+ for (i = 0; i < AuBrWh_Last; i++)
+ dput(base[i].dentry);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * whiteouts are usually all hard-linked to a single base file.
+ * when its link count reaches the ceiling, we create a new whiteout base
+ * asynchronously.
+ */
+
+struct reinit_br_wh {
+ struct super_block *sb;
+ struct au_branch *br;
+};
+
+static void reinit_br_wh(void *arg)
+{
+ int err;
+ aufs_bindex_t bindex;
+ struct path h_path;
+ struct reinit_br_wh *a = arg;
+ struct au_wbr *wbr;
+ struct inode *dir, *delegated;
+ struct dentry *h_root;
+ struct au_hinode *hdir;
+
+ err = 0;
+ wbr = a->br->br_wbr;
+ /* big aufs lock */
+ si_noflush_write_lock(a->sb);
+ if (!au_br_writable(a->br->br_perm))
+ goto out;
+ bindex = au_br_index(a->sb, a->br->br_id);
+ if (unlikely(bindex < 0))
+ goto out;
+
+ di_read_lock_parent(a->sb->s_root, AuLock_IR);
+ dir = d_inode(a->sb->s_root);
+ hdir = au_hi(dir, bindex);
+ h_root = au_h_dptr(a->sb->s_root, bindex);
+ AuDebugOn(h_root != au_br_dentry(a->br));
+
+ au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT);
+ wbr_wh_write_lock(wbr);
+ err = au_h_verify(wbr->wbr_whbase, au_opt_udba(a->sb), hdir->hi_inode,
+ h_root, a->br);
+ if (!err) {
+ h_path.dentry = wbr->wbr_whbase;
+ h_path.mnt = au_br_mnt(a->br);
+ delegated = NULL;
+ err = vfsub_unlink(hdir->hi_inode, &h_path, &delegated,
+ /*force*/0);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal unlink\n");
+ iput(delegated);
+ }
+ } else {
+ pr_warn("%pd is moved, ignored\n", wbr->wbr_whbase);
+ err = 0;
+ }
+ dput(wbr->wbr_whbase);
+ wbr->wbr_whbase = NULL;
+ if (!err)
+ err = au_wh_init(a->br, a->sb);
+ wbr_wh_write_unlock(wbr);
+ au_hn_imtx_unlock(hdir);
+ di_read_unlock(a->sb->s_root, AuLock_IR);
+ if (!err)
+ au_fhsm_wrote(a->sb, bindex, /*force*/0);
+
+out:
+ if (wbr)
+ atomic_dec(&wbr->wbr_wh_running);
+ atomic_dec(&a->br->br_count);
+ si_write_unlock(a->sb);
+ au_nwt_done(&au_sbi(a->sb)->si_nowait);
+ kfree(arg);
+ if (unlikely(err))
+ AuIOErr("err %d\n", err);
+}
+
+static void kick_reinit_br_wh(struct super_block *sb, struct au_branch *br)
+{
+ int do_dec, wkq_err;
+ struct reinit_br_wh *arg;
+
+ do_dec = 1;
+ if (atomic_inc_return(&br->br_wbr->wbr_wh_running) != 1)
+ goto out;
+
+ /* ignore ENOMEM */
+ arg = kmalloc(sizeof(*arg), GFP_NOFS);
+ if (arg) {
+ /*
+ * dec(wh_running), kfree(arg) and dec(br_count)
+ * in reinit function
+ */
+ arg->sb = sb;
+ arg->br = br;
+ atomic_inc(&br->br_count);
+ wkq_err = au_wkq_nowait(reinit_br_wh, arg, sb, /*flags*/0);
+ if (unlikely(wkq_err)) {
+ atomic_dec(&br->br_wbr->wbr_wh_running);
+ atomic_dec(&br->br_count);
+ kfree(arg);
+ }
+ do_dec = 0;
+ }
+
+out:
+ if (do_dec)
+ atomic_dec(&br->br_wbr->wbr_wh_running);
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * create the whiteout @wh.
+ */
+static int link_or_create_wh(struct super_block *sb, aufs_bindex_t bindex,
+ struct dentry *wh)
+{
+ int err;
+ struct path h_path = {
+ .dentry = wh
+ };
+ struct au_branch *br;
+ struct au_wbr *wbr;
+ struct dentry *h_parent;
+ struct inode *h_dir, *delegated;
+
+ h_parent = wh->d_parent; /* dir inode is locked */
+ h_dir = d_inode(h_parent);
+ IMustLock(h_dir);
+
+ br = au_sbr(sb, bindex);
+ h_path.mnt = au_br_mnt(br);
+ wbr = br->br_wbr;
+ wbr_wh_read_lock(wbr);
+ if (wbr->wbr_whbase) {
+ delegated = NULL;
+ err = vfsub_link(wbr->wbr_whbase, h_dir, &h_path, &delegated);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal link\n");
+ iput(delegated);
+ }
+ if (!err || err != -EMLINK)
+ goto out;
+
+ /* link count full. re-initialize br_whbase. */
+ kick_reinit_br_wh(sb, br);
+ }
+
+ /* return this error in this context */
+ err = vfsub_create(h_dir, &h_path, WH_MASK, /*want_excl*/true);
+ if (!err)
+ au_fhsm_wrote(sb, bindex, /*force*/0);
+
+out:
+ wbr_wh_read_unlock(wbr);
+ return err;
+}
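+
+/*
+ * in short: a new whiteout is preferably a hard link to the per-branch
+ * base file wbr_whbase; only when link(2) fails (typically -EMLINK once
+ * the link count is exhausted) is a standalone whiteout created, with an
+ * asynchronous re-initialization of the base file kicked off above.
+ */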
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * create or remove the diropq.
+ */
+static struct dentry *do_diropq(struct dentry *dentry, aufs_bindex_t bindex,
+ unsigned int flags)
+{
+ struct dentry *opq_dentry, *h_dentry;
+ struct super_block *sb;
+ struct au_branch *br;
+ int err;
+
+ sb = dentry->d_sb;
+ br = au_sbr(sb, bindex);
+ h_dentry = au_h_dptr(dentry, bindex);
+ opq_dentry = vfsub_lkup_one(&diropq_name, h_dentry);
+ if (IS_ERR(opq_dentry))
+ goto out;
+
+ if (au_ftest_diropq(flags, CREATE)) {
+ err = link_or_create_wh(sb, bindex, opq_dentry);
+ if (!err) {
+ au_set_dbdiropq(dentry, bindex);
+ goto out; /* success */
+ }
+ } else {
+ struct path tmp = {
+ .dentry = opq_dentry,
+ .mnt = au_br_mnt(br)
+ };
+ err = do_unlink_wh(au_h_iptr(d_inode(dentry), bindex), &tmp);
+ if (!err)
+ au_set_dbdiropq(dentry, -1);
+ }
+ dput(opq_dentry);
+ opq_dentry = ERR_PTR(err);
+
+out:
+ return opq_dentry;
+}
+
+struct do_diropq_args {
+ struct dentry **errp;
+ struct dentry *dentry;
+ aufs_bindex_t bindex;
+ unsigned int flags;
+};
+
+static void call_do_diropq(void *args)
+{
+ struct do_diropq_args *a = args;
+ *a->errp = do_diropq(a->dentry, a->bindex, a->flags);
+}
+
+struct dentry *au_diropq_sio(struct dentry *dentry, aufs_bindex_t bindex,
+ unsigned int flags)
+{
+ struct dentry *diropq, *h_dentry;
+
+ h_dentry = au_h_dptr(dentry, bindex);
+ if (!au_test_h_perm_sio(d_inode(h_dentry), MAY_EXEC | MAY_WRITE))
+ diropq = do_diropq(dentry, bindex, flags);
+ else {
+ int wkq_err;
+ struct do_diropq_args args = {
+ .errp = &diropq,
+ .dentry = dentry,
+ .bindex = bindex,
+ .flags = flags
+ };
+
+ wkq_err = au_wkq_wait(call_do_diropq, &args);
+ if (unlikely(wkq_err))
+ diropq = ERR_PTR(wkq_err);
+ }
+
+ return diropq;
+}
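+
+/*
+ * callers are expected to go through the au_diropq_create()/au_diropq_remove()
+ * wrappers in whout.h rather than calling this directly; a minimal sketch:
+ *
+ *	opq_dentry = au_diropq_create(dentry, bindex);
+ *	if (IS_ERR(opq_dentry))
+ *		err = PTR_ERR(opq_dentry);
+ */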
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * lookup whiteout dentry.
+ * @h_parent: lower parent dentry which must exist and be locked
+ * @base_name: name of dentry which will be whiteouted
+ * returns dentry for whiteout.
+ */
+struct dentry *au_wh_lkup(struct dentry *h_parent, struct qstr *base_name,
+ struct au_branch *br)
+{
+ int err;
+ struct qstr wh_name;
+ struct dentry *wh_dentry;
+
+ err = au_wh_name_alloc(&wh_name, base_name);
+ wh_dentry = ERR_PTR(err);
+ if (!err) {
+ wh_dentry = vfsub_lkup_one(&wh_name, h_parent);
+ kfree(wh_name.name);
+ }
+ return wh_dentry;
+}
+
+/*
+ * link/create a whiteout for @dentry on @bindex.
+ */
+struct dentry *au_wh_create(struct dentry *dentry, aufs_bindex_t bindex,
+ struct dentry *h_parent)
+{
+ struct dentry *wh_dentry;
+ struct super_block *sb;
+ int err;
+
+ sb = dentry->d_sb;
+ wh_dentry = au_wh_lkup(h_parent, &dentry->d_name, au_sbr(sb, bindex));
+ if (!IS_ERR(wh_dentry) && d_is_negative(wh_dentry)) {
+ err = link_or_create_wh(sb, bindex, wh_dentry);
+ if (!err) {
+ au_set_dbwh(dentry, bindex);
+ au_fhsm_wrote(sb, bindex, /*force*/0);
+ } else {
+ dput(wh_dentry);
+ wh_dentry = ERR_PTR(err);
+ }
+ }
+
+ return wh_dentry;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* Delete all whiteouts in this directory on branch bindex. */
+static int del_wh_children(struct dentry *h_dentry, struct au_nhash *whlist,
+ aufs_bindex_t bindex, struct au_branch *br)
+{
+ int err;
+ unsigned long ul, n;
+ struct qstr wh_name;
+ char *p;
+ struct hlist_head *head;
+ struct au_vdir_wh *pos;
+ struct au_vdir_destr *str;
+
+ err = -ENOMEM;
+ p = (void *)__get_free_page(GFP_NOFS);
+ wh_name.name = p;
+ if (unlikely(!wh_name.name))
+ goto out;
+
+ err = 0;
+ memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN);
+ p += AUFS_WH_PFX_LEN;
+ n = whlist->nh_num;
+ head = whlist->nh_head;
+ for (ul = 0; !err && ul < n; ul++, head++) {
+ hlist_for_each_entry(pos, head, wh_hash) {
+ if (pos->wh_bindex != bindex)
+ continue;
+
+ str = &pos->wh_str;
+ if (str->len + AUFS_WH_PFX_LEN <= PATH_MAX) {
+ memcpy(p, str->name, str->len);
+ wh_name.len = AUFS_WH_PFX_LEN + str->len;
+ err = unlink_wh_name(h_dentry, &wh_name, br);
+ if (!err)
+ continue;
+ break;
+ }
+ AuIOErr("whiteout name too long %.*s\n",
+ str->len, str->name);
+ err = -EIO;
+ break;
+ }
+ }
+ free_page((unsigned long)wh_name.name);
+
+out:
+ return err;
+}
+
+struct del_wh_children_args {
+ int *errp;
+ struct dentry *h_dentry;
+ struct au_nhash *whlist;
+ aufs_bindex_t bindex;
+ struct au_branch *br;
+};
+
+static void call_del_wh_children(void *args)
+{
+ struct del_wh_children_args *a = args;
+ *a->errp = del_wh_children(a->h_dentry, a->whlist, a->bindex, a->br);
+}
+
+/* ---------------------------------------------------------------------- */
+
+struct au_whtmp_rmdir *au_whtmp_rmdir_alloc(struct super_block *sb, gfp_t gfp)
+{
+ struct au_whtmp_rmdir *whtmp;
+ int err;
+ unsigned int rdhash;
+
+ SiMustAnyLock(sb);
+
+ whtmp = kzalloc(sizeof(*whtmp), gfp);
+ if (unlikely(!whtmp)) {
+ whtmp = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ /* no estimation for dir size */
+ rdhash = au_sbi(sb)->si_rdhash;
+ if (!rdhash)
+ rdhash = AUFS_RDHASH_DEF;
+ err = au_nhash_alloc(&whtmp->whlist, rdhash, gfp);
+ if (unlikely(err)) {
+ kfree(whtmp);
+ whtmp = ERR_PTR(err);
+ }
+
+out:
+ return whtmp;
+}
+
+void au_whtmp_rmdir_free(struct au_whtmp_rmdir *whtmp)
+{
+ if (whtmp->br)
+ atomic_dec(&whtmp->br->br_count);
+ dput(whtmp->wh_dentry);
+ iput(whtmp->dir);
+ au_nhash_wh_free(&whtmp->whlist);
+ kfree(whtmp);
+}
+
+/*
+ * rmdir the whiteouted, temporarily-named dir @wh_dentry.
+ * @whlist: whiteouted children.
+ */
+int au_whtmp_rmdir(struct inode *dir, aufs_bindex_t bindex,
+ struct dentry *wh_dentry, struct au_nhash *whlist)
+{
+ int err;
+ unsigned int h_nlink;
+ struct path h_tmp;
+ struct inode *wh_inode, *h_dir;
+ struct au_branch *br;
+
+ h_dir = d_inode(wh_dentry->d_parent); /* dir inode is locked */
+ IMustLock(h_dir);
+
+ br = au_sbr(dir->i_sb, bindex);
+ wh_inode = d_inode(wh_dentry);
+ inode_lock_nested(wh_inode, AuLsc_I_CHILD);
+
+ /*
+ * someone else might change some whiteouts while we were sleeping.
+ * it means this whlist may contain obsolete entries.
+ */
+ if (!au_test_h_perm_sio(wh_inode, MAY_EXEC | MAY_WRITE))
+ err = del_wh_children(wh_dentry, whlist, bindex, br);
+ else {
+ int wkq_err;
+ struct del_wh_children_args args = {
+ .errp = &err,
+ .h_dentry = wh_dentry,
+ .whlist = whlist,
+ .bindex = bindex,
+ .br = br
+ };
+
+ wkq_err = au_wkq_wait(call_del_wh_children, &args);
+ if (unlikely(wkq_err))
+ err = wkq_err;
+ }
+ inode_unlock(wh_inode);
+
+ if (!err) {
+ h_tmp.dentry = wh_dentry;
+ h_tmp.mnt = au_br_mnt(br);
+ h_nlink = h_dir->i_nlink;
+ err = vfsub_rmdir(h_dir, &h_tmp);
+ /* some fs doesn't change the parent nlink in some cases */
+ h_nlink -= h_dir->i_nlink;
+ }
+
+ if (!err) {
+ if (au_ibstart(dir) == bindex) {
+ /* todo: dir->i_mutex is necessary */
+ au_cpup_attr_timesizes(dir);
+ if (h_nlink)
+ vfsub_drop_nlink(dir);
+ }
+ return 0; /* success */
+ }
+
+ pr_warn("failed removing %pd(%d), ignored\n", wh_dentry, err);
+ return err;
+}
+
+static void call_rmdir_whtmp(void *args)
+{
+ int err;
+ aufs_bindex_t bindex;
+ struct au_whtmp_rmdir *a = args;
+ struct super_block *sb;
+ struct dentry *h_parent;
+ struct inode *h_dir;
+ struct au_hinode *hdir;
+
+ /* rmdir by nfsd may cause deadlock with this i_mutex */
+ /* inode_lock(a->dir); */
+ err = -EROFS;
+ sb = a->dir->i_sb;
+ si_read_lock(sb, !AuLock_FLUSH);
+ if (!au_br_writable(a->br->br_perm))
+ goto out;
+ bindex = au_br_index(sb, a->br->br_id);
+ if (unlikely(bindex < 0))
+ goto out;
+
+ err = -EIO;
+ ii_write_lock_parent(a->dir);
+ h_parent = dget_parent(a->wh_dentry);
+ h_dir = d_inode(h_parent);
+ hdir = au_hi(a->dir, bindex);
+ err = vfsub_mnt_want_write(au_br_mnt(a->br));
+ if (unlikely(err))
+ goto out_mnt;
+ au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT);
+ err = au_h_verify(a->wh_dentry, au_opt_udba(sb), h_dir, h_parent,
+ a->br);
+ if (!err)
+ err = au_whtmp_rmdir(a->dir, bindex, a->wh_dentry, &a->whlist);
+ au_hn_imtx_unlock(hdir);
+ vfsub_mnt_drop_write(au_br_mnt(a->br));
+
+out_mnt:
+ dput(h_parent);
+ ii_write_unlock(a->dir);
+out:
+ /* inode_unlock(a->dir); */
+ au_whtmp_rmdir_free(a);
+ si_read_unlock(sb);
+ au_nwt_done(&au_sbi(sb)->si_nowait);
+ if (unlikely(err))
+ AuIOErr("err %d\n", err);
+}
+
+void au_whtmp_kick_rmdir(struct inode *dir, aufs_bindex_t bindex,
+ struct dentry *wh_dentry, struct au_whtmp_rmdir *args)
+{
+ int wkq_err;
+ struct super_block *sb;
+
+ IMustLock(dir);
+
+ /* all post-processing will be done in call_rmdir_whtmp(). */
+ sb = dir->i_sb;
+ args->dir = au_igrab(dir);
+ args->br = au_sbr(sb, bindex);
+ atomic_inc(&args->br->br_count);
+ args->wh_dentry = dget(wh_dentry);
+ wkq_err = au_wkq_nowait(call_rmdir_whtmp, args, sb, /*flags*/0);
+ if (unlikely(wkq_err)) {
+ pr_warn("rmdir error %pd (%d), ignored\n", wh_dentry, wkq_err);
+ au_whtmp_rmdir_free(args);
+ }
+}
diff --git a/fs/aufs/whout.h b/fs/aufs/whout.h
new file mode 100644
index 000000000..4077dd19e
--- /dev/null
+++ b/fs/aufs/whout.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * whiteout for logical deletion and opaque directory
+ */
+
+#ifndef __AUFS_WHOUT_H__
+#define __AUFS_WHOUT_H__
+
+#ifdef __KERNEL__
+
+#include "dir.h"
+
+/* whout.c */
+int au_wh_name_alloc(struct qstr *wh, const struct qstr *name);
+int au_wh_test(struct dentry *h_parent, struct qstr *wh_name, int try_sio);
+int au_diropq_test(struct dentry *h_dentry);
+struct au_branch;
+struct dentry *au_whtmp_lkup(struct dentry *h_parent, struct au_branch *br,
+ struct qstr *prefix);
+int au_whtmp_ren(struct dentry *h_dentry, struct au_branch *br);
+int au_wh_unlink_dentry(struct inode *h_dir, struct path *h_path,
+ struct dentry *dentry);
+int au_wh_init(struct au_branch *br, struct super_block *sb);
+
+/* diropq flags */
+#define AuDiropq_CREATE 1
+#define au_ftest_diropq(flags, name) ((flags) & AuDiropq_##name)
+#define au_fset_diropq(flags, name) \
+ do { (flags) |= AuDiropq_##name; } while (0)
+#define au_fclr_diropq(flags, name) \
+ do { (flags) &= ~AuDiropq_##name; } while (0)
+
+struct dentry *au_diropq_sio(struct dentry *dentry, aufs_bindex_t bindex,
+ unsigned int flags);
+struct dentry *au_wh_lkup(struct dentry *h_parent, struct qstr *base_name,
+ struct au_branch *br);
+struct dentry *au_wh_create(struct dentry *dentry, aufs_bindex_t bindex,
+ struct dentry *h_parent);
+
+/* real rmdir for the whiteout-ed dir */
+struct au_whtmp_rmdir {
+ struct inode *dir;
+ struct au_branch *br;
+ struct dentry *wh_dentry;
+ struct au_nhash whlist;
+};
+
+struct au_whtmp_rmdir *au_whtmp_rmdir_alloc(struct super_block *sb, gfp_t gfp);
+void au_whtmp_rmdir_free(struct au_whtmp_rmdir *whtmp);
+int au_whtmp_rmdir(struct inode *dir, aufs_bindex_t bindex,
+ struct dentry *wh_dentry, struct au_nhash *whlist);
+void au_whtmp_kick_rmdir(struct inode *dir, aufs_bindex_t bindex,
+ struct dentry *wh_dentry, struct au_whtmp_rmdir *args);
+
+/* ---------------------------------------------------------------------- */
+
+static inline struct dentry *au_diropq_create(struct dentry *dentry,
+ aufs_bindex_t bindex)
+{
+ return au_diropq_sio(dentry, bindex, AuDiropq_CREATE);
+}
+
+static inline int au_diropq_remove(struct dentry *dentry, aufs_bindex_t bindex)
+{
+ return PTR_ERR(au_diropq_sio(dentry, bindex, !AuDiropq_CREATE));
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_WHOUT_H__ */
diff --git a/fs/aufs/wkq.c b/fs/aufs/wkq.c
new file mode 100644
index 000000000..0f1500e93
--- /dev/null
+++ b/fs/aufs/wkq.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * workqueue for asynchronous/super-io operations
+ * todo: try new credential scheme
+ */
+
+#include <linux/module.h>
+#include "aufs.h"
+
+/* internal workqueue named AUFS_WKQ_NAME */
+
+static struct workqueue_struct *au_wkq;
+
+struct au_wkinfo {
+ struct work_struct wk;
+ struct kobject *kobj;
+
+ unsigned int flags; /* see wkq.h */
+
+ au_wkq_func_t func;
+ void *args;
+
+ struct completion *comp;
+};
+
+/* ---------------------------------------------------------------------- */
+
+static void wkq_func(struct work_struct *wk)
+{
+ struct au_wkinfo *wkinfo = container_of(wk, struct au_wkinfo, wk);
+
+ AuDebugOn(!uid_eq(current_fsuid(), GLOBAL_ROOT_UID));
+ AuDebugOn(rlimit(RLIMIT_FSIZE) != RLIM_INFINITY);
+
+ wkinfo->func(wkinfo->args);
+ if (au_ftest_wkq(wkinfo->flags, WAIT))
+ complete(wkinfo->comp);
+ else {
+ kobject_put(wkinfo->kobj);
+ module_put(THIS_MODULE); /* todo: ?? */
+ kfree(wkinfo);
+ }
+}
+
+/*
+ * Since struct completion is large, try allocating it dynamically.
+ */
+#if 1 /* defined(CONFIG_4KSTACKS) || defined(AuTest4KSTACKS) */
+#define AuWkqCompDeclare(name) struct completion *comp = NULL
+
+static int au_wkq_comp_alloc(struct au_wkinfo *wkinfo, struct completion **comp)
+{
+ *comp = kmalloc(sizeof(**comp), GFP_NOFS);
+ if (*comp) {
+ init_completion(*comp);
+ wkinfo->comp = *comp;
+ return 0;
+ }
+ return -ENOMEM;
+}
+
+static void au_wkq_comp_free(struct completion *comp)
+{
+ kfree(comp);
+}
+
+#else
+
+/* no braces */
+#define AuWkqCompDeclare(name) \
+ DECLARE_COMPLETION_ONSTACK(_ ## name); \
+ struct completion *comp = &_ ## name
+
+static int au_wkq_comp_alloc(struct au_wkinfo *wkinfo, struct completion **comp)
+{
+ wkinfo->comp = *comp;
+ return 0;
+}
+
+static void au_wkq_comp_free(struct completion *comp __maybe_unused)
+{
+ /* empty */
+}
+#endif /* 4KSTACKS */
+
+static void au_wkq_run(struct au_wkinfo *wkinfo)
+{
+ if (au_ftest_wkq(wkinfo->flags, NEST)) {
+ if (au_wkq_test()) {
+ AuWarn1("wkq from wkq, unless silly-rename on NFS,"
+ " due to a dead dir by UDBA?\n");
+ AuDebugOn(au_ftest_wkq(wkinfo->flags, WAIT));
+ }
+ } else
+ au_dbg_verify_kthread();
+
+ if (au_ftest_wkq(wkinfo->flags, WAIT)) {
+ INIT_WORK_ONSTACK(&wkinfo->wk, wkq_func);
+ queue_work(au_wkq, &wkinfo->wk);
+ } else {
+ INIT_WORK(&wkinfo->wk, wkq_func);
+ schedule_work(&wkinfo->wk);
+ }
+}
+
+/*
+ * Be careful. It is easy to make deadlock happen.
+ * processA: lock, wkq and wait
+ * processB: wkq and wait, lock in wkq
+ * --> deadlock
+ */
+int au_wkq_do_wait(unsigned int flags, au_wkq_func_t func, void *args)
+{
+ int err;
+ AuWkqCompDeclare(comp);
+ struct au_wkinfo wkinfo = {
+ .flags = flags,
+ .func = func,
+ .args = args
+ };
+
+ err = au_wkq_comp_alloc(&wkinfo, &comp);
+ if (!err) {
+ au_wkq_run(&wkinfo);
+ /* no timeout, no interrupt */
+ wait_for_completion(wkinfo.comp);
+ au_wkq_comp_free(comp);
+ destroy_work_on_stack(&wkinfo.wk);
+ }
+
+ return err;
+
+}
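+
+/*
+ * the usual calling convention for au_wkq_wait() in this code is an args
+ * struct carrying an error pointer plus a small trampoline, roughly:
+ *
+ *	struct foo_args { int *errp; ... };
+ *	static void call_foo(void *args)
+ *	{ struct foo_args *a = args; *a->errp = foo(...); }
+ *	...
+ *	wkq_err = au_wkq_wait(call_foo, &args);
+ *	if (unlikely(wkq_err))
+ *		err = wkq_err;
+ *
+ * see call_do_diropq() and call_del_wh_children() for real instances;
+ * "foo" here is only a placeholder.
+ */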
+
+/*
+ * Note: dget/dput() on aufs dentries inside @func is not supported; it would
+ * be a problem during a concurrent unmount.
+ */
+int au_wkq_nowait(au_wkq_func_t func, void *args, struct super_block *sb,
+ unsigned int flags)
+{
+ int err;
+ struct au_wkinfo *wkinfo;
+
+ atomic_inc(&au_sbi(sb)->si_nowait.nw_len);
+
+ /*
+ * wkq_func() must free this wkinfo.
+ * it highly depends upon the implementation of workqueue.
+ */
+ err = 0;
+ wkinfo = kmalloc(sizeof(*wkinfo), GFP_NOFS);
+ if (wkinfo) {
+ wkinfo->kobj = &au_sbi(sb)->si_kobj;
+ wkinfo->flags = flags & ~AuWkq_WAIT;
+ wkinfo->func = func;
+ wkinfo->args = args;
+ wkinfo->comp = NULL;
+ kobject_get(wkinfo->kobj);
+ __module_get(THIS_MODULE); /* todo: ?? */
+
+ au_wkq_run(wkinfo);
+ } else {
+ err = -ENOMEM;
+ au_nwt_done(&au_sbi(sb)->si_nowait);
+ }
+
+ return err;
+}
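+
+/*
+ * si_nowait.nw_len is bumped here unconditionally, so the queued @func is
+ * expected to call au_nwt_done(&au_sbi(sb)->si_nowait) when it finishes,
+ * as reinit_br_wh() and call_rmdir_whtmp() do; the -ENOMEM path above
+ * takes care of it itself.
+ */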
+
+/* ---------------------------------------------------------------------- */
+
+void au_nwt_init(struct au_nowait_tasks *nwt)
+{
+ atomic_set(&nwt->nw_len, 0);
+ /* smp_mb(); */ /* atomic_set */
+ init_waitqueue_head(&nwt->nw_wq);
+}
+
+void au_wkq_fin(void)
+{
+ destroy_workqueue(au_wkq);
+}
+
+int __init au_wkq_init(void)
+{
+ int err;
+
+ err = 0;
+ au_wkq = alloc_workqueue(AUFS_WKQ_NAME, 0, WQ_DFL_ACTIVE);
+ if (IS_ERR(au_wkq))
+ err = PTR_ERR(au_wkq);
+ else if (!au_wkq)
+ err = -ENOMEM;
+
+ return err;
+}
diff --git a/fs/aufs/wkq.h b/fs/aufs/wkq.h
new file mode 100644
index 000000000..f6c9b9902
--- /dev/null
+++ b/fs/aufs/wkq.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * workqueue for asynchronous/super-io operations
+ * todo: try new credentials management scheme
+ */
+
+#ifndef __AUFS_WKQ_H__
+#define __AUFS_WKQ_H__
+
+#ifdef __KERNEL__
+
+struct super_block;
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * in the next operation, wait for the 'nowait' tasks in the system-wide workqueue
+ */
+struct au_nowait_tasks {
+ atomic_t nw_len;
+ wait_queue_head_t nw_wq;
+};
+
+/* ---------------------------------------------------------------------- */
+
+typedef void (*au_wkq_func_t)(void *args);
+
+/* wkq flags */
+#define AuWkq_WAIT 1
+#define AuWkq_NEST (1 << 1)
+#define au_ftest_wkq(flags, name) ((flags) & AuWkq_##name)
+#define au_fset_wkq(flags, name) \
+ do { (flags) |= AuWkq_##name; } while (0)
+#define au_fclr_wkq(flags, name) \
+ do { (flags) &= ~AuWkq_##name; } while (0)
+
+#ifndef CONFIG_AUFS_HNOTIFY
+#undef AuWkq_NEST
+#define AuWkq_NEST 0
+#endif
+
+/* wkq.c */
+int au_wkq_do_wait(unsigned int flags, au_wkq_func_t func, void *args);
+int au_wkq_nowait(au_wkq_func_t func, void *args, struct super_block *sb,
+ unsigned int flags);
+void au_nwt_init(struct au_nowait_tasks *nwt);
+int __init au_wkq_init(void);
+void au_wkq_fin(void);
+
+/* ---------------------------------------------------------------------- */
+
+static inline int au_wkq_test(void)
+{
+ return current->flags & PF_WQ_WORKER;
+}
+
+static inline int au_wkq_wait(au_wkq_func_t func, void *args)
+{
+ return au_wkq_do_wait(AuWkq_WAIT, func, args);
+}
+
+static inline void au_nwt_done(struct au_nowait_tasks *nwt)
+{
+ if (atomic_dec_and_test(&nwt->nw_len))
+ wake_up_all(&nwt->nw_wq);
+}
+
+static inline int au_nwt_flush(struct au_nowait_tasks *nwt)
+{
+ wait_event(nwt->nw_wq, !atomic_read(&nwt->nw_len));
+ return 0;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __AUFS_WKQ_H__ */
diff --git a/fs/aufs/xattr.c b/fs/aufs/xattr.c
new file mode 100644
index 000000000..890c10795
--- /dev/null
+++ b/fs/aufs/xattr.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright (C) 2014-2016 Junjiro R. Okajima
+ */
+
+/*
+ * handling xattr functions
+ */
+
+#include <linux/xattr.h>
+#include "aufs.h"
+
+static int au_xattr_ignore(int err, char *name, unsigned int ignore_flags)
+{
+ if (!ignore_flags)
+ goto out;
+ switch (err) {
+ case -ENOMEM:
+ case -EDQUOT:
+ goto out;
+ }
+
+ if ((ignore_flags & AuBrAttr_ICEX) == AuBrAttr_ICEX) {
+ err = 0;
+ goto out;
+ }
+
+#define cmp(brattr, prefix) do { \
+ if (!strncmp(name, XATTR_##prefix##_PREFIX, \
+ XATTR_##prefix##_PREFIX_LEN)) { \
+ if (ignore_flags & AuBrAttr_ICEX_##brattr) \
+ err = 0; \
+ goto out; \
+ } \
+ } while (0)
+
+ cmp(SEC, SECURITY);
+ cmp(SYS, SYSTEM);
+ cmp(TR, TRUSTED);
+ cmp(USR, USER);
+#undef cmp
+
+ if (ignore_flags & AuBrAttr_ICEX_OTH)
+ err = 0;
+
+out:
+ return err;
+}
+
+static const int au_xattr_out_of_list = AuBrAttr_ICEX_OTH << 1;
+
+static int au_do_cpup_xattr(struct dentry *h_dst, struct dentry *h_src,
+ char *name, char **buf, unsigned int ignore_flags,
+ unsigned int verbose)
+{
+ int err;
+ ssize_t ssz;
+ struct inode *h_idst;
+
+ ssz = vfs_getxattr_alloc(h_src, name, buf, 0, GFP_NOFS);
+ err = ssz;
+ if (unlikely(err <= 0)) {
+ if (err == -ENODATA
+ || (err == -EOPNOTSUPP
+ && ((ignore_flags & au_xattr_out_of_list)
+ || (au_test_nfs_noacl(d_inode(h_src))
+ && (!strcmp(name, XATTR_NAME_POSIX_ACL_ACCESS)
+ || !strcmp(name,
+ XATTR_NAME_POSIX_ACL_DEFAULT))))
+ ))
+ err = 0;
+ if (err && (verbose || au_debug_test()))
+ pr_err("%s, err %d\n", name, err);
+ goto out;
+ }
+
+ /* unlock it temporarily */
+ h_idst = d_inode(h_dst);
+ inode_unlock(h_idst);
+ err = vfsub_setxattr(h_dst, name, *buf, ssz, /*flags*/0);
+ inode_lock_nested(h_idst, AuLsc_I_CHILD2);
+ if (unlikely(err)) {
+ if (verbose || au_debug_test())
+ pr_err("%s, err %d\n", name, err);
+ err = au_xattr_ignore(err, name, ignore_flags);
+ }
+
+out:
+ return err;
+}
+
+int au_cpup_xattr(struct dentry *h_dst, struct dentry *h_src, int ignore_flags,
+ unsigned int verbose)
+{
+ int err, unlocked, acl_access, acl_default;
+ ssize_t ssz;
+ struct inode *h_isrc, *h_idst;
+ char *value, *p, *o, *e;
+
+ /* try to prevent updates to the source inode while we are referencing it */
+ /* there should not be the parent-child relationship between them */
+ h_isrc = d_inode(h_src);
+ h_idst = d_inode(h_dst);
+ inode_unlock(h_idst);
+ inode_lock_nested(h_isrc, AuLsc_I_CHILD);
+ inode_lock_nested(h_idst, AuLsc_I_CHILD2);
+ unlocked = 0;
+
+ /* some filesystems don't list POSIX ACL, for example tmpfs */
+ ssz = vfs_listxattr(h_src, NULL, 0);
+ err = ssz;
+ if (unlikely(err < 0)) {
+ AuTraceErr(err);
+ if (err == -ENODATA
+ || err == -EOPNOTSUPP)
+ err = 0; /* ignore */
+ goto out;
+ }
+
+ err = 0;
+ p = NULL;
+ o = NULL;
+ if (ssz) {
+ err = -ENOMEM;
+ p = kmalloc(ssz, GFP_NOFS);
+ o = p;
+ if (unlikely(!p))
+ goto out;
+ err = vfs_listxattr(h_src, p, ssz);
+ }
+ inode_unlock(h_isrc);
+ unlocked = 1;
+ AuDbg("err %d, ssz %zd\n", err, ssz);
+ if (unlikely(err < 0))
+ goto out_free;
+
+ err = 0;
+ e = p + ssz;
+ value = NULL;
+ acl_access = 0;
+ acl_default = 0;
+ while (!err && p < e) {
+ acl_access |= !strncmp(p, XATTR_NAME_POSIX_ACL_ACCESS,
+ sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1);
+ acl_default |= !strncmp(p, XATTR_NAME_POSIX_ACL_DEFAULT,
+ sizeof(XATTR_NAME_POSIX_ACL_DEFAULT)
+ - 1);
+ err = au_do_cpup_xattr(h_dst, h_src, p, &value, ignore_flags,
+ verbose);
+ p += strlen(p) + 1;
+ }
+ AuTraceErr(err);
+ ignore_flags |= au_xattr_out_of_list;
+ if (!err && !acl_access) {
+ err = au_do_cpup_xattr(h_dst, h_src,
+ XATTR_NAME_POSIX_ACL_ACCESS, &value,
+ ignore_flags, verbose);
+ AuTraceErr(err);
+ }
+ if (!err && !acl_default) {
+ err = au_do_cpup_xattr(h_dst, h_src,
+ XATTR_NAME_POSIX_ACL_DEFAULT, &value,
+ ignore_flags, verbose);
+ AuTraceErr(err);
+ }
+
+ kfree(value);
+
+out_free:
+ kfree(o);
+out:
+ if (!unlocked)
+ inode_unlock(h_isrc);
+ AuTraceErr(err);
+ return err;
+}
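+
+/*
+ * summary of the flow above: every xattr reported by vfs_listxattr() is
+ * copied first; since some filesystems omit POSIX ACLs from the listing,
+ * the access/default ACLs are then copied explicitly unless they were
+ * already seen, and au_xattr_out_of_list makes -EOPNOTSUPP harmless for
+ * that second pass.
+ */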
+
+/* ---------------------------------------------------------------------- */
+
+enum {
+ AU_XATTR_LIST,
+ AU_XATTR_GET
+};
+
+struct au_lgxattr {
+ int type;
+ union {
+ struct {
+ char *list;
+ size_t size;
+ } list;
+ struct {
+ const char *name;
+ void *value;
+ size_t size;
+ } get;
+ } u;
+};
+
+static ssize_t au_lgxattr(struct dentry *dentry, struct au_lgxattr *arg)
+{
+ ssize_t err;
+ struct path h_path;
+ struct super_block *sb;
+
+ sb = dentry->d_sb;
+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM);
+ if (unlikely(err))
+ goto out;
+ err = au_h_path_getattr(dentry, /*force*/1, &h_path);
+ if (unlikely(err))
+ goto out_si;
+ if (unlikely(!h_path.dentry))
+ /* illegally overlapped or something */
+ goto out_di; /* pretending success */
+
+ /* always topmost entry only */
+ switch (arg->type) {
+ case AU_XATTR_LIST:
+ err = vfs_listxattr(h_path.dentry,
+ arg->u.list.list, arg->u.list.size);
+ break;
+ case AU_XATTR_GET:
+ err = vfs_getxattr(h_path.dentry,
+ arg->u.get.name, arg->u.get.value,
+ arg->u.get.size);
+ break;
+ }
+
+out_di:
+ di_read_unlock(dentry, AuLock_IR);
+out_si:
+ si_read_unlock(sb);
+out:
+ AuTraceErr(err);
+ return err;
+}
+
+ssize_t aufs_listxattr(struct dentry *dentry, char *list, size_t size)
+{
+ struct au_lgxattr arg = {
+ .type = AU_XATTR_LIST,
+ .u.list = {
+ .list = list,
+ .size = size
+ },
+ };
+
+ return au_lgxattr(dentry, &arg);
+}
+
+ssize_t aufs_getxattr(struct dentry *dentry, const char *name, void *value,
+ size_t size)
+{
+ struct au_lgxattr arg = {
+ .type = AU_XATTR_GET,
+ .u.get = {
+ .name = name,
+ .value = value,
+ .size = size
+ },
+ };
+
+ return au_lgxattr(dentry, &arg);
+}
+
+int aufs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags)
+{
+ struct au_srxattr arg = {
+ .type = AU_XATTR_SET,
+ .u.set = {
+ .name = name,
+ .value = value,
+ .size = size,
+ .flags = flags
+ },
+ };
+
+ return au_srxattr(dentry, &arg);
+}
+
+int aufs_removexattr(struct dentry *dentry, const char *name)
+{
+ struct au_srxattr arg = {
+ .type = AU_XATTR_REMOVE,
+ .u.remove = {
+ .name = name
+ },
+ };
+
+ return au_srxattr(dentry, &arg);
+}
+
+/* ---------------------------------------------------------------------- */
+
+#if 0
+static size_t au_xattr_list(struct dentry *dentry, char *list, size_t list_size,
+ const char *name, size_t name_len, int type)
+{
+ return aufs_listxattr(dentry, list, list_size);
+}
+
+static int au_xattr_get(struct dentry *dentry, const char *name, void *buffer,
+ size_t size, int type)
+{
+ return aufs_getxattr(dentry, name, buffer, size);
+}
+
+static int au_xattr_set(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int type)
+{
+ return aufs_setxattr(dentry, name, value, size, flags);
+}
+
+static const struct xattr_handler au_xattr_handler = {
+ /* no prefix, no flags */
+ .list = au_xattr_list,
+ .get = au_xattr_get,
+ .set = au_xattr_set
+ /* why no remove? */
+};
+
+static const struct xattr_handler *au_xattr_handlers[] = {
+ &au_xattr_handler
+};
+
+void au_xattr_init(struct super_block *sb)
+{
+ /* sb->s_xattr = au_xattr_handlers; */
+}
+#endif
diff --git a/fs/aufs/xino.c b/fs/aufs/xino.c
new file mode 100644
index 000000000..2773caedc
--- /dev/null
+++ b/fs/aufs/xino.c
@@ -0,0 +1,1305 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+/*
+ * external inode number translation table and bitmap
+ */
+
+#include <linux/seq_file.h>
+#include <linux/statfs.h>
+#include "aufs.h"
+
+/* todo: unnecessary to support mmap_sem since kernel-space? */
+ssize_t xino_fread(vfs_readf_t func, struct file *file, void *kbuf, size_t size,
+ loff_t *pos)
+{
+ ssize_t err;
+ mm_segment_t oldfs;
+ union {
+ void *k;
+ char __user *u;
+ } buf;
+
+ buf.k = kbuf;
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ do {
+ /* todo: signal_pending? */
+ err = func(file, buf.u, size, pos);
+ } while (err == -EAGAIN || err == -EINTR);
+ set_fs(oldfs);
+
+#if 0 /* reserved for future use */
+ if (err > 0)
+ fsnotify_access(file->f_path.dentry);
+#endif
+
+ return err;
+}
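+
+/*
+ * the get_fs()/set_fs(KERNEL_DS) dance above lets the regular VFS
+ * read/write paths accept a kernel buffer through the __user-typed
+ * argument; the union merely keeps the address-space checker (sparse)
+ * quiet.
+ */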
+
+/* ---------------------------------------------------------------------- */
+
+static ssize_t xino_fwrite_wkq(vfs_writef_t func, struct file *file, void *buf,
+ size_t size, loff_t *pos);
+
+static ssize_t do_xino_fwrite(vfs_writef_t func, struct file *file, void *kbuf,
+ size_t size, loff_t *pos)
+{
+ ssize_t err;
+ mm_segment_t oldfs;
+ union {
+ void *k;
+ const char __user *u;
+ } buf;
+ int i;
+ const int prevent_endless = 10;
+
+ i = 0;
+ buf.k = kbuf;
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ do {
+ err = func(file, buf.u, size, pos);
+ if (err == -EINTR
+ && !au_wkq_test()
+ && fatal_signal_pending(current)) {
+ set_fs(oldfs);
+ err = xino_fwrite_wkq(func, file, kbuf, size, pos);
+ BUG_ON(err == -EINTR);
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ }
+ } while (i++ < prevent_endless
+ && (err == -EAGAIN || err == -EINTR));
+ set_fs(oldfs);
+
+#if 0 /* reserved for future use */
+ if (err > 0)
+ fsnotify_modify(file->f_path.dentry);
+#endif
+
+ return err;
+}
+
+struct do_xino_fwrite_args {
+ ssize_t *errp;
+ vfs_writef_t func;
+ struct file *file;
+ void *buf;
+ size_t size;
+ loff_t *pos;
+};
+
+static void call_do_xino_fwrite(void *args)
+{
+ struct do_xino_fwrite_args *a = args;
+ *a->errp = do_xino_fwrite(a->func, a->file, a->buf, a->size, a->pos);
+}
+
+static ssize_t xino_fwrite_wkq(vfs_writef_t func, struct file *file, void *buf,
+ size_t size, loff_t *pos)
+{
+ ssize_t err;
+ int wkq_err;
+ struct do_xino_fwrite_args args = {
+ .errp = &err,
+ .func = func,
+ .file = file,
+ .buf = buf,
+ .size = size,
+ .pos = pos
+ };
+
+ /*
+ * this breaks RLIMIT_FSIZE and the normal user's limit;
+ * users should care about quota and a real 'filesystem full.'
+ */
+ wkq_err = au_wkq_wait(call_do_xino_fwrite, &args);
+ if (unlikely(wkq_err))
+ err = wkq_err;
+
+ return err;
+}
+
+ssize_t xino_fwrite(vfs_writef_t func, struct file *file, void *buf,
+ size_t size, loff_t *pos)
+{
+ ssize_t err;
+
+ if (rlimit(RLIMIT_FSIZE) == RLIM_INFINITY) {
+ lockdep_off();
+ err = do_xino_fwrite(func, file, buf, size, pos);
+ lockdep_on();
+ } else
+ err = xino_fwrite_wkq(func, file, buf, size, pos);
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * create a new xinofile at the same place/path as @base_file.
+ */
+struct file *au_xino_create2(struct file *base_file, struct file *copy_src)
+{
+ struct file *file;
+ struct dentry *base, *parent;
+ struct inode *dir, *delegated;
+ struct qstr *name;
+ struct path path;
+ int err;
+
+ base = base_file->f_path.dentry;
+ parent = base->d_parent; /* dir inode is locked */
+ dir = d_inode(parent);
+ IMustLock(dir);
+
+ file = ERR_PTR(-EINVAL);
+ name = &base->d_name;
+ path.dentry = vfsub_lookup_one_len(name->name, parent, name->len);
+ if (IS_ERR(path.dentry)) {
+ file = (void *)path.dentry;
+ pr_err("%pd lookup err %ld\n",
+ base, PTR_ERR(path.dentry));
+ goto out;
+ }
+
+ /* no need to mnt_want_write() since we call dentry_open() later */
+ err = vfs_create(dir, path.dentry, S_IRUGO | S_IWUGO, NULL);
+ if (unlikely(err)) {
+ file = ERR_PTR(err);
+ pr_err("%pd create err %d\n", base, err);
+ goto out_dput;
+ }
+
+ path.mnt = base_file->f_path.mnt;
+ file = vfsub_dentry_open(&path,
+ O_RDWR | O_CREAT | O_EXCL | O_LARGEFILE
+ /* | __FMODE_NONOTIFY */);
+ if (IS_ERR(file)) {
+ pr_err("%pd open err %ld\n", base, PTR_ERR(file));
+ goto out_dput;
+ }
+
+ delegated = NULL;
+ err = vfsub_unlink(dir, &file->f_path, &delegated, /*force*/0);
+ if (unlikely(err == -EWOULDBLOCK)) {
+ pr_warn("cannot retry for NFSv4 delegation"
+ " for an internal unlink\n");
+ iput(delegated);
+ }
+ if (unlikely(err)) {
+ pr_err("%pd unlink err %d\n", base, err);
+ goto out_fput;
+ }
+
+ if (copy_src) {
+ /* no one can touch copy_src xino */
+ err = au_copy_file(file, copy_src, vfsub_f_size_read(copy_src));
+ if (unlikely(err)) {
+ pr_err("%pd copy err %d\n", base, err);
+ goto out_fput;
+ }
+ }
+ goto out_dput; /* success */
+
+out_fput:
+ fput(file);
+ file = ERR_PTR(err);
+out_dput:
+ dput(path.dentry);
+out:
+ return file;
+}
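+
+/*
+ * the create-open-unlink sequence above is intentional: the xino file is
+ * kept open (and therefore alive) while its directory entry is removed
+ * immediately, so it never appears inside the branch.
+ */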
+
+struct au_xino_lock_dir {
+ struct au_hinode *hdir;
+ struct dentry *parent;
+ struct inode *dir;
+};
+
+static void au_xino_lock_dir(struct super_block *sb, struct file *xino,
+ struct au_xino_lock_dir *ldir)
+{
+ aufs_bindex_t brid, bindex;
+
+ ldir->hdir = NULL;
+ bindex = -1;
+ brid = au_xino_brid(sb);
+ if (brid >= 0)
+ bindex = au_br_index(sb, brid);
+ if (bindex >= 0) {
+ ldir->hdir = au_hi(d_inode(sb->s_root), bindex);
+ au_hn_imtx_lock_nested(ldir->hdir, AuLsc_I_PARENT);
+ } else {
+ ldir->parent = dget_parent(xino->f_path.dentry);
+ ldir->dir = d_inode(ldir->parent);
+ inode_lock_nested(ldir->dir, AuLsc_I_PARENT);
+ }
+}
+
+static void au_xino_unlock_dir(struct au_xino_lock_dir *ldir)
+{
+ if (ldir->hdir)
+ au_hn_imtx_unlock(ldir->hdir);
+ else {
+ inode_unlock(ldir->dir);
+ dput(ldir->parent);
+ }
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* truncate xino files asynchronously */
+
+int au_xino_trunc(struct super_block *sb, aufs_bindex_t bindex)
+{
+ int err;
+ unsigned long jiffy;
+ blkcnt_t blocks;
+ aufs_bindex_t bi, bend;
+ struct kstatfs *st;
+ struct au_branch *br;
+ struct file *new_xino, *file;
+ struct super_block *h_sb;
+ struct au_xino_lock_dir ldir;
+
+ err = -ENOMEM;
+ st = kmalloc(sizeof(*st), GFP_NOFS);
+ if (unlikely(!st))
+ goto out;
+
+ err = -EINVAL;
+ bend = au_sbend(sb);
+ if (unlikely(bindex < 0 || bend < bindex))
+ goto out_st;
+ br = au_sbr(sb, bindex);
+ file = br->br_xino.xi_file;
+ if (!file)
+ goto out_st;
+
+ err = vfs_statfs(&file->f_path, st);
+ if (unlikely(err))
+ AuErr1("statfs err %d, ignored\n", err);
+ jiffy = jiffies;
+ blocks = file_inode(file)->i_blocks;
+ pr_info("begin truncating xino(b%d), ib%llu, %llu/%llu free blks\n",
+ bindex, (u64)blocks, st->f_bfree, st->f_blocks);
+
+ au_xino_lock_dir(sb, file, &ldir);
+ /* mnt_want_write() is unnecessary here */
+ new_xino = au_xino_create2(file, file);
+ au_xino_unlock_dir(&ldir);
+ err = PTR_ERR(new_xino);
+ if (IS_ERR(new_xino)) {
+ pr_err("err %d, ignored\n", err);
+ goto out_st;
+ }
+ err = 0;
+ fput(file);
+ br->br_xino.xi_file = new_xino;
+
+ h_sb = au_br_sb(br);
+ for (bi = 0; bi <= bend; bi++) {
+ if (unlikely(bi == bindex))
+ continue;
+ br = au_sbr(sb, bi);
+ if (au_br_sb(br) != h_sb)
+ continue;
+
+ fput(br->br_xino.xi_file);
+ br->br_xino.xi_file = new_xino;
+ get_file(new_xino);
+ }
+
+ err = vfs_statfs(&new_xino->f_path, st);
+ if (!err) {
+ pr_info("end truncating xino(b%d), ib%llu, %llu/%llu free blks\n",
+ bindex, (u64)file_inode(new_xino)->i_blocks,
+ st->f_bfree, st->f_blocks);
+ if (file_inode(new_xino)->i_blocks < blocks)
+ au_sbi(sb)->si_xino_jiffy = jiffy;
+ } else
+ AuErr1("statfs err %d, ignored\n", err);
+
+out_st:
+ kfree(st);
+out:
+ return err;
+}
+
+struct xino_do_trunc_args {
+ struct super_block *sb;
+ struct au_branch *br;
+};
+
+static void xino_do_trunc(void *_args)
+{
+ struct xino_do_trunc_args *args = _args;
+ struct super_block *sb;
+ struct au_branch *br;
+ struct inode *dir;
+ int err;
+ aufs_bindex_t bindex;
+
+ err = 0;
+ sb = args->sb;
+ dir = d_inode(sb->s_root);
+ br = args->br;
+
+ si_noflush_write_lock(sb);
+ ii_read_lock_parent(dir);
+ bindex = au_br_index(sb, br->br_id);
+ err = au_xino_trunc(sb, bindex);
+ ii_read_unlock(dir);
+ if (unlikely(err))
+ pr_warn("err b%d, (%d)\n", bindex, err);
+ atomic_dec(&br->br_xino_running);
+ atomic_dec(&br->br_count);
+ si_write_unlock(sb);
+ au_nwt_done(&au_sbi(sb)->si_nowait);
+ kfree(args);
+}
+
+static int xino_trunc_test(struct super_block *sb, struct au_branch *br)
+{
+ int err;
+ struct kstatfs st;
+ struct au_sbinfo *sbinfo;
+
+ /* todo: si_xino_expire and the ratio should be customizable */
+ sbinfo = au_sbi(sb);
+ if (time_before(jiffies,
+ sbinfo->si_xino_jiffy + sbinfo->si_xino_expire))
+ return 0;
+
+ /* truncation border */
+ err = vfs_statfs(&br->br_xino.xi_file->f_path, &st);
+ if (unlikely(err)) {
+ AuErr1("statfs err %d, ignored\n", err);
+ return 0;
+ }
+ if (div64_u64(st.f_bfree * 100, st.f_blocks) >= AUFS_XINO_DEF_TRUNC)
+ return 0;
+
+ return 1;
+}
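+
+/*
+ * i.e. truncation is attempted only after si_xino_expire has elapsed and
+ * the branch filesystem's free space has dropped below AUFS_XINO_DEF_TRUNC
+ * percent of its total blocks; returning 0 here simply skips the
+ * truncation.
+ */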
+
+static void xino_try_trunc(struct super_block *sb, struct au_branch *br)
+{
+ struct xino_do_trunc_args *args;
+ int wkq_err;
+
+ if (!xino_trunc_test(sb, br))
+ return;
+
+ if (atomic_inc_return(&br->br_xino_running) > 1)
+ goto out;
+
+ /* lock and kfree() will be called in xino_do_trunc() */
+ args = kmalloc(sizeof(*args), GFP_NOFS);
+ if (unlikely(!args)) {
+ AuErr1("no memory\n");
+ goto out_args;
+ }
+
+ atomic_inc(&br->br_count);
+ args->sb = sb;
+ args->br = br;
+ wkq_err = au_wkq_nowait(xino_do_trunc, args, sb, /*flags*/0);
+ if (!wkq_err)
+ return; /* success */
+
+ pr_err("wkq %d\n", wkq_err);
+ atomic_dec(&br->br_count);
+
+out_args:
+ kfree(args);
+out:
+ atomic_dec(&br->br_xino_running);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int au_xino_do_write(vfs_writef_t write, struct file *file,
+ ino_t h_ino, ino_t ino)
+{
+ loff_t pos;
+ ssize_t sz;
+
+ pos = h_ino;
+ if (unlikely(au_loff_max / sizeof(ino) - 1 < pos)) {
+ AuIOErr1("too large hi%lu\n", (unsigned long)h_ino);
+ return -EFBIG;
+ }
+ pos *= sizeof(ino);
+ sz = xino_fwrite(write, file, &ino, sizeof(ino), &pos);
+ if (sz == sizeof(ino))
+ return 0; /* success */
+
+ AuIOErr("write failed (%zd)\n", sz);
+ return -EIO;
+}
+
+/*
+ * write @ino to the xinofile for the specified branch{@sb, @bindex}
+ * at the position of @h_ino.
+ * even if @ino is zero, it is written to the xinofile and means no entry.
+ * if the size of the xino file on a specific filesystem exceeds the watermark,
+ * try truncating it.
+ */
+int au_xino_write(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+ ino_t ino)
+{
+ int err;
+ unsigned int mnt_flags;
+ struct au_branch *br;
+
+ BUILD_BUG_ON(sizeof(long long) != sizeof(au_loff_max)
+ || ((loff_t)-1) > 0);
+ SiMustAnyLock(sb);
+
+ mnt_flags = au_mntflags(sb);
+ if (!au_opt_test(mnt_flags, XINO))
+ return 0;
+
+ br = au_sbr(sb, bindex);
+ err = au_xino_do_write(au_sbi(sb)->si_xwrite, br->br_xino.xi_file,
+ h_ino, ino);
+ if (!err) {
+ if (au_opt_test(mnt_flags, TRUNC_XINO)
+ && au_test_fs_trunc_xino(au_br_sb(br)))
+ xino_try_trunc(sb, br);
+ return 0; /* success */
+ }
+
+ AuIOErr("write failed (%d)\n", err);
+ return -EIO;
+}
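+
+/*
+ * on disk the xino file is effectively a sparse array of aufs inode
+ * numbers indexed by the branch inode number: the entry for h_ino lives
+ * at offset h_ino * sizeof(ino_t), and a stored value of zero means
+ * "no entry" as noted above.
+ */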
+
+/* ---------------------------------------------------------------------- */
+
+/* aufs inode number bitmap */
+
+static const int page_bits = (int)PAGE_SIZE * BITS_PER_BYTE;
+static ino_t xib_calc_ino(unsigned long pindex, int bit)
+{
+ ino_t ino;
+
+ AuDebugOn(bit < 0 || page_bits <= bit);
+ ino = AUFS_FIRST_INO + pindex * page_bits + bit;
+ return ino;
+}
+
+static void xib_calc_bit(ino_t ino, unsigned long *pindex, int *bit)
+{
+ AuDebugOn(ino < AUFS_FIRST_INO);
+ ino -= AUFS_FIRST_INO;
+ *pindex = ino / page_bits;
+ *bit = ino % page_bits;
+}
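+
+/*
+ * so the bitmap is handled in page-sized chunks, each covering page_bits
+ * (PAGE_SIZE * BITS_PER_BYTE, e.g. 32768 with 4KB pages) inode numbers
+ * counted from AUFS_FIRST_INO; pindex selects the chunk and bit the
+ * position within it.
+ */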
+
+static int xib_pindex(struct super_block *sb, unsigned long pindex)
+{
+ int err;
+ loff_t pos;
+ ssize_t sz;
+ struct au_sbinfo *sbinfo;
+ struct file *xib;
+ unsigned long *p;
+
+ sbinfo = au_sbi(sb);
+ MtxMustLock(&sbinfo->si_xib_mtx);
+ AuDebugOn(pindex > ULONG_MAX / PAGE_SIZE
+ || !au_opt_test(sbinfo->si_mntflags, XINO));
+
+ if (pindex == sbinfo->si_xib_last_pindex)
+ return 0;
+
+ xib = sbinfo->si_xib;
+ p = sbinfo->si_xib_buf;
+ pos = sbinfo->si_xib_last_pindex;
+ pos *= PAGE_SIZE;
+ sz = xino_fwrite(sbinfo->si_xwrite, xib, p, PAGE_SIZE, &pos);
+ if (unlikely(sz != PAGE_SIZE))
+ goto out;
+
+ pos = pindex;
+ pos *= PAGE_SIZE;
+ if (vfsub_f_size_read(xib) >= pos + PAGE_SIZE)
+ sz = xino_fread(sbinfo->si_xread, xib, p, PAGE_SIZE, &pos);
+ else {
+ memset(p, 0, PAGE_SIZE);
+ sz = xino_fwrite(sbinfo->si_xwrite, xib, p, PAGE_SIZE, &pos);
+ }
+ if (sz == PAGE_SIZE) {
+ sbinfo->si_xib_last_pindex = pindex;
+ return 0; /* success */
+ }
+
+out:
+ AuIOErr1("write failed (%zd)\n", sz);
+ err = sz;
+ if (sz >= 0)
+ err = -EIO;
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void au_xib_clear_bit(struct inode *inode)
+{
+ int err, bit;
+ unsigned long pindex;
+ struct super_block *sb;
+ struct au_sbinfo *sbinfo;
+
+ AuDebugOn(inode->i_nlink);
+
+ sb = inode->i_sb;
+ xib_calc_bit(inode->i_ino, &pindex, &bit);
+ AuDebugOn(page_bits <= bit);
+ sbinfo = au_sbi(sb);
+ mutex_lock(&sbinfo->si_xib_mtx);
+ err = xib_pindex(sb, pindex);
+ if (!err) {
+ clear_bit(bit, sbinfo->si_xib_buf);
+ sbinfo->si_xib_next_bit = bit;
+ }
+ mutex_unlock(&sbinfo->si_xib_mtx);
+}
+
+/* for s_op->delete_inode() */
+void au_xino_delete_inode(struct inode *inode, const int unlinked)
+{
+ int err;
+ unsigned int mnt_flags;
+ aufs_bindex_t bindex, bend, bi;
+ unsigned char try_trunc;
+ struct au_iinfo *iinfo;
+ struct super_block *sb;
+ struct au_hinode *hi;
+ struct inode *h_inode;
+ struct au_branch *br;
+ vfs_writef_t xwrite;
+
+ sb = inode->i_sb;
+ mnt_flags = au_mntflags(sb);
+ if (!au_opt_test(mnt_flags, XINO)
+ || inode->i_ino == AUFS_ROOT_INO)
+ return;
+
+ if (unlinked) {
+ au_xigen_inc(inode);
+ au_xib_clear_bit(inode);
+ }
+
+ iinfo = au_ii(inode);
+ if (!iinfo)
+ return;
+
+ bindex = iinfo->ii_bstart;
+ if (bindex < 0)
+ return;
+
+ xwrite = au_sbi(sb)->si_xwrite;
+ try_trunc = !!au_opt_test(mnt_flags, TRUNC_XINO);
+ hi = iinfo->ii_hinode + bindex;
+ bend = iinfo->ii_bend;
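+ /* clear (write zero to) the xino entry of each branch for this inode */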
+ for (; bindex <= bend; bindex++, hi++) {
+ h_inode = hi->hi_inode;
+ if (!h_inode
+ || (!unlinked && h_inode->i_nlink))
+ continue;
+
+ /* inode may not be revalidated */
+ bi = au_br_index(sb, hi->hi_id);
+ if (bi < 0)
+ continue;
+
+ br = au_sbr(sb, bi);
+ err = au_xino_do_write(xwrite, br->br_xino.xi_file,
+ h_inode->i_ino, /*ino*/0);
+ if (!err && try_trunc
+ && au_test_fs_trunc_xino(au_br_sb(br)))
+ xino_try_trunc(sb, br);
+ }
+}
+
+/* get an unused inode number from bitmap */
+ino_t au_xino_new_ino(struct super_block *sb)
+{
+ ino_t ino;
+ unsigned long *p, pindex, ul, pend;
+ struct au_sbinfo *sbinfo;
+ struct file *file;
+ int free_bit, err;
+
+ if (!au_opt_test(au_mntflags(sb), XINO))
+ return iunique(sb, AUFS_FIRST_INO);
+
+ sbinfo = au_sbi(sb);
+ mutex_lock(&sbinfo->si_xib_mtx);
+ p = sbinfo->si_xib_buf;
+ free_bit = sbinfo->si_xib_next_bit;
+ if (free_bit < page_bits && !test_bit(free_bit, p))
+ goto out; /* success */
+ free_bit = find_first_zero_bit(p, page_bits);
+ if (free_bit < page_bits)
+ goto out; /* success */
+
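+ /*
+ * the cached page has no free bit; scan the preceding bitmap pages
+ * first, then the following ones, loading each via xib_pindex().
+ */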
+ pindex = sbinfo->si_xib_last_pindex;
+ for (ul = pindex - 1; ul < ULONG_MAX; ul--) {
+ err = xib_pindex(sb, ul);
+ if (unlikely(err))
+ goto out_err;
+ free_bit = find_first_zero_bit(p, page_bits);
+ if (free_bit < page_bits)
+ goto out; /* success */
+ }
+
+ file = sbinfo->si_xib;
+ pend = vfsub_f_size_read(file) / PAGE_SIZE;
+ for (ul = pindex + 1; ul <= pend; ul++) {
+ err = xib_pindex(sb, ul);
+ if (unlikely(err))
+ goto out_err;
+ free_bit = find_first_zero_bit(p, page_bits);
+ if (free_bit < page_bits)
+ goto out; /* success */
+ }
+ BUG();
+
+out:
+ set_bit(free_bit, p);
+ sbinfo->si_xib_next_bit = free_bit + 1;
+ pindex = sbinfo->si_xib_last_pindex;
+ mutex_unlock(&sbinfo->si_xib_mtx);
+ ino = xib_calc_ino(pindex, free_bit);
+ AuDbg("i%lu\n", (unsigned long)ino);
+ return ino;
+out_err:
+ mutex_unlock(&sbinfo->si_xib_mtx);
+ AuDbg("i0\n");
+ return 0;
+}
+
+/*
+ * read @ino from the xinofile for the specified branch{@sb, @bindex}
+ * at the position of @h_ino.
+ * if no entry exists for @h_ino, @ino is set to zero.
+ */
+int au_xino_read(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino,
+ ino_t *ino)
+{
+ int err;
+ ssize_t sz;
+ loff_t pos;
+ struct file *file;
+ struct au_sbinfo *sbinfo;
+
+ *ino = 0;
+ if (!au_opt_test(au_mntflags(sb), XINO))
+ return 0; /* no xino */
+
+ err = 0;
+ sbinfo = au_sbi(sb);
+ pos = h_ino;
+ if (unlikely(au_loff_max / sizeof(*ino) - 1 < pos)) {
+ AuIOErr1("too large hi%lu\n", (unsigned long)h_ino);
+ return -EFBIG;
+ }
+ pos *= sizeof(*ino);
+
+ file = au_sbr(sb, bindex)->br_xino.xi_file;
+ if (vfsub_f_size_read(file) < pos + sizeof(*ino))
+ return 0; /* no ino */
+
+ sz = xino_fread(sbinfo->si_xread, file, ino, sizeof(*ino), &pos);
+ if (sz == sizeof(*ino))
+ return 0; /* success */
+
+ err = sz;
+ if (unlikely(sz >= 0)) {
+ err = -EIO;
+ AuIOErr("xino read error (%zd)\n", sz);
+ }
+
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* create and set a new xino file */
+
+struct file *au_xino_create(struct super_block *sb, char *fname, int silent)
+{
+ struct file *file;
+ struct dentry *h_parent, *d;
+ struct inode *h_dir, *inode;
+ int err;
+
+ /*
+ * at mount time, when the xino file is at the default path,
+ * hnotify is disabled so we have no notify events to ignore.
+ * when a user specifies the xino, we cannot get au_hdir to be ignored.
+ */
+ file = vfsub_filp_open(fname, O_RDWR | O_CREAT | O_EXCL | O_LARGEFILE
+ /* | __FMODE_NONOTIFY */,
+ S_IRUGO | S_IWUGO);
+ if (IS_ERR(file)) {
+ if (!silent)
+ pr_err("open %s(%ld)\n", fname, PTR_ERR(file));
+ return file;
+ }
+
+ /* keep file count */
+ err = 0;
+ inode = file_inode(file);
+ h_parent = dget_parent(file->f_path.dentry);
+ h_dir = d_inode(h_parent);
+ inode_lock_nested(h_dir, AuLsc_I_PARENT);
+ /* mnt_want_write() is unnecessary here */
+ /* no delegation since it is just created */
+ if (inode->i_nlink)
+ err = vfsub_unlink(h_dir, &file->f_path, /*delegated*/NULL,
+ /*force*/0);
+ inode_unlock(h_dir);
+ dput(h_parent);
+ if (unlikely(err)) {
+ if (!silent)
+ pr_err("unlink %s(%d)\n", fname, err);
+ goto out;
+ }
+
+ err = -EINVAL;
+ d = file->f_path.dentry;
+ if (unlikely(sb == d->d_sb)) {
+ if (!silent)
+ pr_err("%s must be outside\n", fname);
+ goto out;
+ }
+ if (unlikely(au_test_fs_bad_xino(d->d_sb))) {
+ if (!silent)
+ pr_err("xino doesn't support %s(%s)\n",
+ fname, au_sbtype(d->d_sb));
+ goto out;
+ }
+ return file; /* success */
+
+out:
+ fput(file);
+ file = ERR_PTR(err);
+ return file;
+}
+
+/*
+ * find another branch that is on the same filesystem as the specified
+ * branch{@btgt}. search up to @bend.
+ */
+static int is_sb_shared(struct super_block *sb, aufs_bindex_t btgt,
+ aufs_bindex_t bend)
+{
+ aufs_bindex_t bindex;
+ struct super_block *tgt_sb = au_sbr_sb(sb, btgt);
+
+ for (bindex = 0; bindex < btgt; bindex++)
+ if (unlikely(tgt_sb == au_sbr_sb(sb, bindex)))
+ return bindex;
+ for (bindex++; bindex <= bend; bindex++)
+ if (unlikely(tgt_sb == au_sbr_sb(sb, bindex)))
+ return bindex;
+ return -1;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * initialize the xinofile for the specified branch @br
+ * at the place/path indicated by @base_file.
+ * if @do_test is true, test whether another branch is already on the
+ * same filesystem.
+ */
+int au_xino_br(struct super_block *sb, struct au_branch *br, ino_t h_ino,
+ struct file *base_file, int do_test)
+{
+ int err;
+ ino_t ino;
+ aufs_bindex_t bend, bindex;
+ struct au_branch *shared_br, *b;
+ struct file *file;
+ struct super_block *tgt_sb;
+
+ shared_br = NULL;
+ bend = au_sbend(sb);
+ if (do_test) {
+ tgt_sb = au_br_sb(br);
+ for (bindex = 0; bindex <= bend; bindex++) {
+ b = au_sbr(sb, bindex);
+ if (tgt_sb == au_br_sb(b)) {
+ shared_br = b;
+ break;
+ }
+ }
+ }
+
+ if (!shared_br || !shared_br->br_xino.xi_file) {
+ struct au_xino_lock_dir ldir;
+
+ au_xino_lock_dir(sb, base_file, &ldir);
+ /* mnt_want_write() is unnecessary here */
+ file = au_xino_create2(base_file, NULL);
+ au_xino_unlock_dir(&ldir);
+ err = PTR_ERR(file);
+ if (IS_ERR(file))
+ goto out;
+ br->br_xino.xi_file = file;
+ } else {
+ br->br_xino.xi_file = shared_br->br_xino.xi_file;
+ get_file(br->br_xino.xi_file);
+ }
+
+ ino = AUFS_ROOT_INO;
+ err = au_xino_do_write(au_sbi(sb)->si_xwrite, br->br_xino.xi_file,
+ h_ino, ino);
+ if (unlikely(err)) {
+ fput(br->br_xino.xi_file);
+ br->br_xino.xi_file = NULL;
+ }
+
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* truncate a xino bitmap file */
+
+/* todo: slow */
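+/*
+ * scan one branch's xino file and set the bitmap bit for every aufs inode
+ * number recorded in it.
+ */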
+static int do_xib_restore(struct super_block *sb, struct file *file, void *page)
+{
+ int err, bit;
+ ssize_t sz;
+ unsigned long pindex;
+ loff_t pos, pend;
+ struct au_sbinfo *sbinfo;
+ vfs_readf_t func;
+ ino_t *ino;
+ unsigned long *p;
+
+ err = 0;
+ sbinfo = au_sbi(sb);
+ MtxMustLock(&sbinfo->si_xib_mtx);
+ p = sbinfo->si_xib_buf;
+ func = sbinfo->si_xread;
+ pend = vfsub_f_size_read(file);
+ pos = 0;
+ while (pos < pend) {
+ sz = xino_fread(func, file, page, PAGE_SIZE, &pos);
+ err = sz;
+ if (unlikely(sz <= 0))
+ goto out;
+
+ err = 0;
+ for (ino = page; sz > 0; ino++, sz -= sizeof(*ino)) {
+ if (unlikely(*ino < AUFS_FIRST_INO))
+ continue;
+
+ xib_calc_bit(*ino, &pindex, &bit);
+ AuDebugOn(page_bits <= bit);
+ err = xib_pindex(sb, pindex);
+ if (!err)
+ set_bit(bit, p);
+ else
+ goto out;
+ }
+ }
+
+out:
+ return err;
+}
+
+static int xib_restore(struct super_block *sb)
+{
+ int err;
+ aufs_bindex_t bindex, bend;
+ void *page;
+
+ err = -ENOMEM;
+ page = (void *)__get_free_page(GFP_NOFS);
+ if (unlikely(!page))
+ goto out;
+
+ err = 0;
+ bend = au_sbend(sb);
+ for (bindex = 0; !err && bindex <= bend; bindex++)
+ if (!bindex || is_sb_shared(sb, bindex, bindex - 1) < 0)
+ err = do_xib_restore
+ (sb, au_sbr(sb, bindex)->br_xino.xi_file, page);
+ else
+ AuDbg("b%d\n", bindex);
+ free_page((unsigned long)page);
+
+out:
+ return err;
+}
+
+int au_xib_trunc(struct super_block *sb)
+{
+ int err;
+ ssize_t sz;
+ loff_t pos;
+ struct au_xino_lock_dir ldir;
+ struct au_sbinfo *sbinfo;
+ unsigned long *p;
+ struct file *file;
+
+ SiMustWriteLock(sb);
+
+ err = 0;
+ sbinfo = au_sbi(sb);
+ if (!au_opt_test(sbinfo->si_mntflags, XINO))
+ goto out;
+
+ file = sbinfo->si_xib;
+ if (vfsub_f_size_read(file) <= PAGE_SIZE)
+ goto out;
+
+ au_xino_lock_dir(sb, file, &ldir);
+ /* mnt_want_write() is unnecessary here */
+ file = au_xino_create2(sbinfo->si_xib, NULL);
+ au_xino_unlock_dir(&ldir);
+ err = PTR_ERR(file);
+ if (IS_ERR(file))
+ goto out;
+ fput(sbinfo->si_xib);
+ sbinfo->si_xib = file;
+
+ p = sbinfo->si_xib_buf;
+ memset(p, 0, PAGE_SIZE);
+ pos = 0;
+ sz = xino_fwrite(sbinfo->si_xwrite, sbinfo->si_xib, p, PAGE_SIZE, &pos);
+ if (unlikely(sz != PAGE_SIZE)) {
+ err = sz;
+ AuIOErr("err %d\n", err);
+ if (sz >= 0)
+ err = -EIO;
+ goto out;
+ }
+
+ mutex_lock(&sbinfo->si_xib_mtx);
+ /* mnt_want_write() is unnecessary here */
+ err = xib_restore(sb);
+ mutex_unlock(&sbinfo->si_xib_mtx);
+
+out:
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * xino mount option handlers
+ */
+
+/* xino bitmap */
+static void xino_clear_xib(struct super_block *sb)
+{
+ struct au_sbinfo *sbinfo;
+
+ SiMustWriteLock(sb);
+
+ sbinfo = au_sbi(sb);
+ sbinfo->si_xread = NULL;
+ sbinfo->si_xwrite = NULL;
+ if (sbinfo->si_xib)
+ fput(sbinfo->si_xib);
+ sbinfo->si_xib = NULL;
+ free_page((unsigned long)sbinfo->si_xib_buf);
+ sbinfo->si_xib_buf = NULL;
+}
+
+static int au_xino_set_xib(struct super_block *sb, struct file *base)
+{
+ int err;
+ loff_t pos;
+ struct au_sbinfo *sbinfo;
+ struct file *file;
+
+ SiMustWriteLock(sb);
+
+ sbinfo = au_sbi(sb);
+ file = au_xino_create2(base, sbinfo->si_xib);
+ err = PTR_ERR(file);
+ if (IS_ERR(file))
+ goto out;
+ if (sbinfo->si_xib)
+ fput(sbinfo->si_xib);
+ sbinfo->si_xib = file;
+ sbinfo->si_xread = vfs_readf(file);
+ sbinfo->si_xwrite = vfs_writef(file);
+
+ err = -ENOMEM;
+ if (!sbinfo->si_xib_buf)
+ sbinfo->si_xib_buf = (void *)get_zeroed_page(GFP_NOFS);
+ if (unlikely(!sbinfo->si_xib_buf))
+ goto out_unset;
+
+ sbinfo->si_xib_last_pindex = 0;
+ sbinfo->si_xib_next_bit = 0;
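+ /* make sure the xib file contains at least one zero-filled bitmap page */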
+ if (vfsub_f_size_read(file) < PAGE_SIZE) {
+ pos = 0;
+ err = xino_fwrite(sbinfo->si_xwrite, file, sbinfo->si_xib_buf,
+ PAGE_SIZE, &pos);
+ if (unlikely(err != PAGE_SIZE))
+ goto out_free;
+ }
+ err = 0;
+ goto out; /* success */
+
+out_free:
+ free_page((unsigned long)sbinfo->si_xib_buf);
+ sbinfo->si_xib_buf = NULL;
+ if (err >= 0)
+ err = -EIO;
+out_unset:
+ fput(sbinfo->si_xib);
+ sbinfo->si_xib = NULL;
+ sbinfo->si_xread = NULL;
+ sbinfo->si_xwrite = NULL;
+out:
+ return err;
+}
+
+/* xino for each branch */
+static void xino_clear_br(struct super_block *sb)
+{
+ aufs_bindex_t bindex, bend;
+ struct au_branch *br;
+
+ bend = au_sbend(sb);
+ for (bindex = 0; bindex <= bend; bindex++) {
+ br = au_sbr(sb, bindex);
+ if (!br || !br->br_xino.xi_file)
+ continue;
+
+ fput(br->br_xino.xi_file);
+ br->br_xino.xi_file = NULL;
+ }
+}
+
+static int au_xino_set_br(struct super_block *sb, struct file *base)
+{
+ int err;
+ ino_t ino;
+ aufs_bindex_t bindex, bend, bshared;
+ struct {
+ struct file *old, *new;
+ } *fpair, *p;
+ struct au_branch *br;
+ struct inode *inode;
+ vfs_writef_t writef;
+
+ SiMustWriteLock(sb);
+
+ err = -ENOMEM;
+ bend = au_sbend(sb);
+ fpair = kcalloc(bend + 1, sizeof(*fpair), GFP_NOFS);
+ if (unlikely(!fpair))
+ goto out;
+
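+ /*
+ * for each branch, reuse the xino file already created for an earlier
+ * branch on the same filesystem, or create a new one; then record the
+ * mapping for the root inode.
+ */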
+ inode = d_inode(sb->s_root);
+ ino = AUFS_ROOT_INO;
+ writef = au_sbi(sb)->si_xwrite;
+ for (bindex = 0, p = fpair; bindex <= bend; bindex++, p++) {
+ br = au_sbr(sb, bindex);
+ bshared = is_sb_shared(sb, bindex, bindex - 1);
+ if (bshared >= 0) {
+ /* shared xino */
+ *p = fpair[bshared];
+ get_file(p->new);
+ }
+
+ if (!p->new) {
+ /* new xino */
+ p->old = br->br_xino.xi_file;
+ p->new = au_xino_create2(base, br->br_xino.xi_file);
+ err = PTR_ERR(p->new);
+ if (IS_ERR(p->new)) {
+ p->new = NULL;
+ goto out_pair;
+ }
+ }
+
+ err = au_xino_do_write(writef, p->new,
+ au_h_iptr(inode, bindex)->i_ino, ino);
+ if (unlikely(err))
+ goto out_pair;
+ }
+
+ for (bindex = 0, p = fpair; bindex <= bend; bindex++, p++) {
+ br = au_sbr(sb, bindex);
+ if (br->br_xino.xi_file)
+ fput(br->br_xino.xi_file);
+ get_file(p->new);
+ br->br_xino.xi_file = p->new;
+ }
+
+out_pair:
+ for (bindex = 0, p = fpair; bindex <= bend; bindex++, p++)
+ if (p->new)
+ fput(p->new);
+ else
+ break;
+ kfree(fpair);
+out:
+ return err;
+}
+
+void au_xino_clr(struct super_block *sb)
+{
+ struct au_sbinfo *sbinfo;
+
+ au_xigen_clr(sb);
+ xino_clear_xib(sb);
+ xino_clear_br(sb);
+ sbinfo = au_sbi(sb);
+ /* lvalue, do not call au_mntflags() */
+ au_opt_clr(sbinfo->si_mntflags, XINO);
+}
+
+int au_xino_set(struct super_block *sb, struct au_opt_xino *xino, int remount)
+{
+ int err, skip;
+ struct dentry *parent, *cur_parent;
+ struct qstr *dname, *cur_name;
+ struct file *cur_xino;
+ struct inode *dir;
+ struct au_sbinfo *sbinfo;
+
+ SiMustWriteLock(sb);
+
+ err = 0;
+ sbinfo = au_sbi(sb);
+ parent = dget_parent(xino->file->f_path.dentry);
+ if (remount) {
+ skip = 0;
+ dname = &xino->file->f_path.dentry->d_name;
+ cur_xino = sbinfo->si_xib;
+ if (cur_xino) {
+ cur_parent = dget_parent(cur_xino->f_path.dentry);
+ cur_name = &cur_xino->f_path.dentry->d_name;
+ skip = (cur_parent == parent
+ && au_qstreq(dname, cur_name));
+ dput(cur_parent);
+ }
+ if (skip)
+ goto out;
+ }
+
+ au_opt_set(sbinfo->si_mntflags, XINO);
+ dir = d_inode(parent);
+ inode_lock_nested(dir, AuLsc_I_PARENT);
+ /* mnt_want_write() is unnecessary here */
+ err = au_xino_set_xib(sb, xino->file);
+ if (!err)
+ err = au_xigen_set(sb, xino->file);
+ if (!err)
+ err = au_xino_set_br(sb, xino->file);
+ inode_unlock(dir);
+ if (!err)
+ goto out; /* success */
+
+ /* reset all */
+ AuIOErr("failed creating xino(%d).\n", err);
+ au_xigen_clr(sb);
+ xino_clear_xib(sb);
+
+out:
+ dput(parent);
+ return err;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * create a xinofile at the default place/path.
+ */
+struct file *au_xino_def(struct super_block *sb)
+{
+ struct file *file;
+ char *page, *p;
+ struct au_branch *br;
+ struct super_block *h_sb;
+ struct path path;
+ aufs_bindex_t bend, bindex, bwr;
+
+ br = NULL;
+ bend = au_sbend(sb);
+ bwr = -1;
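+ /* pick the first writable branch whose filesystem can host a xino file */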
+ for (bindex = 0; bindex <= bend; bindex++) {
+ br = au_sbr(sb, bindex);
+ if (au_br_writable(br->br_perm)
+ && !au_test_fs_bad_xino(au_br_sb(br))) {
+ bwr = bindex;
+ break;
+ }
+ }
+
+ if (bwr >= 0) {
+ file = ERR_PTR(-ENOMEM);
+ page = (void *)__get_free_page(GFP_NOFS);
+ if (unlikely(!page))
+ goto out;
+ path.mnt = au_br_mnt(br);
+ path.dentry = au_h_dptr(sb->s_root, bwr);
+ p = d_path(&path, page, PATH_MAX - sizeof(AUFS_XINO_FNAME));
+ file = (void *)p;
+ if (!IS_ERR(p)) {
+ strcat(p, "/" AUFS_XINO_FNAME);
+ AuDbg("%s\n", p);
+ file = au_xino_create(sb, p, /*silent*/0);
+ if (!IS_ERR(file))
+ au_xino_brid_set(sb, br->br_id);
+ }
+ free_page((unsigned long)page);
+ } else {
+ file = au_xino_create(sb, AUFS_XINO_DEFPATH, /*silent*/0);
+ if (IS_ERR(file))
+ goto out;
+ h_sb = file->f_path.dentry->d_sb;
+ if (unlikely(au_test_fs_bad_xino(h_sb))) {
+ pr_err("xino doesn't support %s(%s)\n",
+ AUFS_XINO_DEFPATH, au_sbtype(h_sb));
+ fput(file);
+ file = ERR_PTR(-EINVAL);
+ }
+ if (!IS_ERR(file))
+ au_xino_brid_set(sb, -1);
+ }
+
+out:
+ return file;
+}
+
+/* ---------------------------------------------------------------------- */
+
+int au_xino_path(struct seq_file *seq, struct file *file)
+{
+ int err;
+
+ err = au_seq_path(seq, &file->f_path);
+ if (unlikely(err))
+ goto out;
+
+#define Deleted "\\040(deleted)"
+ seq->count -= sizeof(Deleted) - 1;
+ AuDebugOn(memcmp(seq->buf + seq->count, Deleted,
+ sizeof(Deleted) - 1));
+#undef Deleted
+
+out:
+ return err;
+}
diff --git a/fs/coredump.c b/fs/coredump.c
index 9ea87e9fd..47c32c3bf 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -32,6 +32,9 @@
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/path.h>
#include <linux/timekeeping.h>
#include <asm/uaccess.h>
@@ -649,6 +652,8 @@ void do_coredump(const siginfo_t *siginfo)
}
} else {
struct inode *inode;
+ int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
+ O_LARGEFILE | O_EXCL;
if (cprm.limit < binfmt->min_coredump)
goto fail_unlock;
@@ -687,10 +692,27 @@ void do_coredump(const siginfo_t *siginfo)
* what matters is that at least one of the two processes
* writes its coredump successfully, not which one.
*/
- cprm.file = filp_open(cn.corename,
- O_CREAT | 2 | O_NOFOLLOW |
- O_LARGEFILE | O_EXCL,
- 0600);
+ if (need_suid_safe) {
+ /*
+ * Using user namespaces, normal user tasks can change
+ * their current->fs->root to point to arbitrary
+ * directories. Since the intention of the "only dump
+ * with a fully qualified path" rule is to control where
+ * coredumps may be placed using root privileges,
+ * current->fs->root must not be used. Instead, use the
+ * root directory of init_task.
+ */
+ struct path root;
+
+ task_lock(&init_task);
+ get_fs_root(init_task.fs, &root);
+ task_unlock(&init_task);
+ cprm.file = file_open_root(root.dentry, root.mnt,
+ cn.corename, open_flags, 0600);
+ path_put(&root);
+ } else {
+ cprm.file = filp_open(cn.corename, open_flags, 0600);
+ }
if (IS_ERR(cprm.file))
goto fail_unlock;
diff --git a/fs/dcache.c b/fs/dcache.c
index 2398f9f94..72d3cc89c 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1156,7 +1156,7 @@ enum d_walk_ret {
*
* The @enter() and @finish() callbacks are called with d_lock held.
*/
-static void d_walk(struct dentry *parent, void *data,
+void d_walk(struct dentry *parent, void *data,
enum d_walk_ret (*enter)(void *, struct dentry *),
void (*finish)(void *))
{
@@ -1261,6 +1261,7 @@ rename_retry:
seq = 1;
goto again;
}
+EXPORT_SYMBOL_GPL(d_walk);
/*
* Search for at least 1 mount point in the dentry's subdirs.
diff --git a/fs/exec.c b/fs/exec.c
index ab176c0d7..2cc69a450 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -105,6 +105,7 @@ bool path_noexec(const struct path *path)
return (path->mnt->mnt_flags & MNT_NOEXEC) ||
(path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
}
+EXPORT_SYMBOL_GPL(path_noexec);
#ifdef CONFIG_USELIB
/*
diff --git a/fs/exfat/exfat_super.c b/fs/exfat/exfat_super.c
index a003a7212..02d4fd2c7 100644
--- a/fs/exfat/exfat_super.c
+++ b/fs/exfat/exfat_super.c
@@ -1359,7 +1359,19 @@ const struct inode_operations exfat_dir_inode_operations = {
/*======================================================================*/
/* File Operations */
/*======================================================================*/
-#if LINUX_VERSION_CODE > KERNEL_VERSION(4,1,0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,5,0)
+static const char *exfat_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *done)
+{
+ struct exfat_inode_info *ei = EXFAT_I(inode);
+
+ if (ei->target != NULL)
+ return (char *)(ei->target);
+ return NULL;
+}
+#elif LINUX_VERSION_CODE > KERNEL_VERSION(4,1,0)
static const char *exfat_follow_link(struct dentry *dentry, void **cookie)
{
struct exfat_inode_info *ei = EXFAT_I(dentry->d_inode);
@@ -1376,7 +1388,12 @@ static void *exfat_follow_link(struct dentry *dentry, struct nameidata *nd)
const struct inode_operations exfat_symlink_inode_operations = {
.readlink = generic_readlink,
- .follow_link = exfat_follow_link,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)
+ .follow_link = exfat_follow_link,
+#else
+ .get_link = exfat_get_link,
+#endif
};
static int exfat_file_release(struct inode *inode, struct file *filp)
@@ -1970,7 +1987,7 @@ static void exfat_evict_inode(struct inode *inode)
if (!inode->i_nlink)
i_size_write(inode, 0);
invalidate_inode_buffers(inode);
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,4,80)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
end_writeback(inode);
#else
clear_inode(inode);
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 350a2c8cf..04fd33cbf 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -29,7 +29,7 @@
#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)
-static int setfl(int fd, struct file * filp, unsigned long arg)
+int setfl(int fd, struct file * filp, unsigned long arg)
{
struct inode * inode = file_inode(filp);
int error = 0;
@@ -60,6 +60,8 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
if (filp->f_op->check_flags)
error = filp->f_op->check_flags(arg);
+ if (!error && filp->f_op->setfl)
+ error = filp->f_op->setfl(filp, arg);
if (error)
return error;
@@ -80,6 +82,7 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
out:
return error;
}
+EXPORT_SYMBOL_GPL(setfl);
static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
int force)
diff --git a/fs/fhandle.c b/fs/fhandle.c
index d59712dfa..ca3c3dd01 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -228,7 +228,7 @@ long do_handle_open(int mountdirfd,
path_put(&path);
return fd;
}
- file = file_open_root(path.dentry, path.mnt, "", open_flag);
+ file = file_open_root(path.dentry, path.mnt, "", open_flag, 0);
if (IS_ERR(file)) {
put_unused_fd(fd);
retval = PTR_ERR(file);
diff --git a/fs/file_table.c b/fs/file_table.c
index ad17e05eb..ae9f2676d 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -147,6 +147,7 @@ over:
}
return ERR_PTR(-ENFILE);
}
+EXPORT_SYMBOL_GPL(get_empty_filp);
/**
* alloc_file - allocate and initialize a 'struct file'
@@ -258,6 +259,7 @@ void flush_delayed_fput(void)
{
delayed_fput(NULL);
}
+EXPORT_SYMBOL_GPL(flush_delayed_fput);
static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
@@ -300,6 +302,7 @@ void __fput_sync(struct file *file)
}
EXPORT_SYMBOL(fput);
+EXPORT_SYMBOL_GPL(__fput_sync);
void put_filp(struct file *file)
{
@@ -308,6 +311,7 @@ void put_filp(struct file *file)
file_free(file);
}
}
+EXPORT_SYMBOL_GPL(put_filp);
void __init files_init(void)
{
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 5c46ed9f3..fee81e876 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -281,13 +281,15 @@ locked_inode_to_wb_and_lock_list(struct inode *inode)
wb_get(wb);
spin_unlock(&inode->i_lock);
spin_lock(&wb->list_lock);
- wb_put(wb); /* not gonna deref it anymore */
/* i_wb may have changed inbetween, can't use inode_to_wb() */
- if (likely(wb == inode->i_wb))
- return wb; /* @inode already has ref */
+ if (likely(wb == inode->i_wb)) {
+ wb_put(wb); /* @inode already has ref */
+ return wb;
+ }
spin_unlock(&wb->list_lock);
+ wb_put(wb);
cpu_relax();
spin_lock(&inode->i_lock);
}
@@ -1337,10 +1339,10 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
* we go e.g. from filesystem. Flusher thread uses __writeback_single_inode()
* and does more profound writeback list handling in writeback_sb_inodes().
*/
-static int
-writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
- struct writeback_control *wbc)
+static int writeback_single_inode(struct inode *inode,
+ struct writeback_control *wbc)
{
+ struct bdi_writeback *wb;
int ret = 0;
spin_lock(&inode->i_lock);
@@ -1378,7 +1380,8 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
ret = __writeback_single_inode(inode, wbc);
wbc_detach_inode(wbc);
- spin_lock(&wb->list_lock);
+
+ wb = inode_to_wb_and_lock_list(inode);
spin_lock(&inode->i_lock);
/*
* If inode is clean, remove it from writeback lists. Otherwise don't
@@ -1453,6 +1456,7 @@ static long writeback_sb_inodes(struct super_block *sb,
while (!list_empty(&wb->b_io)) {
struct inode *inode = wb_inode(wb->b_io.prev);
+ struct bdi_writeback *tmp_wb;
if (inode->i_sb != sb) {
if (work->sb) {
@@ -1543,15 +1547,23 @@ static long writeback_sb_inodes(struct super_block *sb,
cond_resched();
}
-
- spin_lock(&wb->list_lock);
+ /*
+ * Requeue @inode if still dirty. Be careful as @inode may
+ * have been switched to another wb in the meantime.
+ */
+ tmp_wb = inode_to_wb_and_lock_list(inode);
spin_lock(&inode->i_lock);
if (!(inode->i_state & I_DIRTY_ALL))
wrote++;
- requeue_inode(inode, wb, &wbc);
+ requeue_inode(inode, tmp_wb, &wbc);
inode_sync_complete(inode);
spin_unlock(&inode->i_lock);
+ if (unlikely(tmp_wb != wb)) {
+ spin_unlock(&tmp_wb->list_lock);
+ spin_lock(&wb->list_lock);
+ }
+
/*
* bail out to wb_writeback() often enough to check
* background threshold and other termination conditions.
@@ -2338,7 +2350,6 @@ EXPORT_SYMBOL(sync_inodes_sb);
*/
int write_inode_now(struct inode *inode, int sync)
{
- struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
struct writeback_control wbc = {
.nr_to_write = LONG_MAX,
.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
@@ -2350,7 +2361,7 @@ int write_inode_now(struct inode *inode, int sync)
wbc.nr_to_write = 0;
might_sleep();
- return writeback_single_inode(inode, wb, &wbc);
+ return writeback_single_inode(inode, &wbc);
}
EXPORT_SYMBOL(write_inode_now);
@@ -2367,7 +2378,7 @@ EXPORT_SYMBOL(write_inode_now);
*/
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
- return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
+ return writeback_single_inode(inode, wbc);
}
EXPORT_SYMBOL(sync_inode);
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index 8e3ee1936..c5b6b7165 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -90,7 +90,7 @@ static struct list_head *cuse_conntbl_head(dev_t devt)
static ssize_t cuse_read_iter(struct kiocb *kiocb, struct iov_iter *to)
{
- struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp };
+ struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(kiocb->ki_filp);
loff_t pos = 0;
return fuse_direct_io(&io, to, &pos, FUSE_DIO_CUSE);
@@ -98,7 +98,7 @@ static ssize_t cuse_read_iter(struct kiocb *kiocb, struct iov_iter *to)
static ssize_t cuse_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
- struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp };
+ struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(kiocb->ki_filp);
loff_t pos = 0;
/*
* No locking or generic_write_checks(), the server is
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index b03d253ec..416108b42 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -528,6 +528,11 @@ static void fuse_release_user_pages(struct fuse_req *req, int write)
}
}
+static void fuse_io_release(struct kref *kref)
+{
+ kfree(container_of(kref, struct fuse_io_priv, refcnt));
+}
+
static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
{
if (io->err)
@@ -585,8 +590,9 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
}
io->iocb->ki_complete(io->iocb, res, 0);
- kfree(io);
}
+
+ kref_put(&io->refcnt, fuse_io_release);
}
static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
@@ -613,6 +619,7 @@ static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
size_t num_bytes, struct fuse_io_priv *io)
{
spin_lock(&io->lock);
+ kref_get(&io->refcnt);
io->size += num_bytes;
io->reqs++;
spin_unlock(&io->lock);
@@ -691,7 +698,7 @@ static void fuse_short_read(struct fuse_req *req, struct inode *inode,
static int fuse_do_readpage(struct file *file, struct page *page)
{
- struct fuse_io_priv io = { .async = 0, .file = file };
+ struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
struct inode *inode = page->mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_req *req;
@@ -984,7 +991,7 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
size_t res;
unsigned offset;
unsigned i;
- struct fuse_io_priv io = { .async = 0, .file = file };
+ struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
for (i = 0; i < req->num_pages; i++)
fuse_wait_on_page_writeback(inode, req->pages[i]->index);
@@ -1398,7 +1405,7 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
- struct fuse_io_priv io = { .async = 0, .file = iocb->ki_filp };
+ struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb->ki_filp);
return __fuse_direct_read(&io, to, &iocb->ki_pos);
}
@@ -1406,7 +1413,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
- struct fuse_io_priv io = { .async = 0, .file = file };
+ struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
ssize_t res;
if (is_bad_inode(inode))
@@ -2843,6 +2850,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
loff_t i_size;
size_t count = iov_iter_count(iter);
struct fuse_io_priv *io;
+ bool is_sync = is_sync_kiocb(iocb);
pos = offset;
inode = file->f_mapping->host;
@@ -2863,6 +2871,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
if (!io)
return -ENOMEM;
spin_lock_init(&io->lock);
+ kref_init(&io->refcnt);
io->reqs = 1;
io->bytes = -1;
io->size = 0;
@@ -2882,12 +2891,18 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
* to wait on real async I/O requests, so we must submit this request
* synchronously.
*/
- if (!is_sync_kiocb(iocb) && (offset + count > i_size) &&
+ if (!is_sync && (offset + count > i_size) &&
iov_iter_rw(iter) == WRITE)
io->async = false;
- if (io->async && is_sync_kiocb(iocb))
+ if (io->async && is_sync) {
+ /*
+ * Additional reference to keep io around after
+ * calling fuse_aio_complete()
+ */
+ kref_get(&io->refcnt);
io->done = &wait;
+ }
if (iov_iter_rw(iter) == WRITE) {
ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
@@ -2900,14 +2915,14 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
/* we have a non-extending, async request, so return */
- if (!is_sync_kiocb(iocb))
+ if (!is_sync)
return -EIOCBQUEUED;
wait_for_completion(&wait);
ret = fuse_get_res_by_io(io);
}
- kfree(io);
+ kref_put(&io->refcnt, fuse_io_release);
if (iov_iter_rw(iter) == WRITE) {
if (ret > 0)
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index ce394b5fe..eddbe02c4 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -22,6 +22,7 @@
#include <linux/rbtree.h>
#include <linux/poll.h>
#include <linux/workqueue.h>
+#include <linux/kref.h>
/** Max number of pages that can be used in a single read request */
#define FUSE_MAX_PAGES_PER_REQ 32
@@ -243,6 +244,7 @@ struct fuse_args {
/** The request IO state (for asynchronous processing) */
struct fuse_io_priv {
+ struct kref refcnt;
int async;
spinlock_t lock;
unsigned reqs;
@@ -256,6 +258,13 @@ struct fuse_io_priv {
struct completion *done;
};
+#define FUSE_IO_PRIV_SYNC(f) \
+{ \
+ .refcnt = { ATOMIC_INIT(1) }, \
+ .async = 0, \
+ .file = f, \
+}
+
/**
* Request flags
*
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 81e622681..624a57a9c 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1408,11 +1408,12 @@ out:
/**
* jbd2_mark_journal_empty() - Mark on disk journal as empty.
* @journal: The journal to update.
+ * @write_op: With which operation should we write the journal sb
*
* Update a journal's dynamic superblock fields to show that journal is empty.
* Write updated superblock to disk waiting for IO to complete.
*/
-static void jbd2_mark_journal_empty(journal_t *journal)
+static void jbd2_mark_journal_empty(journal_t *journal, int write_op)
{
journal_superblock_t *sb = journal->j_superblock;
@@ -1430,7 +1431,7 @@ static void jbd2_mark_journal_empty(journal_t *journal)
sb->s_start = cpu_to_be32(0);
read_unlock(&journal->j_state_lock);
- jbd2_write_superblock(journal, WRITE_FUA);
+ jbd2_write_superblock(journal, write_op);
/* Log is no longer empty */
write_lock(&journal->j_state_lock);
@@ -1716,7 +1717,13 @@ int jbd2_journal_destroy(journal_t *journal)
if (journal->j_sb_buffer) {
if (!is_journal_aborted(journal)) {
mutex_lock(&journal->j_checkpoint_mutex);
- jbd2_mark_journal_empty(journal);
+
+ write_lock(&journal->j_state_lock);
+ journal->j_tail_sequence =
+ ++journal->j_transaction_sequence;
+ write_unlock(&journal->j_state_lock);
+
+ jbd2_mark_journal_empty(journal, WRITE_FLUSH_FUA);
mutex_unlock(&journal->j_checkpoint_mutex);
} else
err = -EIO;
@@ -1975,7 +1982,7 @@ int jbd2_journal_flush(journal_t *journal)
* the magic code for a fully-recovered superblock. Any future
* commits of data to the journal will restore the current
* s_start value. */
- jbd2_mark_journal_empty(journal);
+ jbd2_mark_journal_empty(journal, WRITE_FUA);
mutex_unlock(&journal->j_checkpoint_mutex);
write_lock(&journal->j_state_lock);
J_ASSERT(!journal->j_running_transaction);
@@ -2021,7 +2028,7 @@ int jbd2_journal_wipe(journal_t *journal, int write)
if (write) {
/* Lock to make assertions happy... */
mutex_lock(&journal->j_checkpoint_mutex);
- jbd2_mark_journal_empty(journal);
+ jbd2_mark_journal_empty(journal, WRITE_FUA);
mutex_unlock(&journal->j_checkpoint_mutex);
}
diff --git a/fs/namespace.c b/fs/namespace.c
index 4fb1691b4..97654d2d8 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -463,6 +463,7 @@ void __mnt_drop_write(struct vfsmount *mnt)
mnt_dec_writers(real_mount(mnt));
preempt_enable();
}
+EXPORT_SYMBOL_GPL(__mnt_drop_write);
/**
* mnt_drop_write - give up write access to a mount
@@ -1811,6 +1812,7 @@ int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
}
return 0;
}
+EXPORT_SYMBOL_GPL(iterate_mounts);
static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 4cba7865f..f8082c7cd 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -878,6 +878,7 @@ nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
&exp, &dentry);
if (err)
return err;
+ fh_unlock(&cstate->current_fh);
if (d_really_is_negative(dentry)) {
exp_put(exp);
err = nfserr_noent;
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index d6ef0955a..1600ec470 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1072,8 +1072,9 @@ nfsd4_decode_rename(struct nfsd4_compoundargs *argp, struct nfsd4_rename *rename
READ_BUF(4);
rename->rn_snamelen = be32_to_cpup(p++);
- READ_BUF(rename->rn_snamelen + 4);
+ READ_BUF(rename->rn_snamelen);
SAVEMEM(rename->rn_sname, rename->rn_snamelen);
+ READ_BUF(4);
rename->rn_tnamelen = be32_to_cpup(p++);
READ_BUF(rename->rn_tnamelen);
SAVEMEM(rename->rn_tname, rename->rn_tnamelen);
@@ -1155,13 +1156,14 @@ nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclient
READ_BUF(8);
setclientid->se_callback_prog = be32_to_cpup(p++);
setclientid->se_callback_netid_len = be32_to_cpup(p++);
-
- READ_BUF(setclientid->se_callback_netid_len + 4);
+ READ_BUF(setclientid->se_callback_netid_len);
SAVEMEM(setclientid->se_callback_netid_val, setclientid->se_callback_netid_len);
+ READ_BUF(4);
setclientid->se_callback_addr_len = be32_to_cpup(p++);
- READ_BUF(setclientid->se_callback_addr_len + 4);
+ READ_BUF(setclientid->se_callback_addr_len);
SAVEMEM(setclientid->se_callback_addr_val, setclientid->se_callback_addr_len);
+ READ_BUF(4);
setclientid->se_callback_ident = be32_to_cpup(p++);
DECODE_TAIL;
@@ -1835,8 +1837,9 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
READ_BUF(4);
argp->taglen = be32_to_cpup(p++);
- READ_BUF(argp->taglen + 8);
+ READ_BUF(argp->taglen);
SAVEMEM(argp->tag, argp->taglen);
+ READ_BUF(8);
argp->minorversion = be32_to_cpup(p++);
argp->opcnt = be32_to_cpup(p++);
max_reply += 4 + (XDR_QUADLEN(argp->taglen) << 2);
diff --git a/fs/notify/group.c b/fs/notify/group.c
index d16b62cb2..53e45b61d 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -22,6 +22,7 @@
#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/wait.h>
+#include <linux/module.h>
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
@@ -72,6 +73,7 @@ void fsnotify_get_group(struct fsnotify_group *group)
{
atomic_inc(&group->refcnt);
}
+EXPORT_SYMBOL_GPL(fsnotify_get_group);
/*
* Drop a reference to a group. Free it if it's through.
@@ -81,6 +83,7 @@ void fsnotify_put_group(struct fsnotify_group *group)
if (atomic_dec_and_test(&group->refcnt))
fsnotify_final_destroy_group(group);
}
+EXPORT_SYMBOL_GPL(fsnotify_put_group);
/*
* Create a new fsnotify_group and hold a reference for the group returned.
@@ -109,6 +112,7 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
return group;
}
+EXPORT_SYMBOL_GPL(fsnotify_alloc_group);
int fsnotify_fasync(int fd, struct file *file, int on)
{
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 7115c5d7d..ac2bd697f 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -113,6 +113,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
mark->free_mark(mark);
}
}
+EXPORT_SYMBOL_GPL(fsnotify_put_mark);
/* Calculate mask of events for a list of marks */
u32 fsnotify_recalc_mask(struct hlist_head *head)
@@ -213,6 +214,7 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark,
mutex_unlock(&group->mark_mutex);
fsnotify_free_mark(mark);
}
+EXPORT_SYMBOL_GPL(fsnotify_destroy_mark);
void fsnotify_destroy_marks(struct hlist_head *head, spinlock_t *lock)
{
@@ -398,6 +400,7 @@ err:
return ret;
}
+EXPORT_SYMBOL_GPL(fsnotify_add_mark);
int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
struct inode *inode, struct vfsmount *mnt, int allow_dups)
@@ -498,6 +501,7 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,
atomic_set(&mark->refcnt, 1);
mark->free_mark = free_mark;
}
+EXPORT_SYMBOL_GPL(fsnotify_init_mark);
static void fsnotify_mark_destroy(struct work_struct *work)
{
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index a76b9ea77..a2370e2c7 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -1445,8 +1445,8 @@ static void o2hb_region_release(struct config_item *item)
debugfs_remove(reg->hr_debug_dir);
kfree(reg->hr_db_livenodes);
kfree(reg->hr_db_regnum);
- kfree(reg->hr_debug_elapsed_time);
- kfree(reg->hr_debug_pinned);
+ kfree(reg->hr_db_elapsed_time);
+ kfree(reg->hr_db_pinned);
spin_lock(&o2hb_live_lock);
list_del(&reg->hr_all_item);
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index e36d63ff1..f90931335 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -262,6 +262,7 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
struct dlm_lock *lock, int flags, int type)
{
enum dlm_status status;
+ u8 old_owner = res->owner;
mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
@@ -287,6 +288,19 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
status = DLM_DENIED;
goto bail;
}
+
+ if (lock->ml.type == type && lock->ml.convert_type == LKM_IVMODE) {
+ mlog(0, "last convert request returned DLM_RECOVERING, but "
+ "owner has already queued and sent ast to me. res %.*s, "
+ "(cookie=%u:%llu, type=%d, conv=%d)\n",
+ res->lockname.len, res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+ lock->ml.type, lock->ml.convert_type);
+ status = DLM_NORMAL;
+ goto bail;
+ }
+
res->state |= DLM_LOCK_RES_IN_PROGRESS;
/* move lock to local convert queue */
/* do not alter lock refcount. switching lists. */
@@ -316,11 +330,19 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
spin_lock(&res->spinlock);
res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
lock->convert_pending = 0;
- /* if it failed, move it back to granted queue */
+ /* if it failed, move it back to granted queue.
+ * if master returns DLM_NORMAL and then down before sending ast,
+ * it may have already been moved to granted queue, reset to
+ * DLM_RECOVERING and retry convert */
if (status != DLM_NORMAL) {
if (status != DLM_NOTQUEUED)
dlm_error(status);
dlm_revert_pending_convert(res, lock);
+ } else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
+ (old_owner != res->owner)) {
+ mlog(0, "res %.*s is in recovering or has been recovered.\n",
+ res->lockname.len, res->lockname.name);
+ status = DLM_RECOVERING;
}
bail:
spin_unlock(&res->spinlock);
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index b94a425f0..23d0ab881 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -2071,7 +2071,6 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
dlm_lock_get(lock);
if (lock->convert_pending) {
/* move converting lock back to granted */
- BUG_ON(i != DLM_CONVERTING_LIST);
mlog(0, "node died with convert pending "
"on %.*s. move back to granted list.\n",
res->lockname.len, res->lockname.name);
diff --git a/fs/open.c b/fs/open.c
index 16f561ea7..e6933ee33 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -67,6 +67,7 @@ int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
inode_unlock(dentry->d_inode);
return ret;
}
+EXPORT_SYMBOL_GPL(do_truncate);
long vfs_truncate(struct path *path, loff_t length)
{
@@ -681,6 +682,7 @@ int open_check_o_direct(struct file *f)
}
return 0;
}
+EXPORT_SYMBOL_GPL(open_check_o_direct);
static int do_dentry_open(struct file *f,
struct inode *inode,
@@ -995,14 +997,12 @@ struct file *filp_open(const char *filename, int flags, umode_t mode)
EXPORT_SYMBOL(filp_open);
struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
- const char *filename, int flags)
+ const char *filename, int flags, umode_t mode)
{
struct open_flags op;
- int err = build_open_flags(flags, 0, &op);
+ int err = build_open_flags(flags, mode, &op);
if (err)
return ERR_PTR(err);
- if (flags & O_CREAT)
- return ERR_PTR(-EINVAL);
return do_file_open_root(dentry, mnt, filename, &op);
}
EXPORT_SYMBOL(file_open_root);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 4f764c2ac..11aba74cb 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -505,7 +505,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
seq_printf(m, "0 0 0\n");
else
seq_printf(m, "%llu %llu %lu\n",
- (unsigned long long)task->se.sum_exec_runtime,
+ (unsigned long long)tsk_seruntime(task),
(unsigned long long)task->sched_info.run_delay,
task->sched_info.pcount);
@@ -1933,7 +1933,7 @@ static int map_files_get_link(struct dentry *dentry, struct path *path)
down_read(&mm->mmap_sem);
vma = find_exact_vma(mm, vm_start, vm_end);
if (vma && vma->vm_file) {
- *path = vma->vm_file->f_path;
+ *path = vma_pr_or_file(vma)->f_path;
path_get(path);
rc = 0;
}
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
index f8595e8b5..cb8eda060 100644
--- a/fs/proc/nommu.c
+++ b/fs/proc/nommu.c
@@ -45,7 +45,10 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
file = region->vm_file;
if (file) {
- struct inode *inode = file_inode(region->vm_file);
+ struct inode *inode;
+
+ file = vmr_pr_or_file(region);
+ inode = file_inode(file);
dev = inode->i_sb->s_dev;
ino = inode->i_ino;
}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index fa95ab2d3..d4403544a 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -298,7 +298,10 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
const char *name = NULL;
if (file) {
- struct inode *inode = file_inode(vma->vm_file);
+ struct inode *inode;
+
+ file = vma_pr_or_file(vma);
+ inode = file_inode(file);
dev = inode->i_sb->s_dev;
ino = inode->i_ino;
pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
@@ -1576,7 +1579,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
struct vm_area_struct *vma = v;
struct numa_maps *md = &numa_priv->md;
- struct file *file = vma->vm_file;
+ struct file *file = vma_pr_or_file(vma);
struct mm_struct *mm = vma->vm_mm;
struct mm_walk walk = {
.hugetlb_entry = gather_hugetlb_stats,
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index faacb0c0d..17b43be13 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -163,7 +163,10 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
file = vma->vm_file;
if (file) {
- struct inode *inode = file_inode(vma->vm_file);
+ struct inode *inode;
+
+ file = vma_pr_or_file(vma);
+ inode = file_inode(file);
dev = inode->i_sb->s_dev;
ino = inode->i_ino;
pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index 2256e7e23..3f1190d18 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -199,6 +199,8 @@ static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
if (sb->s_op->show_devname) {
seq_puts(m, "device ");
err = sb->s_op->show_devname(m, mnt_path.dentry);
+ if (err)
+ goto out;
} else {
if (r->mnt_devname) {
seq_puts(m, "device ");
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 3c3b81bb6..850d17fa0 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1398,7 +1398,7 @@ static int dquot_active(const struct inode *inode)
static int __dquot_initialize(struct inode *inode, int type)
{
int cnt, init_needed = 0;
- struct dquot **dquots, *got[MAXQUOTAS];
+ struct dquot **dquots, *got[MAXQUOTAS] = {};
struct super_block *sb = inode->i_sb;
qsize_t rsv;
int ret = 0;
@@ -1415,7 +1415,6 @@ static int __dquot_initialize(struct inode *inode, int type)
int rc;
struct dquot *dquot;
- got[cnt] = NULL;
if (type != -1 && cnt != type)
continue;
/*
diff --git a/fs/read_write.c b/fs/read_write.c
index dadf24e5c..9461d1264 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -534,6 +534,30 @@ ssize_t __vfs_write(struct file *file, const char __user *p, size_t count,
}
EXPORT_SYMBOL(__vfs_write);
+vfs_readf_t vfs_readf(struct file *file)
+{
+ const struct file_operations *fop = file->f_op;
+
+ if (fop->read)
+ return fop->read;
+ if (fop->read_iter)
+ return new_sync_read;
+ return ERR_PTR(-ENOSYS);
+}
+EXPORT_SYMBOL_GPL(vfs_readf);
+
+vfs_writef_t vfs_writef(struct file *file)
+{
+ const struct file_operations *fop = file->f_op;
+
+ if (fop->write)
+ return fop->write;
+ if (fop->write_iter)
+ return new_sync_write;
+ return ERR_PTR(-ENOSYS);
+}
+EXPORT_SYMBOL_GPL(vfs_writef);
+
ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t *pos)
{
mm_segment_t old_fs;
diff --git a/fs/splice.c b/fs/splice.c
index 82bc0d64f..3d8407c0d 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -185,6 +185,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
unsigned int spd_pages = spd->nr_pages;
int ret, do_wakeup, page_nr;
+ if (!spd_pages)
+ return 0;
+
ret = 0;
do_wakeup = 0;
page_nr = 0;
@@ -1108,8 +1111,8 @@ EXPORT_SYMBOL(generic_splice_sendpage);
/*
* Attempt to initiate a splice from pipe to file.
*/
-static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
- loff_t *ppos, size_t len, unsigned int flags)
+long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags)
{
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
loff_t *, size_t, unsigned int);
@@ -1121,13 +1124,14 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
return splice_write(pipe, out, ppos, len, flags);
}
+EXPORT_SYMBOL_GPL(do_splice_from);
/*
* Attempt to initiate a splice from a file to a pipe.
*/
-static long do_splice_to(struct file *in, loff_t *ppos,
- struct pipe_inode_info *pipe, size_t len,
- unsigned int flags)
+long do_splice_to(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
{
ssize_t (*splice_read)(struct file *, loff_t *,
struct pipe_inode_info *, size_t, unsigned int);
@@ -1147,6 +1151,7 @@ static long do_splice_to(struct file *in, loff_t *ppos,
return splice_read(in, ppos, pipe, len, flags);
}
+EXPORT_SYMBOL_GPL(do_splice_to);
/**
* splice_direct_to_actor - splices data directly between two non-pipes
diff --git a/fs/xattr.c b/fs/xattr.c
index 4861322e2..c4bb039ac 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -207,6 +207,7 @@ vfs_getxattr_alloc(struct dentry *dentry, const char *name, char **xattr_value,
*xattr_value = value;
return error;
}
+EXPORT_SYMBOL_GPL(vfs_getxattr_alloc);
ssize_t
vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index 0ef7c2ed3..4fa14820e 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -202,8 +202,10 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
sbp->namelen,
sbp->valuelen,
&sbp->name[sbp->namelen]);
- if (error)
+ if (error) {
+ kmem_free(sbuf);
return error;
+ }
if (context->seen_enough)
break;
cursor->offset++;
@@ -454,14 +456,13 @@ xfs_attr3_leaf_list_int(
args.rmtblkcnt = xfs_attr3_rmt_blocks(
args.dp->i_mount, valuelen);
retval = xfs_attr_rmtval_get(&args);
- if (retval)
- return retval;
- retval = context->put_listent(context,
- entry->flags,
- name_rmt->name,
- (int)name_rmt->namelen,
- valuelen,
- args.value);
+ if (!retval)
+ retval = context->put_listent(context,
+ entry->flags,
+ name_rmt->name,
+ (int)name_rmt->namelen,
+ valuelen,
+ args.value);
kmem_free(args.value);
} else {
retval = context->put_listent(context,
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index c30266e94..8ef0ccbf8 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -29,16 +29,16 @@ do { \
* @nr: the bit to set
* @addr: the address to start counting from
*
- * This operation is like clear_bit_unlock, however it is not atomic.
- * It does provide release barrier semantics so it can be used to unlock
- * a bit lock, however it would only be used if no other CPU can modify
- * any bits in the memory until the lock is released (a good example is
- * if the bit lock itself protects access to the other bits in the word).
+ * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all
+ * the bits in the word are protected by this lock some archs can use weaker
+ * ops to safely unlock.
+ *
+ * See for example x86's implementation.
*/
#define __clear_bit_unlock(nr, addr) \
do { \
- smp_mb(); \
- __clear_bit(nr, addr); \
+ smp_mb__before_atomic(); \
+ clear_bit(nr, addr); \
} while (0)
#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 789471dba..89d944b25 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -210,6 +210,9 @@ struct css_set {
/* all css_task_iters currently walking this cset */
struct list_head task_iters;
+ /* dead and being drained, ignore for migration */
+ bool dead;
+
/* For RCU-protected deletion */
struct rcu_head rcu_head;
};
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index ec1c61c87..899ab9f85 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -124,6 +124,8 @@ struct dm_dev {
char name[16];
};
+dev_t dm_get_dev_t(const char *path);
+
/*
* Constructors should call these functions to ensure destination devices
* are opened/closed correctly.
diff --git a/include/linux/file.h b/include/linux/file.h
index f87d30882..9a290b36b 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -19,6 +19,7 @@ struct dentry;
struct path;
extern struct file *alloc_file(struct path *, fmode_t mode,
const struct file_operations *fop);
+extern struct file *get_empty_filp(void);
static inline void fput_light(struct file *file, int fput_needed)
{
diff --git a/include/linux/fs.h b/include/linux/fs.h
index cf958ec45..e35963a6f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1260,6 +1260,7 @@ extern void fasync_free(struct fasync_struct *);
/* can be called from interrupts */
extern void kill_fasync(struct fasync_struct **, int, int);
+extern int setfl(int fd, struct file * filp, unsigned long arg);
extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
extern void f_setown(struct file *filp, unsigned long arg, int force);
extern void f_delown(struct file *filp);
@@ -1648,6 +1649,7 @@ struct file_operations {
ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
int (*check_flags)(int);
+ int (*setfl)(struct file *, unsigned long);
int (*flock) (struct file *, int, struct file_lock *);
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
@@ -1706,6 +1708,12 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
struct iovec *fast_pointer,
struct iovec **ret_pointer);
+typedef ssize_t (*vfs_readf_t)(struct file *, char __user *, size_t, loff_t *);
+typedef ssize_t (*vfs_writef_t)(struct file *, const char __user *, size_t,
+ loff_t *);
+vfs_readf_t vfs_readf(struct file *file);
+vfs_writef_t vfs_writef(struct file *file);
+
extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t __vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
@@ -2263,7 +2271,7 @@ extern long do_sys_open(int dfd, const char __user *filename, int flags,
extern struct file *file_open_name(struct filename *, int, umode_t);
extern struct file *filp_open(const char *, int, umode_t);
extern struct file *file_open_root(struct dentry *, struct vfsmount *,
- const char *, int);
+ const char *, int, umode_t);
extern struct file * dentry_open(const struct path *, int, const struct cred *);
extern int filp_close(struct file *, fl_owner_t id);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index f2cb8d455..056d3227d 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -157,8 +157,6 @@ extern struct task_group root_task_group;
# define INIT_VTIME(tsk)
#endif
-#define INIT_TASK_COMM "swapper"
-
#ifdef CONFIG_RT_MUTEXES
# define INIT_RT_MUTEXES(tsk) \
.pi_waiters = RB_ROOT, \
@@ -187,6 +185,78 @@ extern struct task_group root_task_group;
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
*/
+#ifdef CONFIG_SCHED_BFS
+#define INIT_TASK_COMM "BFS"
+#define INIT_TASK(tsk) \
+{ \
+ .state = 0, \
+ .stack = &init_thread_info, \
+ .usage = ATOMIC_INIT(2), \
+ .flags = PF_KTHREAD, \
+ .prio = NORMAL_PRIO, \
+ .static_prio = MAX_PRIO-20, \
+ .normal_prio = NORMAL_PRIO, \
+ .deadline = 0, \
+ .policy = SCHED_NORMAL, \
+ .cpus_allowed = CPU_MASK_ALL, \
+ .mm = NULL, \
+ .active_mm = &init_mm, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+ .run_list = LIST_HEAD_INIT(tsk.run_list), \
+ .time_slice = HZ, \
+ .tasks = LIST_HEAD_INIT(tsk.tasks), \
+ INIT_PUSHABLE_TASKS(tsk) \
+ .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
+ .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
+ .real_parent = &tsk, \
+ .parent = &tsk, \
+ .children = LIST_HEAD_INIT(tsk.children), \
+ .sibling = LIST_HEAD_INIT(tsk.sibling), \
+ .group_leader = &tsk, \
+ RCU_POINTER_INITIALIZER(real_cred, &init_cred), \
+ RCU_POINTER_INITIALIZER(cred, &init_cred), \
+ .comm = INIT_TASK_COMM, \
+ .thread = INIT_THREAD, \
+ .fs = &init_fs, \
+ .files = &init_files, \
+ .signal = &init_signals, \
+ .sighand = &init_sighand, \
+ .nsproxy = &init_nsproxy, \
+ .pending = { \
+ .list = LIST_HEAD_INIT(tsk.pending.list), \
+ .signal = {{0}}}, \
+ .blocked = {{0}}, \
+ .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
+ .journal_info = NULL, \
+ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
+ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
+ .timer_slack_ns = 50000, /* 50 usec default slack */ \
+ .pids = { \
+ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
+ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
+ [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
+ }, \
+ .thread_group = LIST_HEAD_INIT(tsk.thread_group), \
+ .thread_node = LIST_HEAD_INIT(init_signals.thread_head), \
+ INIT_IDS \
+ INIT_PERF_EVENTS(tsk) \
+ INIT_TRACE_IRQFLAGS \
+ INIT_LOCKDEP \
+ INIT_FTRACE_GRAPH \
+ INIT_TRACE_RECURSION \
+ INIT_TASK_RCU_PREEMPT(tsk) \
+ INIT_TASK_RCU_TASKS(tsk) \
+ INIT_CPUSET_SEQ(tsk) \
+ INIT_RT_MUTEXES(tsk) \
+ INIT_PREV_CPUTIME(tsk) \
+ INIT_VTIME(tsk) \
+ INIT_NUMA_BALANCING(tsk) \
+ INIT_KASAN(tsk) \
+}
+#else /* CONFIG_SCHED_BFS */
+#define INIT_TASK_COMM "swapper"
#define INIT_TASK(tsk) \
{ \
.state = 0, \
@@ -261,7 +331,7 @@ extern struct task_group root_task_group;
INIT_NUMA_BALANCING(tsk) \
INIT_KASAN(tsk) \
}
-
+#endif /* CONFIG_SCHED_BFS */
#define INIT_CPU_TIMERS(cpu_timers) \
{ \
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index beb9ce1c2..ce2fc3c74 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -52,6 +52,8 @@ enum {
*/
static inline int task_nice_ioprio(struct task_struct *task)
{
+ if (iso_task(task))
+ return 0;
return (task_nice(task) + 20) / 5;
}
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 5fdc55312..9384572f6 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -164,7 +164,11 @@ static inline u64 get_jiffies_64(void)
* Have the 32 bit jiffies value wrap 5 minutes after boot
* so jiffies wrap bugs show up earlier.
*/
+#ifdef CONFIG_SCHED_BFS
+#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-10*HZ))
+#else
#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
+#endif
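
A minimal standalone sketch, not part of the patch itself, of what the two INITIAL_JIFFIES values above imply, assuming HZ == 1000: the stock constant puts the 32-bit jiffies wrap about five minutes after boot, while the BFS value moves it to roughly ten seconds after boot.

#include <stdio.h>

#define HZ 1000
#define INITIAL_JIFFIES_STOCK ((unsigned long)(unsigned int)(-300 * HZ))
#define INITIAL_JIFFIES_BFS   ((unsigned long)(unsigned int)(-10 * HZ))

int main(void)
{
	/* distance from the initial value to the 32-bit wrap point, in ticks */
	unsigned int stock = 0u - (unsigned int)INITIAL_JIFFIES_STOCK;
	unsigned int bfs = 0u - (unsigned int)INITIAL_JIFFIES_BFS;

	printf("stock: wraps %u s after boot\n", stock / HZ);	/* 300 */
	printf("bfs:   wraps %u s after boot\n", bfs / HZ);	/* 10  */
	return 0;
}
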
/*
* Change timeval to jiffies, trying to avoid the
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index f31638c6e..95452f723 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -635,7 +635,7 @@ do { \
#define do_trace_printk(fmt, args...) \
do { \
- static const char *trace_printk_fmt \
+ static const char *trace_printk_fmt __used \
__attribute__((section("__trace_printk_fmt"))) = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
@@ -679,7 +679,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
*/
#define trace_puts(str) ({ \
- static const char *trace_printk_fmt \
+ static const char *trace_printk_fmt __used \
__attribute__((section("__trace_printk_fmt"))) = \
__builtin_constant_p(str) ? str : NULL; \
\
@@ -701,7 +701,7 @@ extern void trace_dump_stack(int skip);
#define ftrace_vprintk(fmt, vargs) \
do { \
if (__builtin_constant_p(fmt)) { \
- static const char *trace_printk_fmt \
+ static const char *trace_printk_fmt __used \
__attribute__((section("__trace_printk_fmt"))) = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ccc992c6a..1043097e9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1217,6 +1217,28 @@ static inline int fixup_user_fault(struct task_struct *tsk,
}
#endif
+extern void vma_do_file_update_time(struct vm_area_struct *, const char[], int);
+extern struct file *vma_do_pr_or_file(struct vm_area_struct *, const char[],
+ int);
+extern void vma_do_get_file(struct vm_area_struct *, const char[], int);
+extern void vma_do_fput(struct vm_area_struct *, const char[], int);
+
+#define vma_file_update_time(vma) vma_do_file_update_time(vma, __func__, \
+ __LINE__)
+#define vma_pr_or_file(vma) vma_do_pr_or_file(vma, __func__, \
+ __LINE__)
+#define vma_get_file(vma) vma_do_get_file(vma, __func__, __LINE__)
+#define vma_fput(vma) vma_do_fput(vma, __func__, __LINE__)
+
+#ifndef CONFIG_MMU
+extern struct file *vmr_do_pr_or_file(struct vm_region *, const char[], int);
+extern void vmr_do_fput(struct vm_region *, const char[], int);
+
+#define vmr_pr_or_file(region) vmr_do_pr_or_file(region, __func__, \
+ __LINE__)
+#define vmr_fput(region) vmr_do_fput(region, __func__, __LINE__)
+#endif /* !CONFIG_MMU */
+
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, int write);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 624b78b84..1be91c585 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -269,6 +269,7 @@ struct vm_region {
unsigned long vm_top; /* region allocated to here */
unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */
struct file *vm_file; /* the backing file or NULL */
+ struct file *vm_prfile; /* the virtual backing file or NULL */
int vm_usage; /* region usage count (access under nommu_region_sem) */
bool vm_icache_flushed : 1; /* true if the icache has been flushed for
@@ -343,6 +344,7 @@ struct vm_area_struct {
unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
units, *not* PAGE_CACHE_SIZE */
struct file * vm_file; /* File we map to (can be NULL). */
+ struct file *vm_prfile; /* shadow of vm_file */
void * vm_private_data; /* was vm_pte (shared mem) */
#ifndef CONFIG_MMU
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 27716254d..60042ab5d 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -359,6 +359,7 @@ struct pci_dev {
unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
unsigned int irq_managed:1;
unsigned int has_secondary_link:1;
+ unsigned int non_compliant_bars:1; /* broken BARs; ignore them */
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 838a89a78..a1aec10f1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -176,7 +176,7 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
extern void calc_global_load(unsigned long ticks);
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) && !defined(CONFIG_SCHED_BFS)
extern void update_cpu_load_nohz(int active);
#else
static inline void update_cpu_load_nohz(int active) { }
@@ -337,8 +337,6 @@ extern void init_idle_bootup_task(struct task_struct *idle);
extern cpumask_var_t cpu_isolated_map;
-extern int runqueue_is_locked(int cpu);
-
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
@@ -1393,9 +1391,11 @@ struct task_struct {
unsigned int flags; /* per process flags, defined below */
unsigned int ptrace;
+#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_BFS)
+ int on_cpu;
+#endif
#ifdef CONFIG_SMP
struct llist_node wake_entry;
- int on_cpu;
unsigned int wakee_flips;
unsigned long wakee_flip_decay_ts;
struct task_struct *last_wakee;
@@ -1403,12 +1403,29 @@ struct task_struct {
int wake_cpu;
#endif
int on_rq;
-
int prio, static_prio, normal_prio;
unsigned int rt_priority;
+#ifdef CONFIG_SCHED_BFS
+ int time_slice;
+ u64 deadline;
+ struct list_head run_list;
+ u64 last_ran;
+ u64 sched_time; /* sched_clock time spent running */
+#ifdef CONFIG_SMT_NICE
+ int smt_bias; /* Policy/nice level bias across smt siblings */
+#endif
+#ifdef CONFIG_SMP
+ bool sticky; /* Soft affined flag */
+#endif
+#ifdef CONFIG_HOTPLUG_CPU
+ bool zerobound; /* Bound to CPU0 for hotplug */
+#endif
+ unsigned long rt_timeout;
+#else /* CONFIG_SCHED_BFS */
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
+#endif
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
#endif
@@ -1528,6 +1545,9 @@ struct task_struct {
int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
cputime_t utime, stime, utimescaled, stimescaled;
+#ifdef CONFIG_SCHED_BFS
+ unsigned long utime_pc, stime_pc;
+#endif
cputime_t gtime;
struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
@@ -1846,6 +1866,63 @@ extern int arch_task_struct_size __read_mostly;
# define arch_task_struct_size (sizeof(struct task_struct))
#endif
+#ifdef CONFIG_SCHED_BFS
+bool grunqueue_is_locked(void);
+void grq_unlock_wait(void);
+void cpu_scaling(int cpu);
+void cpu_nonscaling(int cpu);
+#define tsk_seruntime(t) ((t)->sched_time)
+#define tsk_rttimeout(t) ((t)->rt_timeout)
+
+static inline void tsk_cpus_current(struct task_struct *p)
+{
+}
+
+static inline int runqueue_is_locked(int cpu)
+{
+ return grunqueue_is_locked();
+}
+
+void print_scheduler_version(void);
+
+static inline bool iso_task(struct task_struct *p)
+{
+ return (p->policy == SCHED_ISO);
+}
+#else /* CFS */
+extern int runqueue_is_locked(int cpu);
+static inline void cpu_scaling(int cpu)
+{
+}
+
+static inline void cpu_nonscaling(int cpu)
+{
+}
+#define tsk_seruntime(t) ((t)->se.sum_exec_runtime)
+#define tsk_rttimeout(t) ((t)->rt.timeout)
+
+static inline void tsk_cpus_current(struct task_struct *p)
+{
+ p->nr_cpus_allowed = current->nr_cpus_allowed;
+}
+
+static inline void print_scheduler_version(void)
+{
+ printk(KERN_INFO"CFS CPU scheduler.\n");
+}
+
+static inline bool iso_task(struct task_struct *p)
+{
+ return false;
+}
+
+/* Anyone feel like implementing this? */
+static inline bool above_background_load(void)
+{
+ return false;
+}
+#endif /* CONFIG_SCHED_BFS */
+
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
@@ -2261,7 +2338,7 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
}
#endif
-#ifdef CONFIG_NO_HZ_COMMON
+#if defined(CONFIG_NO_HZ_COMMON) && !defined(CONFIG_SCHED_BFS)
void calc_load_enter_idle(void);
void calc_load_exit_idle(void);
#else
@@ -2334,7 +2411,7 @@ extern unsigned long long
task_sched_runtime(struct task_struct *task);
/* sched_exec is called by processes performing an exec */
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_BFS)
extern void sched_exec(void);
#else
#define sched_exec() {}
diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
index d9cf5a576..7d5d0b861 100644
--- a/include/linux/sched/prio.h
+++ b/include/linux/sched/prio.h
@@ -19,8 +19,20 @@
*/
#define MAX_USER_RT_PRIO 100
+
+#ifdef CONFIG_SCHED_BFS
+/* Note different MAX_RT_PRIO */
+#define MAX_RT_PRIO (MAX_USER_RT_PRIO + 1)
+
+#define ISO_PRIO (MAX_RT_PRIO)
+#define NORMAL_PRIO (MAX_RT_PRIO + 1)
+#define IDLE_PRIO (MAX_RT_PRIO + 2)
+#define PRIO_LIMIT ((IDLE_PRIO) + 1)
+#else /* CONFIG_SCHED_BFS */
#define MAX_RT_PRIO MAX_USER_RT_PRIO
+#endif /* CONFIG_SCHED_BFS */
+
#define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
#define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
diff --git a/include/linux/splice.h b/include/linux/splice.h
index da2751d3b..2e0fca67c 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -83,4 +83,10 @@ extern void splice_shrink_spd(struct splice_pipe_desc *);
extern void spd_release_page(struct splice_pipe_desc *, unsigned int);
extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
+
+extern long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags);
+extern long do_splice_to(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags);
#endif
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index e13a1ace5..4a849f19e 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -156,6 +156,7 @@ struct thermal_attr {
* @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis
* @devdata: private pointer for device private data
* @trips: number of trip points the thermal zone supports
+ * @trips_disabled: bitmap for disabled trips
* @passive_delay: number of milliseconds to wait between polls when
* performing passive cooling.
* @polling_delay: number of milliseconds to wait between polls when
@@ -191,6 +192,7 @@ struct thermal_zone_device {
struct thermal_attr *trip_hyst_attrs;
void *devdata;
int trips;
+ unsigned long trips_disabled; /* bitmap for disabled trips */
int passive_delay;
int polling_delay;
int temperature;
diff --git a/include/linux/tty.h b/include/linux/tty.h
index d9fb4b043..19199c267 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -589,7 +589,7 @@ static inline int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
count = ld->ops->receive_buf2(ld->tty, p, f, count);
else {
count = min_t(int, count, ld->tty->receive_room);
- if (count)
+ if (count && ld->ops->receive_buf)
ld->ops->receive_buf(ld->tty, p, f, count);
}
return count;
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index c21c38ce7..93e63c56f 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -168,11 +168,13 @@ int snd_hdac_power_up(struct hdac_device *codec);
int snd_hdac_power_down(struct hdac_device *codec);
int snd_hdac_power_up_pm(struct hdac_device *codec);
int snd_hdac_power_down_pm(struct hdac_device *codec);
+int snd_hdac_keep_power_up(struct hdac_device *codec);
#else
static inline int snd_hdac_power_up(struct hdac_device *codec) { return 0; }
static inline int snd_hdac_power_down(struct hdac_device *codec) { return 0; }
static inline int snd_hdac_power_up_pm(struct hdac_device *codec) { return 0; }
static inline int snd_hdac_power_down_pm(struct hdac_device *codec) { return 0; }
+static inline int snd_hdac_keep_power_up(struct hdac_device *codec) { return 0; }
#endif
/*
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index ebd10e624..32152e771 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -59,6 +59,7 @@ header-y += atmsvc.h
header-y += atm_tcp.h
header-y += atm_zatm.h
header-y += audit.h
+header-y += aufs_type.h
header-y += auto_fs4.h
header-y += auto_fs.h
header-y += auxvec.h
diff --git a/include/uapi/linux/aufs_type.h b/include/uapi/linux/aufs_type.h
new file mode 100644
index 000000000..e783d44e2
--- /dev/null
+++ b/include/uapi/linux/aufs_type.h
@@ -0,0 +1,406 @@
+/*
+ * Copyright (C) 2005-2016 Junjiro R. Okajima
+ */
+
+#ifndef __AUFS_TYPE_H__
+#define __AUFS_TYPE_H__
+
+#define AUFS_NAME "aufs"
+
+#ifdef __KERNEL__
+/*
+ * define it before including all other headers.
+ * sched.h may use pr_* macros before defining "current", so define the
+ * no-current version first, and re-define later.
+ */
+#define pr_fmt(fmt) AUFS_NAME " %s:%d: " fmt, __func__, __LINE__
+#include <linux/sched.h>
+#undef pr_fmt
+#define pr_fmt(fmt) \
+ AUFS_NAME " %s:%d:%.*s[%d]: " fmt, __func__, __LINE__, \
+ (int)sizeof(current->comm), current->comm, current->pid
+#else
+#include <stdint.h>
+#include <sys/types.h>
+#endif /* __KERNEL__ */
+
+#include <linux/limits.h>
+
+#define AUFS_VERSION "4.5"
+
+/* todo? move this to linux-2.6.19/include/magic.h */
+#define AUFS_SUPER_MAGIC ('a' << 24 | 'u' << 16 | 'f' << 8 | 's')
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef CONFIG_AUFS_BRANCH_MAX_127
+typedef int8_t aufs_bindex_t;
+#define AUFS_BRANCH_MAX 127
+#else
+typedef int16_t aufs_bindex_t;
+#ifdef CONFIG_AUFS_BRANCH_MAX_511
+#define AUFS_BRANCH_MAX 511
+#elif defined(CONFIG_AUFS_BRANCH_MAX_1023)
+#define AUFS_BRANCH_MAX 1023
+#elif defined(CONFIG_AUFS_BRANCH_MAX_32767)
+#define AUFS_BRANCH_MAX 32767
+#endif
+#endif
+
+#ifdef __KERNEL__
+#ifndef AUFS_BRANCH_MAX
+#error unknown CONFIG_AUFS_BRANCH_MAX value
+#endif
+#endif /* __KERNEL__ */
+
+/* ---------------------------------------------------------------------- */
+
+#define AUFS_FSTYPE AUFS_NAME
+
+#define AUFS_ROOT_INO 2
+#define AUFS_FIRST_INO 11
+
+#define AUFS_WH_PFX ".wh."
+#define AUFS_WH_PFX_LEN ((int)sizeof(AUFS_WH_PFX) - 1)
+#define AUFS_WH_TMP_LEN 4
+/* a limit for rmdir/rename a dir and copyup */
+#define AUFS_MAX_NAMELEN (NAME_MAX \
+ - AUFS_WH_PFX_LEN * 2 /* doubly whiteouted */\
+ - 1 /* dot */\
+ - AUFS_WH_TMP_LEN) /* hex */
+#define AUFS_XINO_FNAME "." AUFS_NAME ".xino"
+#define AUFS_XINO_DEFPATH "/tmp/" AUFS_XINO_FNAME
+#define AUFS_XINO_DEF_SEC 30 /* seconds */
+#define AUFS_XINO_DEF_TRUNC 45 /* percentage */
+#define AUFS_DIRWH_DEF 3
+#define AUFS_RDCACHE_DEF 10 /* seconds */
+#define AUFS_RDCACHE_MAX 3600 /* seconds */
+#define AUFS_RDBLK_DEF 512 /* bytes */
+#define AUFS_RDHASH_DEF 32
+#define AUFS_WKQ_NAME AUFS_NAME "d"
+#define AUFS_MFS_DEF_SEC 30 /* seconds */
+#define AUFS_MFS_MAX_SEC 3600 /* seconds */
+#define AUFS_FHSM_CACHE_DEF_SEC 30 /* seconds */
+#define AUFS_PLINK_WARN 50 /* number of plinks in a single bucket */
+
+/* pseudo-link maintenance under /proc */
+#define AUFS_PLINK_MAINT_NAME "plink_maint"
+#define AUFS_PLINK_MAINT_DIR "fs/" AUFS_NAME
+#define AUFS_PLINK_MAINT_PATH AUFS_PLINK_MAINT_DIR "/" AUFS_PLINK_MAINT_NAME
+
+#define AUFS_DIROPQ_NAME AUFS_WH_PFX ".opq" /* whiteouted doubly */
+#define AUFS_WH_DIROPQ AUFS_WH_PFX AUFS_DIROPQ_NAME
+
+#define AUFS_BASE_NAME AUFS_WH_PFX AUFS_NAME
+#define AUFS_PLINKDIR_NAME AUFS_WH_PFX "plnk"
+#define AUFS_ORPHDIR_NAME AUFS_WH_PFX "orph"
+
+/* doubly whiteouted */
+#define AUFS_WH_BASE AUFS_WH_PFX AUFS_BASE_NAME
+#define AUFS_WH_PLINKDIR AUFS_WH_PFX AUFS_PLINKDIR_NAME
+#define AUFS_WH_ORPHDIR AUFS_WH_PFX AUFS_ORPHDIR_NAME
+
+/* branch permissions and attributes */
+#define AUFS_BRPERM_RW "rw"
+#define AUFS_BRPERM_RO "ro"
+#define AUFS_BRPERM_RR "rr"
+#define AUFS_BRATTR_COO_REG "coo_reg"
+#define AUFS_BRATTR_COO_ALL "coo_all"
+#define AUFS_BRATTR_FHSM "fhsm"
+#define AUFS_BRATTR_UNPIN "unpin"
+#define AUFS_BRATTR_ICEX "icex"
+#define AUFS_BRATTR_ICEX_SEC "icexsec"
+#define AUFS_BRATTR_ICEX_SYS "icexsys"
+#define AUFS_BRATTR_ICEX_TR "icextr"
+#define AUFS_BRATTR_ICEX_USR "icexusr"
+#define AUFS_BRATTR_ICEX_OTH "icexoth"
+#define AUFS_BRRATTR_WH "wh"
+#define AUFS_BRWATTR_NLWH "nolwh"
+#define AUFS_BRWATTR_MOO "moo"
+
+#define AuBrPerm_RW 1 /* writable, hardlinkable wh */
+#define AuBrPerm_RO (1 << 1) /* readonly */
+#define AuBrPerm_RR (1 << 2) /* natively readonly */
+#define AuBrPerm_Mask (AuBrPerm_RW | AuBrPerm_RO | AuBrPerm_RR)
+
+#define AuBrAttr_COO_REG (1 << 3) /* copy-up on open */
+#define AuBrAttr_COO_ALL (1 << 4)
+#define AuBrAttr_COO_Mask (AuBrAttr_COO_REG | AuBrAttr_COO_ALL)
+
+#define AuBrAttr_FHSM (1 << 5) /* file-based hsm */
+#define AuBrAttr_UNPIN (1 << 6) /* rename-able top dir of
+ branch. meaningless since
+ linux-3.18-rc1 */
+
+/* ignore error in copying XATTR */
+#define AuBrAttr_ICEX_SEC (1 << 7)
+#define AuBrAttr_ICEX_SYS (1 << 8)
+#define AuBrAttr_ICEX_TR (1 << 9)
+#define AuBrAttr_ICEX_USR (1 << 10)
+#define AuBrAttr_ICEX_OTH (1 << 11)
+#define AuBrAttr_ICEX (AuBrAttr_ICEX_SEC \
+ | AuBrAttr_ICEX_SYS \
+ | AuBrAttr_ICEX_TR \
+ | AuBrAttr_ICEX_USR \
+ | AuBrAttr_ICEX_OTH)
+
+#define AuBrRAttr_WH (1 << 12) /* whiteout-able */
+#define AuBrRAttr_Mask AuBrRAttr_WH
+
+#define AuBrWAttr_NoLinkWH (1 << 13) /* un-hardlinkable whiteouts */
+#define AuBrWAttr_MOO (1 << 14) /* move-up on open */
+#define AuBrWAttr_Mask (AuBrWAttr_NoLinkWH | AuBrWAttr_MOO)
+
+#define AuBrAttr_CMOO_Mask (AuBrAttr_COO_Mask | AuBrWAttr_MOO)
+
+/* #warning test userspace */
+#ifdef __KERNEL__
+#ifndef CONFIG_AUFS_FHSM
+#undef AuBrAttr_FHSM
+#define AuBrAttr_FHSM 0
+#endif
+#ifndef CONFIG_AUFS_XATTR
+#undef AuBrAttr_ICEX
+#define AuBrAttr_ICEX 0
+#undef AuBrAttr_ICEX_SEC
+#define AuBrAttr_ICEX_SEC 0
+#undef AuBrAttr_ICEX_SYS
+#define AuBrAttr_ICEX_SYS 0
+#undef AuBrAttr_ICEX_TR
+#define AuBrAttr_ICEX_TR 0
+#undef AuBrAttr_ICEX_USR
+#define AuBrAttr_ICEX_USR 0
+#undef AuBrAttr_ICEX_OTH
+#define AuBrAttr_ICEX_OTH 0
+#endif
+#endif
+
+/* the longest combination */
+/* AUFS_BRATTR_ICEX and AUFS_BRATTR_ICEX_TR don't affect here */
+#define AuBrPermStrSz sizeof(AUFS_BRPERM_RW \
+ "+" AUFS_BRATTR_COO_REG \
+ "+" AUFS_BRATTR_FHSM \
+ "+" AUFS_BRATTR_UNPIN \
+ "+" AUFS_BRATTR_ICEX_SEC \
+ "+" AUFS_BRATTR_ICEX_SYS \
+ "+" AUFS_BRATTR_ICEX_USR \
+ "+" AUFS_BRATTR_ICEX_OTH \
+ "+" AUFS_BRWATTR_NLWH)
+
+typedef struct {
+ char a[AuBrPermStrSz];
+} au_br_perm_str_t;
+
+static inline int au_br_writable(int brperm)
+{
+ return brperm & AuBrPerm_RW;
+}
+
+static inline int au_br_whable(int brperm)
+{
+ return brperm & (AuBrPerm_RW | AuBrRAttr_WH);
+}
+
+static inline int au_br_wh_linkable(int brperm)
+{
+ return !(brperm & AuBrWAttr_NoLinkWH);
+}
+
+static inline int au_br_cmoo(int brperm)
+{
+ return brperm & AuBrAttr_CMOO_Mask;
+}
+
+static inline int au_br_fhsm(int brperm)
+{
+ return brperm & AuBrAttr_FHSM;
+}
+
+/* ---------------------------------------------------------------------- */
+
+/* ioctl */
+enum {
+ /* readdir in userspace */
+ AuCtl_RDU,
+ AuCtl_RDU_INO,
+
+ AuCtl_WBR_FD, /* pathconf wrapper */
+ AuCtl_IBUSY, /* busy inode */
+ AuCtl_MVDOWN, /* move-down */
+ AuCtl_BR, /* info about branches */
+ AuCtl_FHSM_FD /* connection for fhsm */
+};
+
+/* borrowed from linux/include/linux/kernel.h */
+#ifndef ALIGN
+#define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1)
+#define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask))
+#endif
+
+/* borrowed from linux/include/linux/compiler-gcc3.h */
+#ifndef __aligned
+#define __aligned(x) __attribute__((aligned(x)))
+#endif
+
+#ifdef __KERNEL__
+#ifndef __packed
+#define __packed __attribute__((packed))
+#endif
+#endif
+
+struct au_rdu_cookie {
+ uint64_t h_pos;
+ int16_t bindex;
+ uint8_t flags;
+ uint8_t pad;
+ uint32_t generation;
+} __aligned(8);
+
+struct au_rdu_ent {
+ uint64_t ino;
+ int16_t bindex;
+ uint8_t type;
+ uint8_t nlen;
+ uint8_t wh;
+ char name[0];
+} __aligned(8);
+
+static inline int au_rdu_len(int nlen)
+{
+ /* include the terminating NULL */
+ return ALIGN(sizeof(struct au_rdu_ent) + nlen + 1,
+ sizeof(uint64_t));
+}
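
A minimal userspace sketch, not part of the patch itself, of the rounding au_rdu_len() performs: ALIGN() pads the name length plus the terminating NUL up to the next multiple of sizeof(uint64_t). The real function also adds sizeof(struct au_rdu_ent), which depends on the kernel's layout of the struct above, so that term is left out here.

#include <stdio.h>
#include <stdint.h>

#define ALIGN(x, a)		__ALIGN_MASK(x, (typeof(x))(a)-1)
#define __ALIGN_MASK(x, mask)	(((x)+(mask))&~(mask))

int main(void)
{
	/* name lengths 1, 7 and 8, plus one byte for the NUL, 8-byte aligned */
	printf("%zu %zu %zu\n",
	       ALIGN((size_t)1 + 1, sizeof(uint64_t)),	/*  8 */
	       ALIGN((size_t)7 + 1, sizeof(uint64_t)),	/*  8 */
	       ALIGN((size_t)8 + 1, sizeof(uint64_t)));	/* 16 */
	return 0;
}
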
+
+union au_rdu_ent_ul {
+ struct au_rdu_ent __user *e;
+ uint64_t ul;
+};
+
+enum {
+ AufsCtlRduV_SZ,
+ AufsCtlRduV_End
+};
+
+struct aufs_rdu {
+ /* input */
+ union {
+ uint64_t sz; /* AuCtl_RDU */
+ uint64_t nent; /* AuCtl_RDU_INO */
+ };
+ union au_rdu_ent_ul ent;
+ uint16_t verify[AufsCtlRduV_End];
+
+ /* input/output */
+ uint32_t blk;
+
+ /* output */
+ union au_rdu_ent_ul tail;
+ /* number of entries which were added in a single call */
+ uint64_t rent;
+ uint8_t full;
+ uint8_t shwh;
+
+ struct au_rdu_cookie cookie;
+} __aligned(8);
+
+/* ---------------------------------------------------------------------- */
+
+struct aufs_wbr_fd {
+ uint32_t oflags;
+ int16_t brid;
+} __aligned(8);
+
+/* ---------------------------------------------------------------------- */
+
+struct aufs_ibusy {
+ uint64_t ino, h_ino;
+ int16_t bindex;
+} __aligned(8);
+
+/* ---------------------------------------------------------------------- */
+
+/* error code for move-down */
+/* the actual message strings are implemented in aufs-util.git */
+enum {
+ EAU_MVDOWN_OPAQUE = 1,
+ EAU_MVDOWN_WHITEOUT,
+ EAU_MVDOWN_UPPER,
+ EAU_MVDOWN_BOTTOM,
+ EAU_MVDOWN_NOUPPER,
+ EAU_MVDOWN_NOLOWERBR,
+ EAU_Last
+};
+
+/* flags for move-down */
+#define AUFS_MVDOWN_DMSG 1
+#define AUFS_MVDOWN_OWLOWER (1 << 1) /* overwrite lower */
+#define AUFS_MVDOWN_KUPPER (1 << 2) /* keep upper */
+#define AUFS_MVDOWN_ROLOWER (1 << 3) /* do even if lower is RO */
+#define AUFS_MVDOWN_ROLOWER_R (1 << 4) /* did on lower RO */
+#define AUFS_MVDOWN_ROUPPER (1 << 5) /* do even if upper is RO */
+#define AUFS_MVDOWN_ROUPPER_R (1 << 6) /* did on upper RO */
+#define AUFS_MVDOWN_BRID_UPPER (1 << 7) /* upper brid */
+#define AUFS_MVDOWN_BRID_LOWER (1 << 8) /* lower brid */
+#define AUFS_MVDOWN_FHSM_LOWER (1 << 9) /* find fhsm attr for lower */
+#define AUFS_MVDOWN_STFS (1 << 10) /* req. stfs */
+#define AUFS_MVDOWN_STFS_FAILED (1 << 11) /* output: stfs is unusable */
+#define AUFS_MVDOWN_BOTTOM (1 << 12) /* output: no more lowers */
+
+/* index for move-down */
+enum {
+ AUFS_MVDOWN_UPPER,
+ AUFS_MVDOWN_LOWER,
+ AUFS_MVDOWN_NARRAY
+};
+
+/*
+ * additional info of move-down
+ * number of free blocks and inodes.
+ * subset of struct kstatfs, but smaller and always 64bit.
+ */
+struct aufs_stfs {
+ uint64_t f_blocks;
+ uint64_t f_bavail;
+ uint64_t f_files;
+ uint64_t f_ffree;
+};
+
+struct aufs_stbr {
+ int16_t brid; /* optional input */
+ int16_t bindex; /* output */
+ struct aufs_stfs stfs; /* output when AUFS_MVDOWN_STFS set */
+} __aligned(8);
+
+struct aufs_mvdown {
+ uint32_t flags; /* input/output */
+ struct aufs_stbr stbr[AUFS_MVDOWN_NARRAY]; /* input/output */
+ int8_t au_errno; /* output */
+} __aligned(8);
+
+/* ---------------------------------------------------------------------- */
+
+union aufs_brinfo {
+ /* PATH_MAX may differ between kernel-space and user-space */
+ char _spacer[4096];
+ struct {
+ int16_t id;
+ int perm;
+ char path[0];
+ };
+} __aligned(8);
+
+/* ---------------------------------------------------------------------- */
+
+#define AuCtlType 'A'
+#define AUFS_CTL_RDU _IOWR(AuCtlType, AuCtl_RDU, struct aufs_rdu)
+#define AUFS_CTL_RDU_INO _IOWR(AuCtlType, AuCtl_RDU_INO, struct aufs_rdu)
+#define AUFS_CTL_WBR_FD _IOW(AuCtlType, AuCtl_WBR_FD, \
+ struct aufs_wbr_fd)
+#define AUFS_CTL_IBUSY _IOWR(AuCtlType, AuCtl_IBUSY, struct aufs_ibusy)
+#define AUFS_CTL_MVDOWN _IOWR(AuCtlType, AuCtl_MVDOWN, \
+ struct aufs_mvdown)
+#define AUFS_CTL_BRINFO _IOW(AuCtlType, AuCtl_BR, union aufs_brinfo)
+#define AUFS_CTL_FHSM_FD _IOW(AuCtlType, AuCtl_FHSM_FD, int)
+
+#endif /* __AUFS_TYPE_H__ */
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index cc89ddefa..f63e1cd82 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -37,9 +37,16 @@
#define SCHED_FIFO 1
#define SCHED_RR 2
#define SCHED_BATCH 3
-/* SCHED_ISO: reserved but not implemented yet */
+/* SCHED_ISO: Implemented on BFS only */
#define SCHED_IDLE 5
+#ifdef CONFIG_SCHED_BFS
+#define SCHED_ISO 4
+#define SCHED_IDLEPRIO SCHED_IDLE
+#define SCHED_MAX (SCHED_IDLEPRIO)
+#define SCHED_RANGE(policy) ((policy) <= SCHED_MAX)
+#else /* CONFIG_SCHED_BFS */
#define SCHED_DEADLINE 6
+#endif /* CONFIG_SCHED_BFS */
/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
#define SCHED_RESET_ON_FORK 0x40000000
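
A minimal userspace sketch, not part of the patch itself, of requesting the SCHED_ISO policy defined above for the calling process. On a kernel built without SCHED_BFS the call simply fails; schedtool, mentioned in the Kconfig help further down, does the equivalent.

#include <sched.h>
#include <stdio.h>

#ifndef SCHED_ISO
#define SCHED_ISO 4	/* mirrors the BFS value added above */
#endif

int main(void)
{
	struct sched_param sp = { .sched_priority = 0 };

	if (sched_setscheduler(0, SCHED_ISO, &sp) == -1) {
		perror("sched_setscheduler(SCHED_ISO)");
		return 1;
	}
	puts("running as SCHED_ISO");
	return 0;
}
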
diff --git a/init/Kconfig b/init/Kconfig
index ec251fadb..0d6bb5cf4 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -44,7 +44,7 @@ config PCK_INTERACTIVE
NCQ Queue Depth................: 31 -> 8
Block Layer Queue Depth........: 128 -> 16
- --- CPU Scheduler --------------------------------------
+ --- CPU Scheduler (CFS) --------------------------------
Scheduling latency.............: 6 -> 3 ms
Minimal granularity............: 0.75 -> 0.3 ms
@@ -52,10 +52,38 @@ config PCK_INTERACTIVE
CPU migration cost.............: 0.5 -> 0.25 ms
Bandwidth slice size...........: 5 -> 3 ms
+ --- CPU Scheduler (BFS) --------------------------------
+
+ Scheduling interval............: 6 -> 3 ms
+ ISO task max realtime use......: 70 % -> 25 %
+
--- CPU Frequency Scaling ------------------------------
Ondemand down scaling factor...: 1 -> 10
+config SCHED_BFS
+ bool "BFS cpu scheduler"
+ default n
+ help
+ The Brain Fuck CPU Scheduler for excellent interactivity and
+ responsiveness on the desktop and solid scalability on normal
+ hardware and commodity servers. Not recommended for 4096 CPUs.
+
+ Currently incompatible with the Group CPU scheduler and RCU TORTURE
+ TEST, so these options are disabled.
+
+config SCHED_BFS_AUTOISO
+ bool "Automatically use SCHED_ISO policy for X"
+ depends on SCHED_BFS
+ default n
+ help
+ Selecting this option will automatically use the SCHED_ISO scheduling
+ policy for X, resulting in an interactivity boost. This *may* cause
+ things like skipping sound on audio applications that are not run
+ as SCHED_ISO.
+
+ Tasks (including X) can be run as sched_iso manually using schedtool.
+
config BROKEN
bool
@@ -363,7 +391,7 @@ choice
# Kind of a stub config for the pure tick based cputime accounting
config TICK_CPU_ACCOUNTING
bool "Simple tick based cputime accounting"
- depends on !S390 && !NO_HZ_FULL
+ depends on !S390 && !NO_HZ_FULL && !SCHED_BFS
help
This is the basic tick based cputime accounting that maintains
statistics about user, system and idle time spent on per jiffies
@@ -388,6 +416,7 @@ config VIRT_CPU_ACCOUNTING_GEN
bool "Full dynticks CPU time accounting"
depends on HAVE_CONTEXT_TRACKING
depends on HAVE_VIRT_CPU_ACCOUNTING_GEN
+ depends on !SCHED_BFS
select VIRT_CPU_ACCOUNTING
select CONTEXT_TRACKING
help
@@ -722,6 +751,7 @@ config RCU_NOCB_CPU
bool "Offload RCU callback processing from boot-selected CPUs"
depends on TREE_RCU || PREEMPT_RCU
depends on RCU_EXPERT || NO_HZ_FULL
+ depends on !SCHED_BFS
default n
help
Use this option to reduce OS jitter for aggressive HPC or
@@ -932,6 +962,7 @@ config NUMA_BALANCING
depends on ARCH_SUPPORTS_NUMA_BALANCING
depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
depends on SMP && NUMA && MIGRATION
+ depends on !SCHED_BFS
help
This option adds support for automatic NUMA aware memory/task placement.
The mechanism is quite primitive and is based on migrating memory when
@@ -1030,6 +1061,7 @@ config CGROUP_WRITEBACK
menuconfig CGROUP_SCHED
bool "CPU controller"
+ depends on !SCHED_BFS
default n
help
This feature lets CPU scheduler recognize task groups and control CPU
@@ -1132,6 +1164,7 @@ config CGROUP_DEVICE
config CGROUP_CPUACCT
bool "Simple CPU accounting controller"
+ depends on !SCHED_BFS
help
Provides a simple controller for monitoring the
total CPU consumed by the tasks in a cgroup.
@@ -1230,6 +1263,7 @@ endif # NAMESPACES
config SCHED_AUTOGROUP
bool "Automatic process group scheduling"
+ depends on !SCHED_BFS
select CGROUPS
select CGROUP_SCHED
select FAIR_GROUP_SCHED
@@ -1744,6 +1778,7 @@ choice
This option allows to select a slab allocator.
config SLAB
+ depends on !SCHED_BFS
bool "SLAB"
help
The regular slab allocator that is established and known to work
@@ -1761,7 +1796,7 @@ config SLUB
a slab allocator.
config SLOB
- depends on EXPERT
+ depends on EXPERT && !SCHED_BFS
bool "SLOB (Simple Allocator)"
help
SLOB replaces the stock allocator with a drastically simpler
diff --git a/init/main.c b/init/main.c
index 58c9e3747..055106098 100644
--- a/init/main.c
+++ b/init/main.c
@@ -808,7 +808,6 @@ int __init_or_module do_one_initcall(initcall_t fn)
return ret;
}
-
extern initcall_t __initcall_start[];
extern initcall_t __initcall0_start[];
extern initcall_t __initcall1_start[];
@@ -945,6 +944,8 @@ static int __ref kernel_init(void *unused)
rcu_end_inkernel_boot();
+ print_scheduler_version();
+
if (ramdisk_execute_command) {
ret = run_init_process(ramdisk_execute_command);
if (!ret)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index d27904c19..6a498daf2 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2474,6 +2474,14 @@ static void cgroup_migrate_add_src(struct css_set *src_cset,
lockdep_assert_held(&cgroup_mutex);
lockdep_assert_held(&css_set_lock);
+ /*
+ * If ->dead, @src_set is associated with one or more dead cgroups
+ * and doesn't contain any migratable tasks. Ignore it early so
+ * that the rest of migration path doesn't get confused by it.
+ */
+ if (src_cset->dead)
+ return;
+
src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
if (!list_empty(&src_cset->mg_preload_node))
@@ -5114,6 +5122,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
{
struct cgroup_subsys_state *css;
+ struct cgrp_cset_link *link;
int ssid;
lockdep_assert_held(&cgroup_mutex);
@@ -5134,11 +5143,18 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
return -EBUSY;
/*
- * Mark @cgrp dead. This prevents further task migration and child
- * creation by disabling cgroup_lock_live_group().
+ * Mark @cgrp and the associated csets dead. The former prevents
+ * further task migration and child creation by disabling
+ * cgroup_lock_live_group(). The latter makes the csets ignored by
+ * the migration path.
*/
cgrp->self.flags &= ~CSS_ONLINE;
+ spin_lock_bh(&css_set_lock);
+ list_for_each_entry(link, &cgrp->cset_links, cset_link)
+ link->cset->dead = true;
+ spin_unlock_bh(&css_set_lock);
+
/* initiate massacre of all css's */
for_each_css(css, ssid, cgrp)
kill_css(css);
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 435c14a45..a80d56dc7 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -104,7 +104,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
*/
t1 = tsk->sched_info.pcount;
t2 = tsk->sched_info.run_delay;
- t3 = tsk->se.sum_exec_runtime;
+ t3 = tsk_seruntime(tsk);
d->cpu_count += t1;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 614614821..f0b4b328d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -8001,6 +8001,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
}
}
+ /* symmetric to unaccount_event() in _free_event() */
+ account_event(event);
+
return event;
err_per_task:
@@ -8364,8 +8367,6 @@ SYSCALL_DEFINE5(perf_event_open,
}
}
- account_event(event);
-
/*
* Special case software events and allow them to be part of
* any hardware group.
@@ -8662,8 +8663,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
/* Mark owner so we could distinguish it from user events. */
event->owner = TASK_TOMBSTONE;
- account_event(event);
-
ctx = find_get_context(event->pmu, task, event);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
diff --git a/kernel/exit.c b/kernel/exit.c
index 10e088237..ac8f9b7d6 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -133,7 +133,7 @@ static void __exit_signal(struct task_struct *tsk)
sig->inblock += task_io_get_inblock(tsk);
sig->oublock += task_io_get_oublock(tsk);
task_io_accounting_add(&sig->ioac, &tsk->ioac);
- sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
+ sig->sum_sched_runtime += tsk_seruntime(tsk);
sig->nr_threads--;
__unhash_process(tsk, group_dead);
write_sequnlock(&sig->stats_lock);
diff --git a/kernel/fork.c b/kernel/fork.c
index bb061d0ea..8fbec37c5 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -464,7 +464,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
struct inode *inode = file_inode(file);
struct address_space *mapping = file->f_mapping;
- get_file(file);
+ vma_get_file(tmp);
if (tmp->vm_flags & VM_DENYWRITE)
atomic_dec(&inode->i_writecount);
i_mmap_lock_write(mapping);
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 153e51db5..ba2dda520 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -339,6 +339,7 @@ int hibernation_snapshot(int platform_mode)
pm_message_t msg;
int error;
+ pm_suspend_clear_flags();
error = platform_begin(platform_mode);
if (error)
goto Close;
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 7d4cba227..15e398808 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -11,11 +11,16 @@ ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
endif
+ifdef CONFIG_SCHED_BFS
+obj-y += bfs.o clock.o
+else
obj-y += core.o loadavg.o clock.o cputime.o
obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
-obj-y += wait.o swait.o completion.o idle.o
-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
+obj-$(CONFIG_SMP) += cpudeadline.o
obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
-obj-$(CONFIG_SCHEDSTATS) += stats.o
obj-$(CONFIG_SCHED_DEBUG) += debug.o
obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
+endif
+obj-y += wait.o swait.o completion.o idle.o
+obj-$(CONFIG_SMP) += cpupri.o
+obj-$(CONFIG_SCHEDSTATS) += stats.o
diff --git a/kernel/sched/bfs.c b/kernel/sched/bfs.c
new file mode 100644
index 000000000..698007a9f
--- /dev/null
+++ b/kernel/sched/bfs.c
@@ -0,0 +1,7609 @@
+/*
+ * kernel/sched/bfs.c, was kernel/sched.c
+ *
+ * Kernel scheduler and related syscalls
+ *
+ * Copyright (C) 1991-2002 Linus Torvalds
+ *
+ * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
+ * make semaphores SMP safe
+ * 1998-11-19 Implemented schedule_timeout() and related stuff
+ * by Andrea Arcangeli
+ * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
+ * hybrid priority-list and round-robin design with
+ * an array-switch method of distributing timeslices
+ * and per-CPU runqueues. Cleanups and useful suggestions
+ * by Davide Libenzi, preemptible kernel bits by Robert Love.
+ * 2003-09-03 Interactivity tuning by Con Kolivas.
+ * 2004-04-02 Scheduler domains code by Nick Piggin
+ * 2007-04-15 Work begun on replacing all interactivity tuning with a
+ * fair scheduling design by Con Kolivas.
+ * 2007-05-05 Load balancing (smp-nice) and other improvements
+ * by Peter Williams
+ * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
+ * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
+ * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
+ * Thomas Gleixner, Mike Kravetz
+ * now Brainfuck deadline scheduling policy by Con Kolivas deletes
+ * a whole lot of those previous things.
+ */
+
+#include <linux/kasan.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/nmi.h>
+#include <linux/init.h>
+#include <asm/uaccess.h>
+#include <linux/highmem.h>
+#include <asm/mmu_context.h>
+#include <linux/interrupt.h>
+#include <linux/capability.h>
+#include <linux/completion.h>
+#include <linux/kernel_stat.h>
+#include <linux/debug_locks.h>
+#include <linux/perf_event.h>
+#include <linux/security.h>
+#include <linux/notifier.h>
+#include <linux/profile.h>
+#include <linux/freezer.h>
+#include <linux/vmalloc.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/threads.h>
+#include <linux/timer.h>
+#include <linux/rcupdate.h>
+#include <linux/cpu.h>
+#include <linux/cpuset.h>
+#include <linux/cpumask.h>
+#include <linux/percpu.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/syscalls.h>
+#include <linux/sched/sysctl.h>
+#include <linux/times.h>
+#include <linux/tsacct_kern.h>
+#include <linux/kprobes.h>
+#include <linux/delayacct.h>
+#include <linux/log2.h>
+#include <linux/bootmem.h>
+#include <linux/ftrace.h>
+#include <linux/slab.h>
+#include <linux/init_task.h>
+#include <linux/binfmts.h>
+#include <linux/context_tracking.h>
+#include <linux/sched/prio.h>
+#include <linux/tick.h>
+
+#include <asm/irq_regs.h>
+#include <asm/switch_to.h>
+#include <asm/tlb.h>
+#include <asm/unistd.h>
+#include <asm/mutex.h>
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#endif
+
+#include "cpupri.h"
+#include "../workqueue_internal.h"
+#include "../smpboot.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/sched.h>
+
+#include "bfs_sched.h"
+
+#define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO)
+#define rt_task(p) rt_prio((p)->prio)
+#define rt_queue(rq) rt_prio((rq)->rq_prio)
+#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH))
+#define is_rt_policy(policy) ((policy) == SCHED_FIFO || \
+ (policy) == SCHED_RR)
+#define has_rt_policy(p) unlikely(is_rt_policy((p)->policy))
+
+#define is_idle_policy(policy) ((policy) == SCHED_IDLEPRIO)
+#define idleprio_task(p) unlikely(is_idle_policy((p)->policy))
+#define task_running_idle(p) unlikely((p)->prio == IDLE_PRIO)
+#define idle_queue(rq) (unlikely(is_idle_policy((rq)->rq_policy)))
+
+#define is_iso_policy(policy) ((policy) == SCHED_ISO)
+#define iso_task(p) unlikely(is_iso_policy((p)->policy))
+#define iso_queue(rq) unlikely(is_iso_policy((rq)->rq_policy))
+#define task_running_iso(p) unlikely((p)->prio == ISO_PRIO)
+#define rq_running_iso(rq) ((rq)->rq_prio == ISO_PRIO)
+
+#define rq_idle(rq) ((rq)->rq_prio == PRIO_LIMIT)
+
+#define ISO_PERIOD ((5 * HZ * grq.noc) + 1)
+
+#define SCHED_PRIO(p) ((p) + MAX_RT_PRIO)
+#define STOP_PRIO (MAX_RT_PRIO - 1)
+
+/*
+ * Some helpers for converting to/from various scales. Use shifts to get
+ * approximate multiples of ten for less overhead.
+ */
+#define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ))
+#define JIFFY_NS (1000000000 / HZ)
+#define HALF_JIFFY_NS (1000000000 / HZ / 2)
+#define HALF_JIFFY_US (1000000 / HZ / 2)
+#define MS_TO_NS(TIME) ((TIME) << 20)
+#define MS_TO_US(TIME) ((TIME) << 10)
+#define NS_TO_MS(TIME) ((TIME) >> 20)
+#define NS_TO_US(TIME) ((TIME) >> 10)
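
A minimal sketch, not part of the patch itself, of the error these shift-based helpers accept: a shift by 10 stands in for a factor of 1000, so converted values land roughly 2.4% away from the exact decimal result, which is cheap and close enough for scheduling arithmetic.

#include <stdio.h>

#define MS_TO_NS(t)	((t) << 20)
#define MS_TO_US(t)	((t) << 10)
#define NS_TO_MS(t)	((t) >> 20)

int main(void)
{
	long us = MS_TO_US(6L);		/* 6144 "microseconds" vs exactly 6000 */
	long ns = MS_TO_NS(6L);		/* 6291456 vs exactly 6000000          */

	printf("6 ms -> %ld us, %ld ns, back to %ld ms\n", us, ns, NS_TO_MS(ns));
	return 0;
}
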
+
+#define RESCHED_US (100) /* Reschedule if less than this many μs left */
+
+void print_scheduler_version(void)
+{
+ printk(KERN_INFO "BFS CPU scheduler v0.469 by Con Kolivas.\n");
+}
+
+/*
+ * This is the interval over which all tasks of the same priority are round
+ * robined. Value is in ms; the default is 6 (3 when PCK_INTERACTIVE is set)
+ * and it scales with the number of cpus. Tunable via the /proc interface.
+ */
+#ifdef CONFIG_PCK_INTERACTIVE
+int rr_interval __read_mostly = 3;
+#else
+int rr_interval __read_mostly = 6;
+#endif
+
+/* Tunable to choose whether to prioritise latency or throughput, simple
+ * binary yes or no */
+
+int sched_interactive __read_mostly = 1;
+
+/*
+ * sched_iso_cpu - sysctl which determines the cpu percentage SCHED_ISO tasks
+ * are allowed to run as realtime tasks, averaged over a rolling five second
+ * period. This is the total over all online cpus.
+ */
+#ifdef CONFIG_PCK_INTERACTIVE
+int sched_iso_cpu __read_mostly = 25;
+#else
+int sched_iso_cpu __read_mostly = 70;
+#endif
+
+/*
+ * The relative length of deadline for each priority(nice) level.
+ */
+static int prio_ratios[NICE_WIDTH] __read_mostly;
+
+/*
+ * The quota handed out to tasks of all priority levels when refilling their
+ * time_slice.
+ */
+static inline int timeslice(void)
+{
+ return MS_TO_US(rr_interval);
+}
+
+/*
+ * The global runqueue data that all CPUs work off. Data is protected either
+ * by the global grq lock, or the discrete lock that precedes the data in this
+ * struct.
+ */
+struct global_rq {
+ raw_spinlock_t lock;
+ unsigned long nr_running;
+ unsigned long nr_uninterruptible;
+ unsigned long long nr_switches;
+ struct list_head queue[PRIO_LIMIT];
+ DECLARE_BITMAP(prio_bitmap, PRIO_LIMIT + 1);
+ unsigned long qnr; /* queued not running */
+#ifdef CONFIG_SMP
+ cpumask_t cpu_idle_map;
+ bool idle_cpus;
+#endif
+ int noc; /* num_online_cpus stored and updated when it changes */
+ u64 niffies; /* Nanosecond jiffies */
+ unsigned long last_jiffy; /* Last jiffy we updated niffies */
+
+ raw_spinlock_t iso_lock;
+ int iso_ticks;
+ bool iso_refractory;
+};
+
+#ifdef CONFIG_SMP
+/*
+ * We add the notion of a root-domain which will be used to define per-domain
+ * variables. Each exclusive cpuset essentially defines an island domain by
+ * fully partitioning the member cpus from any other cpuset. Whenever a new
+ * exclusive cpuset is created, we also create and attach a new root-domain
+ * object.
+ *
+ */
+struct root_domain {
+ atomic_t refcount;
+ atomic_t rto_count;
+ struct rcu_head rcu;
+ cpumask_var_t span;
+ cpumask_var_t online;
+
+ /*
+ * The "RT overload" flag: it gets set if a CPU has more than
+ * one runnable RT task.
+ */
+ cpumask_var_t rto_mask;
+ struct cpupri cpupri;
+};
+
+/*
+ * By default the system creates a single root-domain with all cpus as
+ * members (mimicking the global state we have today).
+ */
+static struct root_domain def_root_domain;
+
+#endif /* CONFIG_SMP */
+
+/* There can be only one */
+static struct global_rq grq;
+
+static DEFINE_MUTEX(sched_hotcpu_mutex);
+
+/* cpus with isolated domains */
+cpumask_var_t cpu_isolated_map;
+
+DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+#ifdef CONFIG_SMP
+struct rq *cpu_rq(int cpu)
+{
+ return &per_cpu(runqueues, (cpu));
+}
+#define task_rq(p) cpu_rq(task_cpu(p))
+#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
+/*
+ * sched_domains_mutex serialises calls to init_sched_domains,
+ * detach_destroy_domains and partition_sched_domains.
+ */
+DEFINE_MUTEX(sched_domains_mutex);
+
+/*
+ * By default the system creates a single root-domain with all cpus as
+ * members (mimicking the global state we have today).
+ */
+static struct root_domain def_root_domain;
+
+int __weak arch_sd_sibling_asym_packing(void)
+{
+ return 0*SD_ASYM_PACKING;
+}
+#else
+struct rq *uprq;
+#endif /* CONFIG_SMP */
+
+static inline void update_rq_clock(struct rq *rq);
+
+/*
+ * Sanity check should sched_clock return bogus values. We make sure it does
+ * not appear to go backwards, and use jiffies to determine the maximum and
+ * minimum it could possibly have increased, and round down to the nearest
+ * jiffy when it falls outside this.
+ */
+static inline void niffy_diff(s64 *niff_diff, int jiff_diff)
+{
+ unsigned long min_diff, max_diff;
+
+ if (jiff_diff > 1)
+ min_diff = JIFFIES_TO_NS(jiff_diff - 1);
+ else
+ min_diff = 1;
+ /* Round up to the nearest tick for maximum */
+ max_diff = JIFFIES_TO_NS(jiff_diff + 1);
+
+ if (unlikely(*niff_diff < min_diff || *niff_diff > max_diff))
+ *niff_diff = min_diff;
+}
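
A minimal userspace sketch, not part of the patch itself, of the clamping above, assuming HZ == 1000 so one jiffy is 1,000,000 ns: a clock delta that disagrees with the number of elapsed jiffies is pulled back to the smallest plausible value.

#include <stdio.h>

#define HZ 1000
#define JIFFIES_TO_NS(t) ((t) * (1000000000 / HZ))

static void clamp_niff(long long *niff_diff, int jiff_diff)
{
	unsigned long min_diff, max_diff;

	min_diff = jiff_diff > 1 ? JIFFIES_TO_NS(jiff_diff - 1) : 1;
	max_diff = JIFFIES_TO_NS(jiff_diff + 1);	/* round up one tick */
	if (*niff_diff < (long long)min_diff || *niff_diff > (long long)max_diff)
		*niff_diff = min_diff;
}

int main(void)
{
	long long ok = 2500000, bogus = 90000000;

	clamp_niff(&ok, 3);	/* within 2..4 jiffies worth of ns: kept     */
	clamp_niff(&bogus, 3);	/* far beyond 4 jiffies: clamped to 2000000  */
	printf("%lld %lld\n", ok, bogus);	/* 2500000 2000000 */
	return 0;
}
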
+
+#ifdef CONFIG_SMP
+static inline int cpu_of(struct rq *rq)
+{
+ return rq->cpu;
+}
+
+/*
+ * Niffies are a globally increasing nanosecond counter. Whenever a runqueue
+ * clock is updated with the grq.lock held, it is an opportunity to update the
+ * niffies value. Any CPU can update it by adding how much its clock has
+ * increased since it last updated niffies, minus any added niffies by other
+ * CPUs.
+ */
+static inline void update_clocks(struct rq *rq)
+{
+ s64 ndiff;
+ long jdiff;
+
+ update_rq_clock(rq);
+ ndiff = rq->clock - rq->old_clock;
+ /* old_clock is only updated when we are updating niffies */
+ rq->old_clock = rq->clock;
+ ndiff -= grq.niffies - rq->last_niffy;
+ jdiff = jiffies - grq.last_jiffy;
+ niffy_diff(&ndiff, jdiff);
+ grq.last_jiffy += jdiff;
+ grq.niffies += ndiff;
+ rq->last_niffy = grq.niffies;
+}
+#else /* CONFIG_SMP */
+static inline int cpu_of(struct rq *rq)
+{
+ return 0;
+}
+
+static inline void update_clocks(struct rq *rq)
+{
+ s64 ndiff;
+ long jdiff;
+
+ update_rq_clock(rq);
+ ndiff = rq->clock - rq->old_clock;
+ rq->old_clock = rq->clock;
+ jdiff = jiffies - grq.last_jiffy;
+ niffy_diff(&ndiff, jdiff);
+ grq.last_jiffy += jdiff;
+ grq.niffies += ndiff;
+}
+#endif
+
+#include "stats.h"
+
+#ifndef prepare_arch_switch
+# define prepare_arch_switch(next) do { } while (0)
+#endif
+#ifndef finish_arch_switch
+# define finish_arch_switch(prev) do { } while (0)
+#endif
+#ifndef finish_arch_post_lock_switch
+# define finish_arch_post_lock_switch() do { } while (0)
+#endif
+
+/*
+ * All common locking functions performed on grq.lock. rq->clock is local to
+ * the CPU accessing it so it can be modified just with interrupts disabled
+ * when we're not updating niffies.
+ * Looking up task_rq must be done under grq.lock to be safe.
+ */
+static void update_rq_clock_task(struct rq *rq, s64 delta);
+
+static inline void update_rq_clock(struct rq *rq)
+{
+ s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
+
+ if (unlikely(delta < 0))
+ return;
+ rq->clock += delta;
+ update_rq_clock_task(rq, delta);
+}
+
+static inline bool task_running(struct task_struct *p)
+{
+ return p->on_cpu;
+}
+
+static inline void grq_lock(void)
+ __acquires(grq.lock)
+{
+ raw_spin_lock(&grq.lock);
+}
+
+static inline void grq_unlock(void)
+ __releases(grq.lock)
+{
+ raw_spin_unlock(&grq.lock);
+}
+
+static inline void grq_lock_irq(void)
+ __acquires(grq.lock)
+{
+ raw_spin_lock_irq(&grq.lock);
+}
+
+static inline void time_lock_grq(struct rq *rq)
+ __acquires(grq.lock)
+{
+ grq_lock();
+ update_clocks(rq);
+}
+
+static inline void grq_unlock_irq(void)
+ __releases(grq.lock)
+{
+ raw_spin_unlock_irq(&grq.lock);
+}
+
+static inline void grq_lock_irqsave(unsigned long *flags)
+ __acquires(grq.lock)
+{
+ raw_spin_lock_irqsave(&grq.lock, *flags);
+}
+
+static inline void grq_unlock_irqrestore(unsigned long *flags)
+ __releases(grq.lock)
+{
+ raw_spin_unlock_irqrestore(&grq.lock, *flags);
+}
+
+static inline struct rq
+*task_grq_lock(struct task_struct *p, unsigned long *flags)
+ __acquires(grq.lock)
+{
+ grq_lock_irqsave(flags);
+ return task_rq(p);
+}
+
+static inline struct rq
+*time_task_grq_lock(struct task_struct *p, unsigned long *flags)
+ __acquires(grq.lock)
+{
+ struct rq *rq = task_grq_lock(p, flags);
+ update_clocks(rq);
+ return rq;
+}
+
+static inline struct rq *task_grq_lock_irq(struct task_struct *p)
+ __acquires(grq.lock)
+{
+ grq_lock_irq();
+ return task_rq(p);
+}
+
+static inline void time_task_grq_lock_irq(struct task_struct *p)
+ __acquires(grq.lock)
+{
+ struct rq *rq = task_grq_lock_irq(p);
+ update_clocks(rq);
+}
+
+static inline void task_grq_unlock_irq(void)
+ __releases(grq.lock)
+{
+ grq_unlock_irq();
+}
+
+static inline void task_grq_unlock(unsigned long *flags)
+ __releases(grq.lock)
+{
+ grq_unlock_irqrestore(flags);
+}
+
+/**
+ * grunqueue_is_locked
+ *
+ * Returns true if the global runqueue is locked.
+ * This interface allows printk to be called with the runqueue lock
+ * held and know whether or not it is OK to wake up the klogd.
+ */
+bool grunqueue_is_locked(void)
+{
+ return raw_spin_is_locked(&grq.lock);
+}
+
+void grq_unlock_wait(void)
+ __releases(grq.lock)
+{
+ smp_mb(); /* spin-unlock-wait is not a full memory barrier */
+ raw_spin_unlock_wait(&grq.lock);
+}
+
+static inline void time_grq_lock(struct rq *rq, unsigned long *flags)
+ __acquires(grq.lock)
+{
+ local_irq_save(*flags);
+ time_lock_grq(rq);
+}
+
+static inline struct rq *__task_grq_lock(struct task_struct *p)
+ __acquires(grq.lock)
+{
+ grq_lock();
+ return task_rq(p);
+}
+
+static inline void __task_grq_unlock(void)
+ __releases(grq.lock)
+{
+ grq_unlock();
+}
+
+static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
+{
+}
+
+static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+ /* this is a valid case when another task releases the spinlock */
+ grq.lock.owner = current;
+#endif
+ /*
+ * If we are tracking spinlock dependencies then we have to
+ * fix up the runqueue lock - which gets 'carried over' from
+ * prev into current:
+ */
+ spin_acquire(&grq.lock.dep_map, 0, 0, _THIS_IP_);
+
+ grq_unlock_irq();
+}
+
+static inline bool deadline_before(u64 deadline, u64 time)
+{
+ return (deadline < time);
+}
+
+static inline bool deadline_after(u64 deadline, u64 time)
+{
+ return (deadline > time);
+}
+
+/*
+ * A task that is queued but not running will be on the grq run list.
+ * A task that is not running or queued will not be on the grq run list.
+ * A task that is currently running will have ->on_cpu set but not on the
+ * grq run list.
+ */
+static inline bool task_queued(struct task_struct *p)
+{
+ return (!list_empty(&p->run_list));
+}
+
+/*
+ * Removing from the global runqueue. Enter with grq locked.
+ */
+static void dequeue_task(struct task_struct *p)
+{
+ list_del_init(&p->run_list);
+ if (list_empty(grq.queue + p->prio))
+ __clear_bit(p->prio, grq.prio_bitmap);
+ sched_info_dequeued(task_rq(p), p);
+}
+
+/*
+ * To determine if it's safe for a task of SCHED_IDLEPRIO to actually run as
+ * an idle task, we ensure none of the following conditions are met.
+ */
+static bool idleprio_suitable(struct task_struct *p)
+{
+ return (!freezing(p) && !signal_pending(p) &&
+ !(task_contributes_to_load(p)) && !(p->flags & (PF_EXITING)));
+}
+
+/*
+ * To determine if a task of SCHED_ISO can run in pseudo-realtime, we check
+ * that the iso_refractory flag is not set.
+ */
+static bool isoprio_suitable(void)
+{
+ return !grq.iso_refractory;
+}
+
+/*
+ * Adding to the global runqueue. Enter with grq locked.
+ */
+static void enqueue_task(struct task_struct *p, struct rq *rq)
+{
+ if (!rt_task(p)) {
+ /* Check it hasn't gotten rt from PI */
+ if ((idleprio_task(p) && idleprio_suitable(p)) ||
+ (iso_task(p) && isoprio_suitable()))
+ p->prio = p->normal_prio;
+ else
+ p->prio = NORMAL_PRIO;
+ }
+ __set_bit(p->prio, grq.prio_bitmap);
+ list_add_tail(&p->run_list, grq.queue + p->prio);
+ sched_info_queued(rq, p);
+}
+
+static inline void requeue_task(struct task_struct *p)
+{
+ sched_info_queued(task_rq(p), p);
+}
+
+/*
+ * Returns the relative deadline length of a task compared to the shortest
+ * deadline, which is that of nice -20.
+ */
+static inline int task_prio_ratio(struct task_struct *p)
+{
+ return prio_ratios[TASK_USER_PRIO(p)];
+}
+
+/*
+ * task_timeslice - all tasks of all priorities get the exact same timeslice
+ * length. CPU distribution is handled by giving different deadlines to
+ * tasks of different priorities. Use 128 as the base value for fast shifts.
+ */
+static inline int task_timeslice(struct task_struct *p)
+{
+ return (rr_interval * task_prio_ratio(p) / 128);
+}
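
A minimal sketch, not part of the patch itself, of the arithmetic above. The prio_ratios[] table is filled elsewhere in this file and is not visible in this hunk, so the ratio values below are assumptions for the demo; 128 is the fast-shift base corresponding to the shortest (nice -20) deadline.

#include <stdio.h>

static int rr_interval = 6;	/* ms, the non-PCK_INTERACTIVE default above */

static int scaled_slice_ms(int prio_ratio)
{
	return rr_interval * prio_ratio / 128;
}

int main(void)
{
	printf("ratio 128 -> %d ms\n", scaled_slice_ms(128));	/* 6  */
	printf("ratio 256 -> %d ms\n", scaled_slice_ms(256));	/* 12 */
	return 0;
}
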
+
+static void resched_task(struct task_struct *p);
+
+static inline void resched_curr(struct rq *rq)
+{
+ resched_task(rq->curr);
+}
+
+/*
+ * qnr is the "queued but not running" count which is the total number of
+ * tasks on the global runqueue list waiting for cpu time but not actually
+ * currently running on a cpu.
+ */
+static inline void inc_qnr(void)
+{
+ grq.qnr++;
+}
+
+static inline void dec_qnr(void)
+{
+ grq.qnr--;
+}
+
+static inline int queued_notrunning(void)
+{
+ return grq.qnr;
+}
+
+#ifdef CONFIG_SMP
+/*
+ * The cpu_idle_map stores a bitmap of all the CPUs currently idle to
+ * allow easy lookup of whether any suitable idle CPUs are available.
+ * It's cheaper to keep a binary yes/no in the idle_cpus variable, saying
+ * whether any CPUs are idle, than to do a full bitmask check when we are busy.
+ */
+static inline void set_cpuidle_map(int cpu)
+{
+ if (likely(cpu_online(cpu))) {
+ cpumask_set_cpu(cpu, &grq.cpu_idle_map);
+ grq.idle_cpus = true;
+ }
+}
+
+static inline void clear_cpuidle_map(int cpu)
+{
+ cpumask_clear_cpu(cpu, &grq.cpu_idle_map);
+ if (cpumask_empty(&grq.cpu_idle_map))
+ grq.idle_cpus = false;
+}
+
+static bool suitable_idle_cpus(struct task_struct *p)
+{
+ if (!grq.idle_cpus)
+ return false;
+ return (cpumask_intersects(&p->cpus_allowed, &grq.cpu_idle_map));
+}
+
+#define CPUIDLE_DIFF_THREAD (1)
+#define CPUIDLE_DIFF_CORE (2)
+#define CPUIDLE_CACHE_BUSY (4)
+#define CPUIDLE_DIFF_CPU (8)
+#define CPUIDLE_THREAD_BUSY (16)
+#define CPUIDLE_THROTTLED (32)
+#define CPUIDLE_DIFF_NODE (64)
+
+static inline bool scaling_rq(struct rq *rq);
+
+/*
+ * The best idle CPU is chosen according to the CPUIDLE ranking above where the
+ * lowest value would give the most suitable CPU to schedule p onto next. The
+ * order works out to be the following:
+ *
+ * Same core, idle or busy cache, idle or busy threads
+ * Other core, same cache, idle or busy cache, idle threads.
+ * Same node, other CPU, idle cache, idle threads.
+ * Same node, other CPU, busy cache, idle threads.
+ * Other core, same cache, busy threads.
+ * Same node, other CPU, busy threads.
+ * Other node, other CPU, idle cache, idle threads.
+ * Other node, other CPU, busy cache, idle threads.
+ * Other node, other CPU, busy threads.
+ */
+static int best_mask_cpu(int best_cpu, struct rq *rq, cpumask_t *tmpmask)
+{
+ int best_ranking = CPUIDLE_DIFF_NODE | CPUIDLE_THROTTLED |
+ CPUIDLE_THREAD_BUSY | CPUIDLE_DIFF_CPU | CPUIDLE_CACHE_BUSY |
+ CPUIDLE_DIFF_CORE | CPUIDLE_DIFF_THREAD;
+ int cpu_tmp;
+
+ if (cpumask_test_cpu(best_cpu, tmpmask))
+ goto out;
+
+ for_each_cpu(cpu_tmp, tmpmask) {
+ int ranking, locality;
+ struct rq *tmp_rq;
+
+ ranking = 0;
+ tmp_rq = cpu_rq(cpu_tmp);
+
+ locality = rq->cpu_locality[cpu_tmp];
+#ifdef CONFIG_NUMA
+ if (locality > 3)
+ ranking |= CPUIDLE_DIFF_NODE;
+ else
+#endif
+ if (locality > 2)
+ ranking |= CPUIDLE_DIFF_CPU;
+#ifdef CONFIG_SCHED_MC
+ else if (locality == 2)
+ ranking |= CPUIDLE_DIFF_CORE;
+ if (!(tmp_rq->cache_idle(cpu_tmp)))
+ ranking |= CPUIDLE_CACHE_BUSY;
+#endif
+#ifdef CONFIG_SCHED_SMT
+ if (locality == 1)
+ ranking |= CPUIDLE_DIFF_THREAD;
+ if (!(tmp_rq->siblings_idle(cpu_tmp)))
+ ranking |= CPUIDLE_THREAD_BUSY;
+#endif
+ if (scaling_rq(tmp_rq))
+ ranking |= CPUIDLE_THROTTLED;
+
+ if (ranking < best_ranking) {
+ best_cpu = cpu_tmp;
+ best_ranking = ranking;
+ }
+ }
+out:
+ return best_cpu;
+}
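
A minimal sketch, not part of the patch itself, of why the flag values above compare directly: each penalty is a distinct power of two, so the OR of a candidate CPU's penalties orders candidates from most to least suitable. The candidates below are hypothetical.

#include <stdio.h>

#define CPUIDLE_DIFF_THREAD	(1)
#define CPUIDLE_DIFF_CORE	(2)
#define CPUIDLE_CACHE_BUSY	(4)
#define CPUIDLE_DIFF_CPU	(8)
#define CPUIDLE_THREAD_BUSY	(16)
#define CPUIDLE_THROTTLED	(32)
#define CPUIDLE_DIFF_NODE	(64)

int main(void)
{
	/* hypothetical candidates for a task that last ran on CPU 0 */
	int smt_sibling   = CPUIDLE_DIFF_THREAD;			/*  1 */
	int same_pkg_core = CPUIDLE_DIFF_CORE | CPUIDLE_CACHE_BUSY;	/*  6 */
	int other_socket  = CPUIDLE_DIFF_CPU | CPUIDLE_THROTTLED;	/* 40 */

	int best = smt_sibling;
	if (same_pkg_core < best)
		best = same_pkg_core;
	if (other_socket < best)
		best = other_socket;
	printf("best ranking = %d (the SMT sibling wins)\n", best);	/* 1 */
	return 0;
}
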
+
+static void resched_best_mask(int best_cpu, struct rq *rq, cpumask_t *tmpmask)
+{
+ best_cpu = best_mask_cpu(best_cpu, rq, tmpmask);
+ resched_curr(cpu_rq(best_cpu));
+}
+
+bool cpus_share_cache(int this_cpu, int that_cpu)
+{
+ struct rq *this_rq = cpu_rq(this_cpu);
+
+ return (this_rq->cpu_locality[that_cpu] < 3);
+}
+
+#ifdef CONFIG_SCHED_SMT
+#ifdef CONFIG_SMT_NICE
+static const cpumask_t *thread_cpumask(int cpu);
+
+/* Find the best realtime priority running on any SMT sibling of cpu and, if
+ * none are running, the static priority of the best deadline task running.
+ * The lookups of the other runqueues are done locklessly as the occasional
+ * wrong value would be harmless. */
+static int best_smt_bias(int cpu)
+{
+ int other_cpu, best_bias = 0;
+
+ for_each_cpu(other_cpu, thread_cpumask(cpu)) {
+ struct rq *rq;
+
+ if (other_cpu == cpu)
+ continue;
+ rq = cpu_rq(other_cpu);
+ if (rq_idle(rq))
+ continue;
+ if (!rq->online)
+ continue;
+ if (!rq->rq_mm)
+ continue;
+ if (likely(rq->rq_smt_bias > best_bias))
+ best_bias = rq->rq_smt_bias;
+ }
+ return best_bias;
+}
+
+static int task_prio_bias(struct task_struct *p)
+{
+ if (rt_task(p))
+ return 1 << 30;
+ else if (task_running_iso(p))
+ return 1 << 29;
+ else if (task_running_idle(p))
+ return 0;
+ return MAX_PRIO - p->static_prio;
+}
+
+/* We've already decided p can run on CPU, now test if it shouldn't for SMT
+ * nice reasons. */
+static bool smt_should_schedule(struct task_struct *p, int cpu)
+{
+ int best_bias, task_bias;
+
+ /* Kernel threads always run */
+ if (unlikely(!p->mm))
+ return true;
+ if (rt_task(p))
+ return true;
+ if (!idleprio_suitable(p))
+ return true;
+ best_bias = best_smt_bias(cpu);
+ /* The smt siblings are all idle or running IDLEPRIO */
+ if (best_bias < 1)
+ return true;
+ task_bias = task_prio_bias(p);
+ if (task_bias < 1)
+ return false;
+ if (task_bias >= best_bias)
+ return true;
+ /* Dither 25% cpu of normal tasks regardless of nice difference */
+ if (best_bias % 4 == 1)
+ return true;
+ /* Sorry, you lose */
+ return false;
+}
+#endif
+#endif
+
+static bool resched_best_idle(struct task_struct *p)
+{
+ cpumask_t tmpmask;
+ int best_cpu;
+
+ cpumask_and(&tmpmask, &p->cpus_allowed, &grq.cpu_idle_map);
+ best_cpu = best_mask_cpu(task_cpu(p), task_rq(p), &tmpmask);
+#ifdef CONFIG_SMT_NICE
+ if (!smt_should_schedule(p, best_cpu))
+ return false;
+#endif
+ resched_curr(cpu_rq(best_cpu));
+ return true;
+}
+
+static inline void resched_suitable_idle(struct task_struct *p)
+{
+ if (suitable_idle_cpus(p))
+ resched_best_idle(p);
+}
+
+/*
+ * Flags to tell us whether this CPU is running a CPU frequency governor that
+ * has slowed its speed or not. No locking required as the occasional wrongly
+ * read value would be harmless.
+ */
+void cpu_scaling(int cpu)
+{
+ cpu_rq(cpu)->scaling = true;
+}
+
+void cpu_nonscaling(int cpu)
+{
+ cpu_rq(cpu)->scaling = false;
+}
+
+static inline bool scaling_rq(struct rq *rq)
+{
+ return rq->scaling;
+}
+
+static inline int locality_diff(int cpu, struct rq *rq)
+{
+ return rq->cpu_locality[cpu];
+}
+#else /* CONFIG_SMP */
+static inline void set_cpuidle_map(int cpu)
+{
+}
+
+static inline void clear_cpuidle_map(int cpu)
+{
+}
+
+static inline bool suitable_idle_cpus(struct task_struct *p)
+{
+ return uprq->curr == uprq->idle;
+}
+
+static inline void resched_suitable_idle(struct task_struct *p)
+{
+}
+
+void cpu_scaling(int __unused)
+{
+}
+
+void cpu_nonscaling(int __unused)
+{
+}
+
+/*
+ * Although CPUs can scale in UP, there is nowhere else for tasks to go so this
+ * always returns false.
+ */
+static inline bool scaling_rq(struct rq *rq)
+{
+ return false;
+}
+
+static inline int locality_diff(int cpu, struct rq *rq)
+{
+ return 0;
+}
+#endif /* CONFIG_SMP */
+EXPORT_SYMBOL_GPL(cpu_scaling);
+EXPORT_SYMBOL_GPL(cpu_nonscaling);
+
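+/*
+ * Return the priority implied by the task's scheduling policy alone: the
+ * mapped realtime priority for RT tasks, IDLE_PRIO for SCHED_IDLEPRIO,
+ * ISO_PRIO for SCHED_ISO and NORMAL_PRIO for everything else.
+ */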
+static inline int normal_prio(struct task_struct *p)
+{
+ if (has_rt_policy(p))
+ return MAX_RT_PRIO - 1 - p->rt_priority;
+ if (idleprio_task(p))
+ return IDLE_PRIO;
+ if (iso_task(p))
+ return ISO_PRIO;
+ return NORMAL_PRIO;
+}
+
+/*
+ * Calculate the current priority, i.e. the priority
+ * taken into account by the scheduler. This value might
+ * be boosted by RT tasks via priority inheritance: it will
+ * be RT if the task got RT-boosted. If not then it returns
+ * p->normal_prio.
+ */
+static int effective_prio(struct task_struct *p)
+{
+ p->normal_prio = normal_prio(p);
+ /*
+ * If we are RT tasks or we were boosted to RT priority,
+ * keep the priority unchanged. Otherwise, update priority
+ * to the normal priority:
+ */
+ if (!rt_prio(p->prio))
+ return p->normal_prio;
+ return p->prio;
+}
+
+/*
+ * activate_task - move a task to the runqueue. Enter with grq locked.
+ */
+static void activate_task(struct task_struct *p, struct rq *rq)
+{
+ update_clocks(rq);
+
+ /*
+ * Sleep time is in units of nanosecs, so shift by 20 to get a
+ * milliseconds-range estimation of the amount of time that the task
+ * spent sleeping:
+ */
+ if (unlikely(prof_on == SLEEP_PROFILING)) {
+ if (p->state == TASK_UNINTERRUPTIBLE)
+ profile_hits(SLEEP_PROFILING, (void *)get_wchan(p),
+ (rq->clock_task - p->last_ran) >> 20);
+ }
+
+ p->prio = effective_prio(p);
+ if (task_contributes_to_load(p))
+ grq.nr_uninterruptible--;
+ enqueue_task(p, rq);
+ rq->soft_affined++;
+ p->on_rq = 1;
+ grq.nr_running++;
+ inc_qnr();
+}
+
+static inline void clear_sticky(struct task_struct *p);
+
+/*
+ * deactivate_task - If it's running, it's not on the grq and we can just
+ * decrement the nr_running. Enter with grq locked.
+ */
+static inline void deactivate_task(struct task_struct *p, struct rq *rq)
+{
+ if (task_contributes_to_load(p))
+ grq.nr_uninterruptible++;
+ rq->soft_affined--;
+ p->on_rq = 0;
+ grq.nr_running--;
+ clear_sticky(p);
+}
+
+#ifdef CONFIG_SMP
+void set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+#ifdef CONFIG_LOCKDEP
+ /*
+ * The caller should hold grq lock.
+ */
+ WARN_ON_ONCE(debug_locks && !lockdep_is_held(&grq.lock));
+#endif
+ if (task_cpu(p) == cpu)
+ return;
+ trace_sched_migrate_task(p, cpu);
+ perf_event_task_migrate(p);
+
+ /*
+ * After ->cpu is set up to a new value, task_grq_lock(p, ...) can be
+ * successfully executed on another CPU. We must ensure that updates of
+ * per-task data have been completed by this moment.
+ */
+ smp_wmb();
+ if (p->on_rq) {
+ task_rq(p)->soft_affined--;
+ cpu_rq(cpu)->soft_affined++;
+ }
+ task_thread_info(p)->cpu = cpu;
+}
+
+static inline void clear_sticky(struct task_struct *p)
+{
+ p->sticky = false;
+}
+
+static inline bool task_sticky(struct task_struct *p)
+{
+ return p->sticky;
+}
+
+/* Reschedule the best idle CPU that is not this one. */
+static void
+resched_closest_idle(struct rq *rq, int cpu, struct task_struct *p)
+{
+ cpumask_t tmpmask;
+
+ cpumask_and(&tmpmask, &p->cpus_allowed, &grq.cpu_idle_map);
+ cpumask_clear_cpu(cpu, &tmpmask);
+ if (cpumask_empty(&tmpmask))
+ return;
+ resched_best_mask(cpu, rq, &tmpmask);
+}
+
+/*
+ * We set the sticky flag on a task that is descheduled involuntarily meaning
+ * it is awaiting further CPU time. If the last sticky task is still sticky
+ * but unlucky enough to not be the next task scheduled, we unstick it and try
+ * to find it an idle CPU. Realtime tasks do not stick to minimise their
+ * latency at all times.
+ */
+static inline void
+swap_sticky(struct rq *rq, int cpu, struct task_struct *p)
+{
+ if (rq->sticky_task) {
+ if (rq->sticky_task == p) {
+ p->sticky = true;
+ return;
+ }
+ if (task_sticky(rq->sticky_task)) {
+ clear_sticky(rq->sticky_task);
+ resched_closest_idle(rq, cpu, rq->sticky_task);
+ }
+ }
+ if (!rt_task(p)) {
+ p->sticky = true;
+ rq->sticky_task = p;
+ } else {
+ resched_closest_idle(rq, cpu, p);
+ rq->sticky_task = NULL;
+ }
+}
+
+static inline void unstick_task(struct rq *rq, struct task_struct *p)
+{
+ rq->sticky_task = NULL;
+ clear_sticky(p);
+}
+#else
+static inline void clear_sticky(struct task_struct *p)
+{
+}
+
+static inline bool task_sticky(struct task_struct *p)
+{
+ return false;
+}
+
+static inline void
+swap_sticky(struct rq *rq, int cpu, struct task_struct *p)
+{
+}
+
+static inline void unstick_task(struct rq *rq, struct task_struct *p)
+{
+}
+#endif
+
+/*
+ * Move a task off the global queue and take it to a cpu where it will
+ * become the running task.
+ */
+static inline void take_task(int cpu, struct task_struct *p)
+{
+ set_task_cpu(p, cpu);
+ dequeue_task(p);
+ clear_sticky(p);
+ dec_qnr();
+}
+
+/*
+ * Returns a descheduling task to the grq runqueue unless it is being
+ * deactivated.
+ */
+static inline void return_task(struct task_struct *p, struct rq *rq, bool deactivate)
+{
+ if (deactivate)
+ deactivate_task(p, rq);
+ else {
+ inc_qnr();
+ enqueue_task(p, rq);
+ }
+}
+
+/* Enter with grq lock held. We know p is on the local cpu */
+static inline void __set_tsk_resched(struct task_struct *p)
+{
+ set_tsk_need_resched(p);
+ set_preempt_need_resched();
+}
+
+/*
+ * resched_task - mark a task 'to be rescheduled now'.
+ *
+ * On UP this means the setting of the need_resched flag, on SMP it
+ * might also involve a cross-CPU call to trigger the scheduler on
+ * the target CPU.
+ */
+void resched_task(struct task_struct *p)
+{
+ int cpu;
+
+ lockdep_assert_held(&grq.lock);
+
+ if (test_tsk_need_resched(p))
+ return;
+
+ set_tsk_need_resched(p);
+
+ cpu = task_cpu(p);
+ if (cpu == smp_processor_id()) {
+ set_preempt_need_resched();
+ return;
+ }
+
+ smp_send_reschedule(cpu);
+}
+
+/**
+ * task_curr - is this task currently executing on a CPU?
+ * @p: the task in question.
+ *
+ * Return: 1 if the task is currently executing. 0 otherwise.
+ */
+inline int task_curr(const struct task_struct *p)
+{
+ return cpu_curr(task_cpu(p)) == p;
+}
+
+#ifdef CONFIG_SMP
+struct migration_req {
+ struct task_struct *task;
+ int dest_cpu;
+};
+
+/*
+ * wait_task_inactive - wait for a thread to unschedule.
+ *
+ * If @match_state is nonzero, it's the @p->state value just checked and
+ * not expected to change. If it changes, i.e. @p might have woken up,
+ * then return zero. When we succeed in waiting for @p to be off its CPU,
+ * we return a positive number (its total switch count). If a second call
+ * a short while later returns the same number, the caller can be sure that
+ * @p has remained unscheduled the whole time.
+ *
+ * The caller must ensure that the task *will* unschedule sometime soon,
+ * else this function might spin for a *long* time. This function can't
+ * be called with interrupts off, or it may introduce deadlock with
+ * smp_call_function() if an IPI is sent by the same process we are
+ * waiting to become inactive.
+ */
+unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+{
+ unsigned long flags;
+ bool running, on_rq;
+ unsigned long ncsw;
+ struct rq *rq;
+
+ for (;;) {
+ rq = task_rq(p);
+
+ /*
+ * If the task is actively running on another CPU
+ * still, just relax and busy-wait without holding
+ * any locks.
+ *
+ * NOTE! Since we don't hold any locks, it's not
+ * even sure that "rq" stays as the right runqueue!
+ * But we don't care, since this will return false
+ * if the runqueue has changed and p is actually now
+ * running somewhere else!
+ */
+ while (task_running(p) && p == rq->curr) {
+ if (match_state && unlikely(p->state != match_state))
+ return 0;
+ cpu_relax();
+ }
+
+ /*
+ * Ok, time to look more closely! We need the grq
+ * lock now, to be *sure*. If we're wrong, we'll
+ * just go back and repeat.
+ */
+ rq = task_grq_lock(p, &flags);
+ trace_sched_wait_task(p);
+ running = task_running(p);
+ on_rq = p->on_rq;
+ ncsw = 0;
+ if (!match_state || p->state == match_state)
+ ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
+ task_grq_unlock(&flags);
+
+ /*
+ * If it changed from the expected state, bail out now.
+ */
+ if (unlikely(!ncsw))
+ break;
+
+ /*
+ * Was it really running after all now that we
+ * checked with the proper locks actually held?
+ *
+ * Oops. Go back and try again..
+ */
+ if (unlikely(running)) {
+ cpu_relax();
+ continue;
+ }
+
+ /*
+ * It's not enough that it's not actively running,
+ * it must be off the runqueue _entirely_, and not
+ * preempted!
+ *
+ * So if it was still runnable (but just not actively
+ * running right now), it's preempted, and we should
+ * yield - it could be a while.
+ */
+ if (unlikely(on_rq)) {
+ ktime_t to = ktime_set(0, NSEC_PER_SEC / HZ);
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_hrtimeout(&to, HRTIMER_MODE_REL);
+ continue;
+ }
+
+ /*
+ * Ahh, all good. It wasn't running, and it wasn't
+ * runnable, which means that it will never become
+ * running in the future either. We're all done!
+ */
+ break;
+ }
+
+ return ncsw;
+}
+
+/***
+ * kick_process - kick a running thread to enter/exit the kernel
+ * @p: the to-be-kicked thread
+ *
+ * Cause a process which is running on another CPU to enter
+ * kernel-mode, without any delay. (to get signals handled.)
+ *
+ * NOTE: this function doesn't have to take the runqueue lock,
+ * because all it wants to ensure is that the remote task enters
+ * the kernel. If the IPI races and the task has been migrated
+ * to another CPU then no harm is done and the purpose has been
+ * achieved as well.
+ */
+void kick_process(struct task_struct *p)
+{
+ int cpu;
+
+ preempt_disable();
+ cpu = task_cpu(p);
+ if ((cpu != smp_processor_id()) && task_curr(p))
+ smp_send_reschedule(cpu);
+ preempt_enable();
+}
+EXPORT_SYMBOL_GPL(kick_process);
+#endif
+
+/*
+ * RT tasks preempt purely on priority. SCHED_NORMAL tasks preempt on the
+ * basis of earlier deadlines. SCHED_IDLEPRIO tasks don't preempt anything
+ * else or each other; they cooperatively multitask. An idle rq scores as
+ * prio PRIO_LIMIT so it is always preempted.
+ */
+ */
+static inline bool
+can_preempt(struct task_struct *p, int prio, u64 deadline)
+{
+ /* Better static priority RT task or better policy preemption */
+ if (p->prio < prio)
+ return true;
+ if (p->prio > prio)
+ return false;
+ /* SCHED_NORMAL, BATCH and ISO will preempt based on deadline */
+ if (!deadline_before(p->deadline, deadline))
+ return false;
+ return true;
+}
+
+#ifdef CONFIG_SMP
+#define cpu_online_map (*(cpumask_t *)cpu_online_mask)
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Check to see if there is a task that is affined only to offline CPUs but
+ * still wants runtime. This happens to kernel threads during suspend/halt and
+ * disabling of CPUs.
+ */
+static inline bool online_cpus(struct task_struct *p)
+{
+ return (likely(cpumask_intersects(&cpu_online_map, &p->cpus_allowed)));
+}
+#else /* CONFIG_HOTPLUG_CPU */
+/* All available CPUs are always online without hotplug. */
+static inline bool online_cpus(struct task_struct *p)
+{
+ return true;
+}
+#endif
+
+/*
+ * Check to see if p can run on cpu, and if not, whether there are any online
+ * CPUs it can run on instead.
+ */
+static inline bool needs_other_cpu(struct task_struct *p, int cpu)
+{
+ if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed)))
+ return true;
+ return false;
+}
+
+/*
+ * When all else is equal, still prefer this_rq.
+ */
+static void try_preempt(struct task_struct *p, struct rq *this_rq)
+{
+ struct rq *highest_prio_rq = NULL;
+ int cpu, highest_prio;
+ u64 latest_deadline;
+ cpumask_t tmp;
+
+ /*
+ * We clear the sticky flag here because for a task to have called
+ * try_preempt with the sticky flag enabled means some complicated
+ * re-scheduling has occurred and we should ignore the sticky flag.
+ */
+ clear_sticky(p);
+
+ if (suitable_idle_cpus(p) && resched_best_idle(p))
+ return;
+
+ /* IDLEPRIO tasks never preempt anything but idle */
+ if (p->policy == SCHED_IDLEPRIO)
+ return;
+
+ if (likely(online_cpus(p)))
+ cpumask_and(&tmp, &cpu_online_map, &p->cpus_allowed);
+ else
+ return;
+
+ highest_prio = latest_deadline = 0;
+
+ for_each_cpu(cpu, &tmp) {
+ struct rq *rq;
+ int rq_prio;
+
+ rq = cpu_rq(cpu);
+ rq_prio = rq->rq_prio;
+ if (rq_prio < highest_prio)
+ continue;
+
+ if (rq_prio > highest_prio ||
+ deadline_after(rq->rq_deadline, latest_deadline)) {
+ latest_deadline = rq->rq_deadline;
+ highest_prio = rq_prio;
+ highest_prio_rq = rq;
+ }
+ }
+
+ if (likely(highest_prio_rq)) {
+#ifdef CONFIG_SMT_NICE
+ cpu = cpu_of(highest_prio_rq);
+ if (!smt_should_schedule(p, cpu))
+ return;
+#endif
+ if (can_preempt(p, highest_prio, highest_prio_rq->rq_deadline))
+ resched_curr(highest_prio_rq);
+ }
+}
+static int __set_cpus_allowed_ptr(struct task_struct *p,
+ const struct cpumask *new_mask, bool check);
+#else /* CONFIG_SMP */
+static inline bool needs_other_cpu(struct task_struct *p, int cpu)
+{
+ return false;
+}
+
+static void try_preempt(struct task_struct *p, struct rq *this_rq)
+{
+ if (p->policy == SCHED_IDLEPRIO)
+ return;
+ if (can_preempt(p, uprq->rq_prio, uprq->rq_deadline))
+ resched_curr(uprq);
+}
+
+static inline int __set_cpus_allowed_ptr(struct task_struct *p,
+ const struct cpumask *new_mask, bool check)
+{
+ return set_cpus_allowed_ptr(p, new_mask);
+}
+#endif /* CONFIG_SMP */
+
+static void
+ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
+{
+#ifdef CONFIG_SCHEDSTATS
+ struct rq *rq = this_rq();
+
+#ifdef CONFIG_SMP
+ int this_cpu = smp_processor_id();
+
+ if (cpu == this_cpu)
+ schedstat_inc(rq, ttwu_local);
+ else {
+ struct sched_domain *sd;
+
+ rcu_read_lock();
+ for_each_domain(this_cpu, sd) {
+ if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
+ schedstat_inc(sd, ttwu_wake_remote);
+ break;
+ }
+ }
+ rcu_read_unlock();
+ }
+
+#endif /* CONFIG_SMP */
+
+ schedstat_inc(rq, ttwu_count);
+#endif /* CONFIG_SCHEDSTATS */
+}
+
+void wake_up_if_idle(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+
+ rcu_read_lock();
+
+ if (!is_idle_task(rcu_dereference(rq->curr)))
+ goto out;
+
+ grq_lock_irqsave(&flags);
+ if (likely(is_idle_task(rq->curr)))
+ smp_send_reschedule(cpu);
+ /* Else cpu is not in idle, do nothing here */
+ grq_unlock_irqrestore(&flags);
+
+out:
+ rcu_read_unlock();
+}
+
+#ifdef CONFIG_SMP
+void scheduler_ipi(void)
+{
+ /*
+ * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
+ * TIF_NEED_RESCHED remotely (for the first time) will also send
+ * this IPI.
+ */
+ preempt_fold_need_resched();
+}
+#endif
+
+static inline void ttwu_activate(struct task_struct *p, struct rq *rq,
+ bool is_sync)
+{
+ activate_task(p, rq);
+
+ /*
+ * Sync wakeups (i.e. those types of wakeups where the waker
+ * has indicated that it will leave the CPU in short order)
+ * don't trigger a preemption if there are no idle cpus,
+ * instead waiting for current to deschedule.
+ */
+ if (!is_sync || suitable_idle_cpus(p))
+ try_preempt(p, rq);
+}
+
+static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq,
+ bool success)
+{
+ trace_sched_wakeup(p);
+ p->state = TASK_RUNNING;
+
+	/*
+	 * If a worker is waking up, notify the workqueue. Note that on BFS we
+	 * don't really know what cpu it will be, so we fake it for
+	 * wq_worker_waking_up :/
+	 */
+ if ((p->flags & PF_WQ_WORKER) && success)
+ wq_worker_waking_up(p, cpu_of(rq));
+}
+
+/*
+ * wake flags
+ */
+#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
+#define WF_FORK 0x02 /* child wakeup after fork */
+#define WF_MIGRATED 0x4 /* internal use, task got migrated */
+
+/***
+ * try_to_wake_up - wake up a thread
+ * @p: the thread to be awakened
+ * @state: the mask of task states that can be woken
+ * @wake_flags: wake modifier flags (WF_*)
+ *
+ * Put it on the run-queue if it's not already there. The "current"
+ * thread is always on the run-queue (except when the actual
+ * re-schedule is in progress), and as such you're allowed to do
+ * the simpler "current->state = TASK_RUNNING" to mark yourself
+ * runnable without the overhead of this.
+ *
+ * Return: %true if @p was woken up, %false if it was already running or
+ * @state didn't match @p's state.
+ */
+static bool try_to_wake_up(struct task_struct *p, unsigned int state,
+ int wake_flags)
+{
+ bool success = false;
+ unsigned long flags;
+ struct rq *rq;
+ int cpu;
+
+ get_cpu();
+
+ /*
+ * If we are going to wake up a thread waiting for CONDITION we
+ * need to ensure that CONDITION=1 done by the caller can not be
+ * reordered with p->state check below. This pairs with mb() in
+ * set_current_state() the waiting thread does.
+ */
+ smp_mb__before_spinlock();
+
+ /*
+ * No need to do time_lock_grq as we only need to update the rq clock
+ * if we activate the task
+ */
+ rq = task_grq_lock(p, &flags);
+ cpu = task_cpu(p);
+
+	/* state is a volatile long; why, we don't know */
+ if (!((unsigned int)p->state & state))
+ goto out_unlock;
+
+ trace_sched_waking(p);
+
+ if (task_queued(p) || task_running(p))
+ goto out_running;
+
+ ttwu_activate(p, rq, wake_flags & WF_SYNC);
+ success = true;
+
+out_running:
+ ttwu_post_activation(p, rq, success);
+out_unlock:
+ task_grq_unlock(&flags);
+
+ ttwu_stat(p, cpu, wake_flags);
+
+ put_cpu();
+
+ return success;
+}
+
+/**
+ * try_to_wake_up_local - try to wake up a local task with grq lock held
+ * @p: the thread to be awakened
+ *
+ * Put @p on the run-queue if it's not already there. The caller must
+ * ensure that grq is locked and @p is not the current task.
+ * grq stays locked over invocation.
+ */
+static void try_to_wake_up_local(struct task_struct *p)
+{
+ struct rq *rq = task_rq(p);
+ bool success = false;
+
+ lockdep_assert_held(&grq.lock);
+
+ if (!(p->state & TASK_NORMAL))
+ return;
+
+ trace_sched_waking(p);
+
+ if (!task_queued(p)) {
+ if (likely(!task_running(p))) {
+ schedstat_inc(rq, ttwu_count);
+ schedstat_inc(rq, ttwu_local);
+ }
+ ttwu_activate(p, rq, false);
+ ttwu_stat(p, smp_processor_id(), 0);
+ success = true;
+ }
+ ttwu_post_activation(p, rq, success);
+}
+
+/**
+ * wake_up_process - Wake up a specific process
+ * @p: The process to be woken up.
+ *
+ * Attempt to wake up the nominated process and move it to the set of runnable
+ * processes.
+ *
+ * Return: 1 if the process was woken up, 0 if it was already running.
+ *
+ * It may be assumed that this function implies a write memory barrier before
+ * changing the task state if and only if any tasks are woken up.
+ */
+int wake_up_process(struct task_struct *p)
+{
+ return try_to_wake_up(p, TASK_NORMAL, 0);
+}
+EXPORT_SYMBOL(wake_up_process);
+
+int wake_up_state(struct task_struct *p, unsigned int state)
+{
+ return try_to_wake_up(p, state, 0);
+}
+
+static void time_slice_expired(struct task_struct *p);
+
+/*
+ * Perform scheduler related setup for a newly forked process p.
+ * p is forked by current.
+ */
+int sched_fork(unsigned long __maybe_unused clone_flags, struct task_struct *p)
+{
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+ INIT_HLIST_HEAD(&p->preempt_notifiers);
+#endif
+	/*
+	 * The process state is set to the same value as that of the process
+	 * executing do_fork(), i.e. running. This guarantees that nobody will
+	 * actually run it, and a signal or other external event cannot wake
+	 * it up and insert it on the runqueue either.
+	 */
+
+ /* Should be reset in fork.c but done here for ease of bfs patching */
+ p->on_rq =
+ p->utime =
+ p->stime =
+ p->utimescaled =
+ p->stimescaled =
+ p->sched_time =
+ p->stime_pc =
+ p->utime_pc = 0;
+
+ /*
+ * Revert to default priority/policy on fork if requested.
+ */
+ if (unlikely(p->sched_reset_on_fork)) {
+ if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
+ p->policy = SCHED_NORMAL;
+ p->normal_prio = normal_prio(p);
+ }
+
+ if (PRIO_TO_NICE(p->static_prio) < 0) {
+ p->static_prio = NICE_TO_PRIO(0);
+ p->normal_prio = p->static_prio;
+ }
+
+ /*
+ * We don't need the reset flag anymore after the fork. It has
+ * fulfilled its duty:
+ */
+ p->sched_reset_on_fork = 0;
+ }
+
+ INIT_LIST_HEAD(&p->run_list);
+#ifdef CONFIG_SCHED_INFO
+ if (unlikely(sched_info_on()))
+ memset(&p->sched_info, 0, sizeof(p->sched_info));
+#endif
+ p->on_cpu = false;
+ clear_sticky(p);
+ init_task_preempt_count(p);
+ return 0;
+}
+
+/*
+ * wake_up_new_task - wake up a newly created task for the first time.
+ *
+ * This function will do some initial scheduler statistics housekeeping
+ * that must be done for every newly created context, then puts the task
+ * on the runqueue and wakes it.
+ */
+void wake_up_new_task(struct task_struct *p)
+{
+ struct task_struct *parent;
+ unsigned long flags;
+ struct rq *rq;
+
+ parent = p->parent;
+ rq = task_grq_lock(p, &flags);
+
+ /*
+ * Reinit new task deadline as its creator deadline could have changed
+ * since call to dup_task_struct().
+ */
+ p->deadline = rq->rq_deadline;
+
+ /*
+ * If the task is a new process, current and parent are the same. If
+ * the task is a new thread in the thread group, it will have much more
+ * in common with current than with the parent.
+ */
+ set_task_cpu(p, task_cpu(rq->curr));
+
+ /*
+ * Make sure we do not leak PI boosting priority to the child.
+ */
+ p->prio = rq->curr->normal_prio;
+
+ activate_task(p, rq);
+ trace_sched_wakeup_new(p);
+ if (unlikely(p->policy == SCHED_FIFO))
+ goto after_ts_init;
+
+ /*
+ * Share the timeslice between parent and child, thus the
+ * total amount of pending timeslices in the system doesn't change,
+ * resulting in more scheduling fairness. If it's negative, it won't
+ * matter since that's the same as being 0. current's time_slice is
+ * actually in rq_time_slice when it's running, as is its last_ran
+ * value. rq->rq_deadline is only modified within schedule() so it
+ * is always equal to current->deadline.
+ */
+ p->last_ran = rq->rq_last_ran;
+ if (likely(rq->rq_time_slice >= RESCHED_US * 2)) {
+ rq->rq_time_slice /= 2;
+ p->time_slice = rq->rq_time_slice;
+after_ts_init:
+ if (rq->curr == parent && !suitable_idle_cpus(p)) {
+ /*
+ * The VM isn't cloned, so we're in a good position to
+ * do child-runs-first in anticipation of an exec. This
+ * usually avoids a lot of COW overhead.
+ */
+ __set_tsk_resched(parent);
+ } else
+ try_preempt(p, rq);
+ } else {
+ if (rq->curr == parent) {
+ /*
+ * Forking task has run out of timeslice. Reschedule it and
+ * start its child with a new time slice and deadline. The
+ * child will end up running first because its deadline will
+ * be slightly earlier.
+ */
+ rq->rq_time_slice = 0;
+ __set_tsk_resched(parent);
+ }
+ time_slice_expired(p);
+ }
+ task_grq_unlock(&flags);
+}
+
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+
+static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE;
+
+void preempt_notifier_inc(void)
+{
+ static_key_slow_inc(&preempt_notifier_key);
+}
+EXPORT_SYMBOL_GPL(preempt_notifier_inc);
+
+void preempt_notifier_dec(void)
+{
+ static_key_slow_dec(&preempt_notifier_key);
+}
+EXPORT_SYMBOL_GPL(preempt_notifier_dec);
+
+/**
+ * preempt_notifier_register - tell me when current is being preempted & rescheduled
+ * @notifier: notifier struct to register
+ */
+void preempt_notifier_register(struct preempt_notifier *notifier)
+{
+ if (!static_key_false(&preempt_notifier_key))
+ WARN(1, "registering preempt_notifier while notifiers disabled\n");
+
+ hlist_add_head(&notifier->link, &current->preempt_notifiers);
+}
+EXPORT_SYMBOL_GPL(preempt_notifier_register);
+
+/**
+ * preempt_notifier_unregister - no longer interested in preemption notifications
+ * @notifier: notifier struct to unregister
+ *
+ * This is *not* safe to call from within a preemption notifier.
+ */
+void preempt_notifier_unregister(struct preempt_notifier *notifier)
+{
+ hlist_del(&notifier->link);
+}
+EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
+
+static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
+{
+ struct preempt_notifier *notifier;
+
+ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
+ notifier->ops->sched_in(notifier, raw_smp_processor_id());
+}
+
+static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
+{
+ if (static_key_false(&preempt_notifier_key))
+ __fire_sched_in_preempt_notifiers(curr);
+}
+
+static void
+__fire_sched_out_preempt_notifiers(struct task_struct *curr,
+ struct task_struct *next)
+{
+ struct preempt_notifier *notifier;
+
+ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
+ notifier->ops->sched_out(notifier, next);
+}
+
+static __always_inline void
+fire_sched_out_preempt_notifiers(struct task_struct *curr,
+ struct task_struct *next)
+{
+ if (static_key_false(&preempt_notifier_key))
+ __fire_sched_out_preempt_notifiers(curr, next);
+}
+
+#else /* !CONFIG_PREEMPT_NOTIFIERS */
+
+static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
+{
+}
+
+static inline void
+fire_sched_out_preempt_notifiers(struct task_struct *curr,
+ struct task_struct *next)
+{
+}
+
+#endif /* CONFIG_PREEMPT_NOTIFIERS */
+
+/**
+ * prepare_task_switch - prepare to switch tasks
+ * @rq: the runqueue preparing to switch
+ * @next: the task we are going to switch to.
+ *
+ * This is called with the rq lock held and interrupts off. It must
+ * be paired with a subsequent finish_task_switch after the context
+ * switch.
+ *
+ * prepare_task_switch sets up locking and calls architecture specific
+ * hooks.
+ */
+static inline void
+prepare_task_switch(struct rq *rq, struct task_struct *prev,
+ struct task_struct *next)
+{
+ sched_info_switch(rq, prev, next);
+ perf_event_task_sched_out(prev, next);
+ fire_sched_out_preempt_notifiers(prev, next);
+ prepare_lock_switch(rq, next);
+ prepare_arch_switch(next);
+}
+
+/**
+ * finish_task_switch - clean up after a task-switch
+ * @rq: runqueue associated with task-switch
+ * @prev: the thread we just switched away from.
+ *
+ * finish_task_switch must be called after the context switch, paired
+ * with a prepare_task_switch call before the context switch.
+ * finish_task_switch will reconcile locking set up by prepare_task_switch,
+ * and do any other architecture-specific cleanup actions.
+ *
+ * Note that we may have delayed dropping an mm in context_switch(). If
+ * so, we finish that here outside of the runqueue lock. (Doing it
+ * with the lock held can cause deadlocks; see schedule() for
+ * details.)
+ *
+ * The context switch has flipped the stack from under us and restored the
+ * local variables which were saved when this task called schedule() in the
+ * past. prev == current is still correct but we need to recalculate this_rq
+ * because prev may have moved to another CPU.
+ */
+static struct rq *finish_task_switch(struct task_struct *prev)
+ __releases(grq.lock)
+{
+ struct rq *rq = this_rq();
+ struct mm_struct *mm = rq->prev_mm;
+ long prev_state;
+
+ /*
+ * The previous task will have left us with a preempt_count of 2
+ * because it left us after:
+ *
+ * schedule()
+ * preempt_disable(); // 1
+ * __schedule()
+ * raw_spin_lock_irq(&rq->lock) // 2
+ *
+ * Also, see FORK_PREEMPT_COUNT.
+ */
+ if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
+ "corrupted preempt_count: %s/%d/0x%x\n",
+ current->comm, current->pid, preempt_count()))
+ preempt_count_set(FORK_PREEMPT_COUNT);
+
+ rq->prev_mm = NULL;
+
+ /*
+ * A task struct has one reference for the use as "current".
+ * If a task dies, then it sets TASK_DEAD in tsk->state and calls
+ * schedule one last time. The schedule call will never return, and
+ * the scheduled task must drop that reference.
+ *
+ * We must observe prev->state before clearing prev->on_cpu (in
+ * finish_lock_switch), otherwise a concurrent wakeup can get prev
+ * running on another CPU and we could race with its RUNNING -> DEAD
+ * transition, resulting in a double drop.
+ */
+ prev_state = prev->state;
+ vtime_task_switch(prev);
+ perf_event_task_sched_in(prev, current);
+ finish_lock_switch(rq, prev);
+ finish_arch_post_lock_switch();
+
+ fire_sched_in_preempt_notifiers(current);
+ if (mm)
+ mmdrop(mm);
+ if (unlikely(prev_state == TASK_DEAD)) {
+ /*
+ * Remove function-return probe instances associated with this
+ * task and put them back on the free list.
+ */
+ kprobe_flush_task(prev);
+ put_task_struct(prev);
+ }
+ return rq;
+}
+
+/**
+ * schedule_tail - first thing a freshly forked thread must call.
+ * @prev: the thread we just switched away from.
+ */
+asmlinkage __visible void schedule_tail(struct task_struct *prev)
+ __releases(grq.lock)
+{
+ struct rq *rq;
+
+ /*
+ * New tasks start with FORK_PREEMPT_COUNT, see there and
+ * finish_task_switch() for details.
+ *
+ * finish_task_switch() will drop rq->lock() and lower preempt_count
+ * and the preempt_enable() will end up enabling preemption (on
+ * PREEMPT_COUNT kernels).
+ */
+
+ rq = finish_task_switch(prev);
+ preempt_enable();
+
+ if (current->set_child_tid)
+ put_user(task_pid_vnr(current), current->set_child_tid);
+}
+
+/*
+ * context_switch - switch to the new MM and the new thread's register state.
+ */
+static inline struct rq *
+context_switch(struct rq *rq, struct task_struct *prev,
+ struct task_struct *next)
+{
+ struct mm_struct *mm, *oldmm;
+
+ prepare_task_switch(rq, prev, next);
+
+ mm = next->mm;
+ oldmm = prev->active_mm;
+ /*
+ * For paravirt, this is coupled with an exit in switch_to to
+ * combine the page table reload and the switch backend into
+ * one hypercall.
+ */
+ arch_start_context_switch(prev);
+
+ if (!mm) {
+ next->active_mm = oldmm;
+ atomic_inc(&oldmm->mm_count);
+ enter_lazy_tlb(oldmm, next);
+ } else
+ switch_mm(oldmm, mm, next);
+
+ if (!prev->mm) {
+ prev->active_mm = NULL;
+ rq->prev_mm = oldmm;
+ }
+	/*
+	 * The runqueue lock will be released by the next
+	 * task (which is an invalid locking op but in the case
+	 * of the scheduler it's an obvious special-case), so we
+	 * do an early lockdep release here:
+	 */
+ spin_release(&grq.lock.dep_map, 1, _THIS_IP_);
+
+ /* Here we just switch the register state and the stack. */
+ switch_to(prev, next, prev);
+ barrier();
+
+ return finish_task_switch(prev);
+}
+
+/*
+ * nr_running, nr_uninterruptible and nr_context_switches:
+ *
+ * externally visible scheduler statistics: current number of runnable
+ * threads, total number of context switches performed since bootup. All are
+ * measured without grabbing the grq lock but the occasional inaccurate result
+ * doesn't matter so long as it's positive.
+ */
+unsigned long nr_running(void)
+{
+ long nr = grq.nr_running;
+
+ if (unlikely(nr < 0))
+ nr = 0;
+ return (unsigned long)nr;
+}
+
+static unsigned long nr_uninterruptible(void)
+{
+ long nu = grq.nr_uninterruptible;
+
+ if (unlikely(nu < 0))
+ nu = 0;
+ return nu;
+}
+
+/*
+ * Check if only the current task is running on the cpu.
+ *
+ * Caution: this function does not check that the caller has disabled
+ * preemption, thus the result might have a time-of-check-to-time-of-use
+ * race. The caller is responsible to use it correctly, for example:
+ *
+ * - from a non-preemptable section (of course)
+ *
+ * - from a thread that is bound to a single CPU
+ *
+ * - in a loop with very short iterations (e.g. a polling loop)
+ */
+bool single_task_running(void)
+{
+	return cpu_rq(smp_processor_id())->soft_affined == 1;
+}
+EXPORT_SYMBOL(single_task_running);
+
+unsigned long long nr_context_switches(void)
+{
+ long long ns = grq.nr_switches;
+
+ /* This is of course impossible */
+ if (unlikely(ns < 0))
+ ns = 1;
+ return (unsigned long long)ns;
+}
+
+unsigned long nr_iowait(void)
+{
+ unsigned long i, sum = 0;
+
+ for_each_possible_cpu(i)
+ sum += atomic_read(&cpu_rq(i)->nr_iowait);
+
+ return sum;
+}
+
+unsigned long nr_iowait_cpu(int cpu)
+{
+ struct rq *this = cpu_rq(cpu);
+ return atomic_read(&this->nr_iowait);
+}
+
+unsigned long nr_active(void)
+{
+ return nr_running() + nr_uninterruptible();
+}
+
+/* Beyond a task running on this CPU, load is equal everywhere on BFS, so we
+ * base it on the number of running or queued tasks with their ->rq pointer
+ * set to this cpu, as that is the CPU they are most likely to run on. */
+void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
+{
+ struct rq *rq = this_rq();
+
+ *nr_waiters = atomic_read(&rq->nr_iowait);
+ *load = rq->soft_affined;
+}
+
+/* Variables and functions for calc_load */
+static unsigned long calc_load_update;
+unsigned long avenrun[3];
+EXPORT_SYMBOL(avenrun);
+
+/**
+ * get_avenrun - get the load average array
+ * @loads: pointer to dest load array
+ * @offset: offset to add
+ * @shift: shift count to shift the result left
+ *
+ * These values are estimates at best, so no need for locking.
+ */
+void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+{
+ loads[0] = (avenrun[0] + offset) << shift;
+ loads[1] = (avenrun[1] + offset) << shift;
+ loads[2] = (avenrun[2] + offset) << shift;
+}
+
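+/*
+ * calc_load() computes a fixed-point exponential moving average:
+ * new = (old * exp + active * (FIXED_1 - exp)) >> FSHIFT, with exp and
+ * FIXED_1 expressed in 1 << FSHIFT fixed point.
+ */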
+static unsigned long
+calc_load(unsigned long load, unsigned long exp, unsigned long active)
+{
+ load *= exp;
+ load += active * (FIXED_1 - exp);
+ return load >> FSHIFT;
+}
+
+/*
+ * calc_global_load - update the avenrun load estimates every LOAD_FREQ jiffies.
+ */
+void calc_global_load(unsigned long ticks)
+{
+ long active;
+
+ if (time_before(jiffies, calc_load_update))
+ return;
+ active = nr_active() * FIXED_1;
+
+ avenrun[0] = calc_load(avenrun[0], EXP_1, active);
+ avenrun[1] = calc_load(avenrun[1], EXP_5, active);
+ avenrun[2] = calc_load(avenrun[2], EXP_15, active);
+
+ calc_load_update = jiffies + LOAD_FREQ;
+}
+
+DEFINE_PER_CPU(struct kernel_stat, kstat);
+DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
+
+EXPORT_PER_CPU_SYMBOL(kstat);
+EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
+
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+
+/*
+ * There are no locks covering percpu hardirq/softirq time.
+ * They are only modified in account_system_vtime, on corresponding CPU
+ * with interrupts disabled. So, writes are safe.
+ * They are read and saved off onto struct rq in update_rq_clock().
+ * This may result in another CPU reading this CPU's irq time and racing
+ * with irq/account_system_vtime on this CPU. We would either get the old
+ * or the new value with a side effect of accounting a slice of irq time to
+ * the wrong task when an irq is in progress while we read rq->clock. That is a worthy
+ * compromise in place of having locks on each irq in account_system_time.
+ */
+static DEFINE_PER_CPU(u64, cpu_hardirq_time);
+static DEFINE_PER_CPU(u64, cpu_softirq_time);
+
+static DEFINE_PER_CPU(u64, irq_start_time);
+static int sched_clock_irqtime;
+
+void enable_sched_clock_irqtime(void)
+{
+ sched_clock_irqtime = 1;
+}
+
+void disable_sched_clock_irqtime(void)
+{
+ sched_clock_irqtime = 0;
+}
+
+#ifndef CONFIG_64BIT
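+/*
+ * On 32-bit, a 64-bit read is not atomic, so guard the irq time counters
+ * with a seqcount to avoid handing out torn values.
+ */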
+static DEFINE_PER_CPU(seqcount_t, irq_time_seq);
+
+static inline void irq_time_write_begin(void)
+{
+ __this_cpu_inc(irq_time_seq.sequence);
+ smp_wmb();
+}
+
+static inline void irq_time_write_end(void)
+{
+ smp_wmb();
+ __this_cpu_inc(irq_time_seq.sequence);
+}
+
+static inline u64 irq_time_read(int cpu)
+{
+ u64 irq_time;
+ unsigned seq;
+
+ do {
+ seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
+ irq_time = per_cpu(cpu_softirq_time, cpu) +
+ per_cpu(cpu_hardirq_time, cpu);
+ } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
+
+ return irq_time;
+}
+#else /* CONFIG_64BIT */
+static inline void irq_time_write_begin(void)
+{
+}
+
+static inline void irq_time_write_end(void)
+{
+}
+
+static inline u64 irq_time_read(int cpu)
+{
+ return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
+}
+#endif /* CONFIG_64BIT */
+
+/*
+ * Called before incrementing preempt_count on {soft,}irq_enter
+ * and before decrementing preempt_count on {soft,}irq_exit.
+ */
+void irqtime_account_irq(struct task_struct *curr)
+{
+ unsigned long flags;
+ s64 delta;
+ int cpu;
+
+ if (!sched_clock_irqtime)
+ return;
+
+ local_irq_save(flags);
+
+ cpu = smp_processor_id();
+ delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
+ __this_cpu_add(irq_start_time, delta);
+
+ irq_time_write_begin();
+ /*
+ * We do not account for softirq time from ksoftirqd here.
+ * We want to continue accounting softirq time to ksoftirqd thread
+	 * in that case, so as not to confuse the scheduler with a special task
+	 * that does not consume any time, but still wants to run.
+ */
+ if (hardirq_count())
+ __this_cpu_add(cpu_hardirq_time, delta);
+ else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
+ __this_cpu_add(cpu_softirq_time, delta);
+
+ irq_time_write_end();
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(irqtime_account_irq);
+
+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+
+#ifdef CONFIG_PARAVIRT
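+/*
+ * Convert stolen time in nanoseconds to ticks. For values below a second the
+ * iterative remainder-based divide is cheaper than a full 64-bit division.
+ */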
+static inline u64 steal_ticks(u64 steal)
+{
+ if (unlikely(steal > NSEC_PER_SEC))
+ return div_u64(steal, TICK_NSEC);
+
+ return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
+}
+#endif
+
+static void update_rq_clock_task(struct rq *rq, s64 delta)
+{
+/*
+ * In theory, the compiler should just see 0 here, and optimize out the call
+ * to sched_rt_avg_update. But I don't trust it...
+ */
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ s64 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
+
+ /*
+ * Since irq_time is only updated on {soft,}irq_exit, we might run into
+ * this case when a previous update_rq_clock() happened inside a
+ * {soft,}irq region.
+ *
+ * When this happens, we stop ->clock_task and only update the
+ * prev_irq_time stamp to account for the part that fit, so that a next
+ * update will consume the rest. This ensures ->clock_task is
+ * monotonic.
+ *
+ * It does however cause some slight miss-attribution of {soft,}irq
+ * time, a more accurate solution would be to update the irq_time using
+ * the current rq->clock timestamp, except that would require using
+ * atomic ops.
+ */
+ if (irq_delta > delta)
+ irq_delta = delta;
+
+ rq->prev_irq_time += irq_delta;
+ delta -= irq_delta;
+#endif
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+ if (static_key_false((&paravirt_steal_rq_enabled))) {
+ s64 steal = paravirt_steal_clock(cpu_of(rq));
+
+ steal -= rq->prev_steal_time_rq;
+
+ if (unlikely(steal > delta))
+ steal = delta;
+
+ rq->prev_steal_time_rq += steal;
+
+ delta -= steal;
+ }
+#endif
+
+ rq->clock_task += delta;
+}
+
+#ifndef nsecs_to_cputime
+# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
+#endif
+
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+static void irqtime_account_hi_si(void)
+{
+ u64 *cpustat = kcpustat_this_cpu->cpustat;
+ u64 latest_ns;
+
+ latest_ns = nsecs_to_cputime64(this_cpu_read(cpu_hardirq_time));
+ if (latest_ns > cpustat[CPUTIME_IRQ])
+ cpustat[CPUTIME_IRQ] += (__force u64)cputime_one_jiffy;
+
+ latest_ns = nsecs_to_cputime64(this_cpu_read(cpu_softirq_time));
+ if (latest_ns > cpustat[CPUTIME_SOFTIRQ])
+ cpustat[CPUTIME_SOFTIRQ] += (__force u64)cputime_one_jiffy;
+}
+#else /* CONFIG_IRQ_TIME_ACCOUNTING */
+
+#define sched_clock_irqtime (0)
+
+static inline void irqtime_account_hi_si(void)
+{
+}
+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+
+static __always_inline bool steal_account_process_tick(void)
+{
+#ifdef CONFIG_PARAVIRT
+ if (static_key_false(&paravirt_steal_enabled)) {
+ u64 steal;
+ cputime_t steal_ct;
+
+ steal = paravirt_steal_clock(smp_processor_id());
+ steal -= this_rq()->prev_steal_time;
+
+ /*
+		 * cputime_t may be less precise than nsecs (e.g. if it's
+		 * based on jiffies). Let's cast the result to cputime
+		 * granularity and account the rest on the next rounds.
+ */
+ steal_ct = nsecs_to_cputime(steal);
+ this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct);
+
+ account_steal_time(steal_ct);
+ return steal_ct;
+ }
+#endif
+ return false;
+}
+
+/*
+ * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
+ * tasks (sum on group iteration) belonging to @tsk's group.
+ */
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
+{
+ struct signal_struct *sig = tsk->signal;
+ cputime_t utime, stime;
+ struct task_struct *t;
+ unsigned int seq, nextseq;
+ unsigned long flags;
+
+ rcu_read_lock();
+ /* Attempt a lockless read on the first round. */
+ nextseq = 0;
+ do {
+ seq = nextseq;
+ flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
+ times->utime = sig->utime;
+ times->stime = sig->stime;
+ times->sum_exec_runtime = sig->sum_sched_runtime;
+
+ for_each_thread(tsk, t) {
+ task_cputime(t, &utime, &stime);
+ times->utime += utime;
+ times->stime += stime;
+ times->sum_exec_runtime += task_sched_runtime(t);
+ }
+ /* If lockless access failed, take the lock. */
+ nextseq = 1;
+ } while (need_seqretry(&sig->stats_lock, seq));
+ done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
+ rcu_read_unlock();
+}
+
+/*
+ * On each tick, see what percentage of that tick was attributed to each
+ * component and add the percentage to the _pc values. Once a _pc value has
+ * accumulated one tick's worth, account for that. This means the total
+ * percentage of load components will always be 128 (pseudo 100) per tick.
+ */
+static void pc_idle_time(struct rq *rq, struct task_struct *idle, unsigned long pc)
+{
+ u64 *cpustat = kcpustat_this_cpu->cpustat;
+
+ if (atomic_read(&rq->nr_iowait) > 0) {
+ rq->iowait_pc += pc;
+ if (rq->iowait_pc >= 128) {
+ cpustat[CPUTIME_IOWAIT] += (__force u64)cputime_one_jiffy * rq->iowait_pc / 128;
+ rq->iowait_pc %= 128;
+ }
+ } else {
+ rq->idle_pc += pc;
+ if (rq->idle_pc >= 128) {
+ cpustat[CPUTIME_IDLE] += (__force u64)cputime_one_jiffy * rq->idle_pc / 128;
+ rq->idle_pc %= 128;
+ }
+ }
+ acct_update_integrals(idle);
+}
+
+static void
+pc_system_time(struct rq *rq, struct task_struct *p, int hardirq_offset,
+ unsigned long pc, unsigned long ns)
+{
+ u64 *cpustat = kcpustat_this_cpu->cpustat;
+ cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
+
+ p->stime_pc += pc;
+ if (p->stime_pc >= 128) {
+ int jiffs = p->stime_pc / 128;
+
+ p->stime_pc %= 128;
+ p->stime += (__force u64)cputime_one_jiffy * jiffs;
+ p->stimescaled += one_jiffy_scaled * jiffs;
+ account_group_system_time(p, cputime_one_jiffy * jiffs);
+ }
+ p->sched_time += ns;
+ account_group_exec_runtime(p, ns);
+
+ if (hardirq_count() - hardirq_offset) {
+ rq->irq_pc += pc;
+ if (rq->irq_pc >= 128) {
+ cpustat[CPUTIME_IRQ] += (__force u64)cputime_one_jiffy * rq->irq_pc / 128;
+ rq->irq_pc %= 128;
+ }
+ } else if (in_serving_softirq()) {
+ rq->softirq_pc += pc;
+ if (rq->softirq_pc >= 128) {
+ cpustat[CPUTIME_SOFTIRQ] += (__force u64)cputime_one_jiffy * rq->softirq_pc / 128;
+ rq->softirq_pc %= 128;
+ }
+ } else {
+ rq->system_pc += pc;
+ if (rq->system_pc >= 128) {
+ cpustat[CPUTIME_SYSTEM] += (__force u64)cputime_one_jiffy * rq->system_pc / 128;
+ rq->system_pc %= 128;
+ }
+ }
+ acct_update_integrals(p);
+}
+
+static void pc_user_time(struct rq *rq, struct task_struct *p,
+ unsigned long pc, unsigned long ns)
+{
+ u64 *cpustat = kcpustat_this_cpu->cpustat;
+ cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
+
+ p->utime_pc += pc;
+ if (p->utime_pc >= 128) {
+ int jiffs = p->utime_pc / 128;
+
+ p->utime_pc %= 128;
+ p->utime += (__force u64)cputime_one_jiffy * jiffs;
+ p->utimescaled += one_jiffy_scaled * jiffs;
+ account_group_user_time(p, cputime_one_jiffy * jiffs);
+ }
+ p->sched_time += ns;
+ account_group_exec_runtime(p, ns);
+
+ if (this_cpu_ksoftirqd() == p) {
+ /*
+	 * ksoftirqd time does not get accounted in cpu_softirq_time.
+ * So, we have to handle it separately here.
+ */
+ rq->softirq_pc += pc;
+ if (rq->softirq_pc >= 128) {
+ cpustat[CPUTIME_SOFTIRQ] += (__force u64)cputime_one_jiffy * rq->softirq_pc / 128;
+ rq->softirq_pc %= 128;
+ }
+ }
+
+ if (task_nice(p) > 0 || idleprio_task(p)) {
+ rq->nice_pc += pc;
+ if (rq->nice_pc >= 128) {
+ cpustat[CPUTIME_NICE] += (__force u64)cputime_one_jiffy * rq->nice_pc / 128;
+ rq->nice_pc %= 128;
+ }
+ } else {
+ rq->user_pc += pc;
+ if (rq->user_pc >= 128) {
+ cpustat[CPUTIME_USER] += (__force u64)cputime_one_jiffy * rq->user_pc / 128;
+ rq->user_pc %= 128;
+ }
+ }
+ acct_update_integrals(p);
+}
+
+/*
+ * Convert nanoseconds to pseudo percentage of one tick. Use 128 for fast
+ * shifts instead of 100.
+ */
+#define NS_TO_PC(NS) (NS * 128 / JIFFY_NS)
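+/*
+ * For example, assuming HZ=1000 (JIFFY_NS of 1,000,000), 500,000ns of runtime
+ * converts to a pseudo-percentage of 64, i.e. half a tick.
+ */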
+
+/*
+ * This is called on clock ticks.
+ * Bank in p->sched_time the ns elapsed since the last tick or switch.
+ * CPU scheduler quota accounting is also performed here in microseconds.
+ */
+static void
+update_cpu_clock_tick(struct rq *rq, struct task_struct *p)
+{
+ long account_ns = rq->clock_task - rq->rq_last_ran;
+ struct task_struct *idle = rq->idle;
+ unsigned long account_pc;
+
+ if (unlikely(account_ns < 0) || steal_account_process_tick())
+ goto ts_account;
+
+ account_pc = NS_TO_PC(account_ns);
+
+ /* Accurate tick timekeeping */
+ if (user_mode(get_irq_regs()))
+ pc_user_time(rq, p, account_pc, account_ns);
+ else if (p != idle || (irq_count() != HARDIRQ_OFFSET))
+ pc_system_time(rq, p, HARDIRQ_OFFSET,
+ account_pc, account_ns);
+ else
+ pc_idle_time(rq, idle, account_pc);
+
+ if (sched_clock_irqtime)
+ irqtime_account_hi_si();
+
+ts_account:
+ /* time_slice accounting is done in usecs to avoid overflow on 32bit */
+ if (rq->rq_policy != SCHED_FIFO && p != idle) {
+ s64 time_diff = rq->clock - rq->timekeep_clock;
+
+ niffy_diff(&time_diff, 1);
+ rq->rq_time_slice -= NS_TO_US(time_diff);
+ }
+
+ rq->rq_last_ran = rq->clock_task;
+ rq->timekeep_clock = rq->clock;
+}
+
+/*
+ * This is called on context switches.
+ * Bank in p->sched_time the ns elapsed since the last tick or switch.
+ * CPU scheduler quota accounting is also performed here in microseconds.
+ */
+static void
+update_cpu_clock_switch(struct rq *rq, struct task_struct *p)
+{
+ long account_ns = rq->clock_task - rq->rq_last_ran;
+ struct task_struct *idle = rq->idle;
+ unsigned long account_pc;
+
+ if (unlikely(account_ns < 0))
+ goto ts_account;
+
+ account_pc = NS_TO_PC(account_ns);
+
+ /* Accurate subtick timekeeping */
+	if (p != idle)
+		pc_user_time(rq, p, account_pc, account_ns);
+	else
+ pc_idle_time(rq, idle, account_pc);
+
+ts_account:
+ /* time_slice accounting is done in usecs to avoid overflow on 32bit */
+ if (rq->rq_policy != SCHED_FIFO && p != idle) {
+ s64 time_diff = rq->clock - rq->timekeep_clock;
+
+ niffy_diff(&time_diff, 1);
+ rq->rq_time_slice -= NS_TO_US(time_diff);
+ }
+
+ rq->rq_last_ran = rq->clock_task;
+ rq->timekeep_clock = rq->clock;
+}
+
+/*
+ * Return any ns on the sched_clock that have not yet been accounted in
+ * @p in case that task is currently running.
+ *
+ * Called with task_grq_lock() held.
+ */
+static inline u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
+{
+ u64 ns = 0;
+
+ /*
+ * Must be ->curr _and_ ->on_rq. If dequeued, we would
+ * project cycles that may never be accounted to this
+ * thread, breaking clock_gettime().
+ */
+ if (p == rq->curr && p->on_rq) {
+ update_clocks(rq);
+ ns = rq->clock_task - rq->rq_last_ran;
+ if (unlikely((s64)ns < 0))
+ ns = 0;
+ }
+
+ return ns;
+}
+
+/*
+ * Return accounted runtime for the task.
+ * In case the task is currently running, also add any pending runtime
+ * that has not been accounted yet.
+ */
+unsigned long long task_sched_runtime(struct task_struct *p)
+{
+ unsigned long flags;
+ struct rq *rq;
+ u64 ns;
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
+ /*
+ * 64-bit doesn't need locks to atomically read a 64bit value.
+	 * So we have an optimization chance when the task's delta_exec is 0.
+ * Reading ->on_cpu is racy, but this is ok.
+ *
+ * If we race with it leaving cpu, we'll take a lock. So we're correct.
+ * If we race with it entering cpu, unaccounted time is 0. This is
+ * indistinguishable from the read occurring a few cycles earlier.
+ * If we see ->on_cpu without ->on_rq, the task is leaving, and has
+ * been accounted, so we're correct here as well.
+ */
+ if (!p->on_cpu || !p->on_rq)
+ return tsk_seruntime(p);
+#endif
+
+ rq = task_grq_lock(p, &flags);
+ ns = p->sched_time + do_task_delta_exec(p, rq);
+ task_grq_unlock(&flags);
+
+ return ns;
+}
+
+/* Compatibility crap */
+void account_user_time(struct task_struct *p, cputime_t cputime,
+ cputime_t cputime_scaled)
+{
+}
+
+void account_idle_time(cputime_t cputime)
+{
+}
+
+/*
+ * Account guest cpu time to a process.
+ * @p: the process that the cpu time gets accounted to
+ * @cputime: the cpu time spent in virtual machine since the last update
+ * @cputime_scaled: cputime scaled by cpu frequency
+ */
+static void account_guest_time(struct task_struct *p, cputime_t cputime,
+ cputime_t cputime_scaled)
+{
+ u64 *cpustat = kcpustat_this_cpu->cpustat;
+
+ /* Add guest time to process. */
+ p->utime += (__force u64)cputime;
+ p->utimescaled += (__force u64)cputime_scaled;
+ account_group_user_time(p, cputime);
+ p->gtime += (__force u64)cputime;
+
+ /* Add guest time to cpustat. */
+ if (task_nice(p) > 0) {
+ cpustat[CPUTIME_NICE] += (__force u64)cputime;
+ cpustat[CPUTIME_GUEST_NICE] += (__force u64)cputime;
+ } else {
+ cpustat[CPUTIME_USER] += (__force u64)cputime;
+ cpustat[CPUTIME_GUEST] += (__force u64)cputime;
+ }
+}
+
+/*
+ * Account system cpu time to a process and desired cpustat field
+ * @p: the process that the cpu time gets accounted to
+ * @cputime: the cpu time spent in kernel space since the last update
+ * @cputime_scaled: cputime scaled by cpu frequency
+ * @target_cputime64: pointer to cpustat field that has to be updated
+ */
+static inline
+void __account_system_time(struct task_struct *p, cputime_t cputime,
+ cputime_t cputime_scaled, cputime64_t *target_cputime64)
+{
+ /* Add system time to process. */
+ p->stime += (__force u64)cputime;
+ p->stimescaled += (__force u64)cputime_scaled;
+ account_group_system_time(p, cputime);
+
+ /* Add system time to cpustat. */
+ *target_cputime64 += (__force u64)cputime;
+
+ /* Account for system time used */
+ acct_update_integrals(p);
+}
+
+/*
+ * Account system cpu time to a process.
+ * @p: the process that the cpu time gets accounted to
+ * @hardirq_offset: the offset to subtract from hardirq_count()
+ * @cputime: the cpu time spent in kernel space since the last update
+ * @cputime_scaled: cputime scaled by cpu frequency
+ * This is for guest only now.
+ */
+void account_system_time(struct task_struct *p, int hardirq_offset,
+ cputime_t cputime, cputime_t cputime_scaled)
+{
+
+ if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0))
+ account_guest_time(p, cputime, cputime_scaled);
+}
+
+/*
+ * Account for involuntary wait time.
+ * @cputime: the cpu time spent in involuntary wait
+ */
+void account_steal_time(cputime_t cputime)
+{
+ u64 *cpustat = kcpustat_this_cpu->cpustat;
+
+ cpustat[CPUTIME_STEAL] += (__force u64)cputime;
+}
+
+/*
+ * Account for idle time.
+ * @cputime: the cpu time spent in idle wait
+ */
+static void account_idle_times(cputime_t cputime)
+{
+ u64 *cpustat = kcpustat_this_cpu->cpustat;
+ struct rq *rq = this_rq();
+
+ if (atomic_read(&rq->nr_iowait) > 0)
+ cpustat[CPUTIME_IOWAIT] += (__force u64)cputime;
+ else
+ cpustat[CPUTIME_IDLE] += (__force u64)cputime;
+}
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+
+void account_process_tick(struct task_struct *p, int user_tick)
+{
+}
+
+/*
+ * Account multiple ticks of steal time.
+ * @ticks: number of stolen ticks
+ */
+void account_steal_ticks(unsigned long ticks)
+{
+ account_steal_time(jiffies_to_cputime(ticks));
+}
+
+/*
+ * Account multiple ticks of idle time.
+ * @ticks: number of ticks spent idle
+ */
+void account_idle_ticks(unsigned long ticks)
+{
+ account_idle_times(jiffies_to_cputime(ticks));
+}
+#endif
+
+static inline void grq_iso_lock(void)
+ __acquires(grq.iso_lock)
+{
+ raw_spin_lock(&grq.iso_lock);
+}
+
+static inline void grq_iso_unlock(void)
+ __releases(grq.iso_lock)
+{
+ raw_spin_unlock(&grq.iso_lock);
+}
+
+/*
+ * Functions to test for when SCHED_ISO tasks have used their allocated
+ * quota as real time scheduling and convert them back to SCHED_NORMAL.
+ * Where possible, the data is tested locklessly, to avoid grabbing iso_lock
+ * because the occasional inaccurate result won't matter. However the
+ * tick data is only ever modified under lock. iso_refractory is simply
+ * set to 0 or 1 so it's not worth grabbing the lock yet again for that.
+ */
+static bool set_iso_refractory(void)
+{
+ grq.iso_refractory = true;
+ return grq.iso_refractory;
+}
+
+static bool clear_iso_refractory(void)
+{
+ grq.iso_refractory = false;
+ return grq.iso_refractory;
+}
+
+/*
+ * Test if SCHED_ISO tasks have run longer than their allotted period as RT
+ * tasks and set the refractory flag if necessary. There is 10% hysteresis
+ * for unsetting the flag. 115/128 is ~90/100 as a fast shift instead of a
+ * slow division.
+ */
+static bool test_ret_isorefractory(struct rq *rq)
+{
+ if (likely(!grq.iso_refractory)) {
+ if (grq.iso_ticks > ISO_PERIOD * sched_iso_cpu)
+ return set_iso_refractory();
+ } else {
+ if (grq.iso_ticks < ISO_PERIOD * (sched_iso_cpu * 115 / 128))
+ return clear_iso_refractory();
+ }
+ return grq.iso_refractory;
+}
+
+static void iso_tick(void)
+{
+ grq_iso_lock();
+ grq.iso_ticks += 100;
+ grq_iso_unlock();
+}
+
+/* No SCHED_ISO task was running so decrease rq->iso_ticks */
+static inline void no_iso_tick(void)
+{
+ if (grq.iso_ticks) {
+ grq_iso_lock();
+ grq.iso_ticks -= grq.iso_ticks / ISO_PERIOD + 1;
+ if (unlikely(grq.iso_refractory && grq.iso_ticks <
+ ISO_PERIOD * (sched_iso_cpu * 115 / 128)))
+ clear_iso_refractory();
+ grq_iso_unlock();
+ }
+}
+
+/* This manages tasks that have run out of timeslice during a scheduler_tick */
+static void task_running_tick(struct rq *rq)
+{
+ struct task_struct *p;
+
+ /*
+ * If a SCHED_ISO task is running we increment the iso_ticks. In
+ * order to prevent SCHED_ISO tasks from causing starvation in the
+ * presence of true RT tasks we account those as iso_ticks as well.
+ */
+ if ((rt_queue(rq) || (iso_queue(rq) && !grq.iso_refractory))) {
+ if (grq.iso_ticks <= (ISO_PERIOD * 128) - 128)
+ iso_tick();
+ } else
+ no_iso_tick();
+
+ if (iso_queue(rq)) {
+ if (unlikely(test_ret_isorefractory(rq))) {
+ if (rq_running_iso(rq)) {
+ /*
+ * SCHED_ISO task is running as RT and limit
+ * has been hit. Force it to reschedule as
+ * SCHED_NORMAL by zeroing its time_slice
+ */
+ rq->rq_time_slice = 0;
+ }
+ }
+ }
+
+ /* SCHED_FIFO tasks never run out of timeslice. */
+ if (rq->rq_policy == SCHED_FIFO)
+ return;
+ /*
+ * Tasks that were scheduled in the first half of a tick are not
+ * allowed to run into the 2nd half of the next tick if they will
+ * run out of time slice in the interim. Otherwise, if they have
+ * less than RESCHED_US μs of time slice left they will be rescheduled.
+ */
+ if (rq->dither) {
+ if (rq->rq_time_slice > HALF_JIFFY_US)
+ return;
+ else
+ rq->rq_time_slice = 0;
+ } else if (rq->rq_time_slice >= RESCHED_US)
+ return;
+
+ /* p->time_slice < RESCHED_US. We only modify task_struct under grq lock */
+ p = rq->curr;
+
+ grq_lock();
+ requeue_task(p);
+ __set_tsk_resched(p);
+ grq_unlock();
+}
+
+/*
+ * This function gets called by the timer code, with HZ frequency.
+ * We call it with interrupts disabled. The data modified is all
+ * local to struct rq so we don't need to grab grq lock.
+ */
+void scheduler_tick(void)
+{
+ int cpu __maybe_unused = smp_processor_id();
+ struct rq *rq = cpu_rq(cpu);
+
+ sched_clock_tick();
+ /* grq lock not grabbed, so only update rq clock */
+ update_rq_clock(rq);
+ update_cpu_clock_tick(rq, rq->curr);
+ if (!rq_idle(rq))
+ task_running_tick(rq);
+ else
+ no_iso_tick();
+ rq->last_tick = rq->clock;
+ perf_event_task_tick();
+}
+
+notrace unsigned long get_parent_ip(unsigned long addr)
+{
+ if (in_lock_functions(addr)) {
+ addr = CALLER_ADDR2;
+ if (in_lock_functions(addr))
+ addr = CALLER_ADDR3;
+ }
+ return addr;
+}
+
+#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
+ defined(CONFIG_PREEMPT_TRACER))
+void preempt_count_add(int val)
+{
+#ifdef CONFIG_DEBUG_PREEMPT
+ /*
+ * Underflow?
+ */
+ if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
+ return;
+#endif
+ __preempt_count_add(val);
+#ifdef CONFIG_DEBUG_PREEMPT
+ /*
+ * Spinlock count overflowing soon?
+ */
+ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
+ PREEMPT_MASK - 10);
+#endif
+ if (preempt_count() == val) {
+ unsigned long ip = get_parent_ip(CALLER_ADDR1);
+#ifdef CONFIG_DEBUG_PREEMPT
+ current->preempt_disable_ip = ip;
+#endif
+ trace_preempt_off(CALLER_ADDR0, ip);
+ }
+}
+EXPORT_SYMBOL(preempt_count_add);
+NOKPROBE_SYMBOL(preempt_count_add);
+
+void preempt_count_sub(int val)
+{
+#ifdef CONFIG_DEBUG_PREEMPT
+ /*
+ * Underflow?
+ */
+ if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
+ return;
+ /*
+ * Is the spinlock portion underflowing?
+ */
+ if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
+ !(preempt_count() & PREEMPT_MASK)))
+ return;
+#endif
+
+ if (preempt_count() == val)
+ trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
+ __preempt_count_sub(val);
+}
+EXPORT_SYMBOL(preempt_count_sub);
+NOKPROBE_SYMBOL(preempt_count_sub);
+#endif
+
+/*
+ * Deadline is "now" in niffies + (offset by priority). Setting the deadline
+ * is the key to everything. It distributes cpu fairly amongst tasks of the
+ * same nice value, proportions cpu according to nice level, and means the
+ * task that last woke up the longest ago has the earliest deadline, thus
+ * ensuring that interactive tasks get low latency on wake up. The CPU
+ * proportion works out to the square of the virtual deadline difference, so
+ * this equation will give nice 19 about 3% CPU compared to nice 0.
+ */
+static inline u64 prio_deadline_diff(int user_prio)
+{
+ return (prio_ratios[user_prio] * rr_interval * (MS_TO_NS(1) / 128));
+}
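+
+/*
+ * Rough arithmetic behind the 3% figure above, assuming prio_ratios[] grows
+ * by ~10% per nice level as set up at init: the deadline offset of nice +19
+ * is about 1.1^19 ~= 6.1 times that of nice 0, and with CPU share scaling
+ * as the square of that ratio, nice +19 ends up with roughly 1/37 ~= 3% of
+ * the CPU that a competing nice 0 task receives.
+ */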
+
+static inline u64 task_deadline_diff(struct task_struct *p)
+{
+ return prio_deadline_diff(TASK_USER_PRIO(p));
+}
+
+static inline u64 static_deadline_diff(int static_prio)
+{
+ return prio_deadline_diff(USER_PRIO(static_prio));
+}
+
+static inline int longest_deadline_diff(void)
+{
+ return prio_deadline_diff(39);
+}
+
+static inline int ms_longest_deadline_diff(void)
+{
+ return NS_TO_MS(longest_deadline_diff());
+}
+
+/*
+ * The time_slice is only refilled when it is empty and that is when we set a
+ * new deadline.
+ */
+static void time_slice_expired(struct task_struct *p)
+{
+ p->time_slice = timeslice();
+ p->deadline = grq.niffies + task_deadline_diff(p);
+#ifdef CONFIG_SMT_NICE
+ if (!p->mm)
+ p->smt_bias = 0;
+ else if (rt_task(p))
+ p->smt_bias = 1 << 30;
+ else if (task_running_iso(p))
+ p->smt_bias = 1 << 29;
+ else if (idleprio_task(p)) {
+ if (task_running_idle(p))
+ p->smt_bias = 0;
+ else
+ p->smt_bias = 1;
+ } else if (--p->smt_bias < 1)
+ p->smt_bias = MAX_PRIO - p->static_prio;
+#endif
+}
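+
+/*
+ * For reference (derived from the assignments above): smt siblings end up
+ * biased RT (1 << 30) > ISO (1 << 29) > SCHED_NORMAL (counting down from
+ * MAX_PRIO - static_prio) > IDLEPRIO (0 or 1), which smt_should_schedule()
+ * presumably compares when deciding whether a sibling may keep running.
+ */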
+
+/*
+ * Timeslices below RESCHED_US are considered as good as expired as there's no
+ * point rescheduling when there's so little time left. SCHED_BATCH tasks
+ * have been flagged as not latency sensitive and likely to be fully CPU
+ * bound so every time they're rescheduled they have their time_slice
+ * refilled, but get a new later deadline to have little effect on
+ * SCHED_NORMAL tasks.
+ */
+static inline void check_deadline(struct task_struct *p)
+{
+ if (p->time_slice < RESCHED_US || batch_task(p))
+ time_slice_expired(p);
+}
+
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+
+/*
+ * Scheduler queue bitmap specific find next bit.
+ */
+static inline unsigned long
+next_sched_bit(const unsigned long *addr, unsigned long offset)
+{
+ const unsigned long *p;
+ unsigned long result;
+ unsigned long size;
+ unsigned long tmp;
+
+ size = PRIO_LIMIT;
+ if (offset >= size)
+ return size;
+
+ p = addr + BITOP_WORD(offset);
+ result = offset & ~(BITS_PER_LONG-1);
+ size -= result;
+ offset %= BITS_PER_LONG;
+ if (offset) {
+ tmp = *(p++);
+ tmp &= (~0UL << offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp &= (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + __ffs(tmp);
+}
+
+/*
+ * O(n) lookup of all tasks in the global runqueue. The real brainfuck
+ * of lock contention and O(n). It's not strictly O(n) as only the queued,
+ * but not running, tasks are scanned, and the worst case of scanning every
+ * queued task only occurs when the right task cannot be found any earlier.
+ * Tasks are selected in this order:
+ * Real time tasks are selected purely by their static priority and in the
+ * order they were queued, so the lowest value idx, and the first queued task
+ * of that priority value is chosen.
+ * If no real time tasks are found, the SCHED_ISO priority is checked, and
+ * all SCHED_ISO tasks have the same priority value, so they're selected by
+ * the earliest deadline value.
+ * If no SCHED_ISO tasks are found, SCHED_NORMAL tasks are selected by the
+ * earliest deadline.
+ * Finally if no SCHED_NORMAL tasks are found, SCHED_IDLEPRIO tasks are
+ * selected by the earliest deadline.
+ */
+static inline struct task_struct *
+earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle)
+{
+ struct task_struct *edt = NULL;
+ unsigned long idx = -1;
+
+ do {
+ struct list_head *queue;
+ struct task_struct *p;
+ u64 earliest_deadline;
+
+ idx = next_sched_bit(grq.prio_bitmap, ++idx);
+ if (idx >= PRIO_LIMIT)
+ return idle;
+ queue = grq.queue + idx;
+
+ if (idx < MAX_RT_PRIO) {
+ /* We found an rt task */
+ list_for_each_entry(p, queue, run_list) {
+ /* Make sure cpu affinity is ok */
+ if (needs_other_cpu(p, cpu))
+ continue;
+ edt = p;
+ goto out_take;
+ }
+ /*
+ * None of the RT tasks at this priority can run on
+ * this cpu
+ */
+ continue;
+ }
+
+ /*
+ * No rt tasks. Find the earliest deadline task. Now we're in
+ * O(n) territory.
+ */
+ earliest_deadline = ~0ULL;
+ list_for_each_entry(p, queue, run_list) {
+ u64 dl;
+
+ /* Make sure cpu affinity is ok */
+ if (needs_other_cpu(p, cpu))
+ continue;
+
+#ifdef CONFIG_SMT_NICE
+ if (!smt_should_schedule(p, cpu))
+ continue;
+#endif
+ /*
+ * Soft affinity happens here: when the CPU is scaling, skip a task
+ * whose sticky flag is set and which last ran on a different CPU;
+ * otherwise, bias heavily against its deadline according to cpu
+ * cache locality.
+ */
+ if (sched_interactive)
+ dl = p->deadline;
+ else {
+ int tcpu = task_cpu(p);
+
+ if (tcpu != cpu && task_sticky(p) && scaling_rq(rq))
+ continue;
+ dl = p->deadline << locality_diff(tcpu, rq);
+ }
+
+ if (deadline_before(dl, earliest_deadline)) {
+ earliest_deadline = dl;
+ edt = p;
+ }
+ }
+ } while (!edt);
+
+out_take:
+ take_task(cpu, edt);
+ return edt;
+}
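+
+/*
+ * Illustration of the locality bias above: dl = deadline << locality_diff()
+ * doubles the effective deadline per step of cache/NUMA distance (as
+ * locality_diff() appears to rank it), so a task whose last CPU is two
+ * steps away competes with a deadline four times further out than it would
+ * on its previous CPU.
+ */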
+
+
+/*
+ * Print scheduling while atomic bug:
+ */
+static noinline void __schedule_bug(struct task_struct *prev)
+{
+ if (oops_in_progress)
+ return;
+
+ printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
+ prev->comm, prev->pid, preempt_count());
+
+ debug_show_held_locks(prev);
+ print_modules();
+ if (irqs_disabled())
+ print_irqtrace_events(prev);
+#ifdef CONFIG_DEBUG_PREEMPT
+ if (in_atomic_preempt_off()) {
+ pr_err("Preemption disabled at:");
+ print_ip_sym(current->preempt_disable_ip);
+ pr_cont("\n");
+ }
+#endif
+ dump_stack();
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+}
+
+/*
+ * Various schedule()-time debugging checks and statistics:
+ */
+static inline void schedule_debug(struct task_struct *prev)
+{
+#ifdef CONFIG_SCHED_STACK_END_CHECK
+ BUG_ON(task_stack_end_corrupted(prev));
+#endif
+
+ if (unlikely(in_atomic_preempt_off())) {
+ __schedule_bug(prev);
+ preempt_count_set(PREEMPT_DISABLED);
+ }
+ rcu_sleep_check();
+
+ profile_hit(SCHED_PROFILING, __builtin_return_address(0));
+
+ schedstat_inc(this_rq(), sched_count);
+}
+
+/*
+ * The currently running task's information is all stored in rq local data
+ * which is only modified by the local CPU, thereby allowing the data to be
+ * changed without grabbing the grq lock.
+ */
+static inline void set_rq_task(struct rq *rq, struct task_struct *p)
+{
+ rq->rq_time_slice = p->time_slice;
+ rq->rq_deadline = p->deadline;
+ rq->rq_last_ran = p->last_ran = rq->clock_task;
+ rq->rq_policy = p->policy;
+ rq->rq_prio = p->prio;
+#ifdef CONFIG_SMT_NICE
+ rq->rq_mm = p->mm;
+ rq->rq_smt_bias = p->smt_bias;
+#endif
+ if (p != rq->idle)
+ rq->rq_running = true;
+ else
+ rq->rq_running = false;
+}
+
+static void reset_rq_task(struct rq *rq, struct task_struct *p)
+{
+ rq->rq_policy = p->policy;
+ rq->rq_prio = p->prio;
+#ifdef CONFIG_SMT_NICE
+ rq->rq_smt_bias = p->smt_bias;
+#endif
+}
+
+#ifdef CONFIG_SMT_NICE
+/*
+ * Iterate over smt siblings when we've scheduled a process on cpu and decide
+ * whether they should continue running or be descheduled.
+ */
+static void check_smt_siblings(int cpu)
+{
+ int other_cpu;
+
+ for_each_cpu(other_cpu, thread_cpumask(cpu)) {
+ struct task_struct *p;
+ struct rq *rq;
+
+ if (other_cpu == cpu)
+ continue;
+ rq = cpu_rq(other_cpu);
+ if (rq_idle(rq))
+ continue;
+ if (!rq->online)
+ continue;
+ p = rq->curr;
+ if (!smt_should_schedule(p, cpu)) {
+ set_tsk_need_resched(p);
+ smp_send_reschedule(other_cpu);
+ }
+ }
+}
+
+static void wake_smt_siblings(int cpu)
+{
+ int other_cpu;
+
+ if (!queued_notrunning())
+ return;
+
+ for_each_cpu(other_cpu, thread_cpumask(cpu)) {
+ struct rq *rq;
+
+ if (other_cpu == cpu)
+ continue;
+ rq = cpu_rq(other_cpu);
+ if (rq_idle(rq)) {
+ struct task_struct *p = rq->curr;
+
+ set_tsk_need_resched(p);
+ smp_send_reschedule(other_cpu);
+ }
+ }
+}
+#else
+static void check_smt_siblings(int __maybe_unused cpu) {}
+static void wake_smt_siblings(int __maybe_unused cpu) {}
+#endif
+
+/*
+ * schedule() is the main scheduler function.
+ *
+ * The main means of driving the scheduler and thus entering this function are:
+ *
+ * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
+ *
+ * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
+ * paths. For example, see arch/x86/entry_64.S.
+ *
+ * To drive preemption between tasks, the scheduler sets the flag in timer
+ * interrupt handler scheduler_tick().
+ *
+ * 3. Wakeups don't really cause entry into schedule(). They add a
+ * task to the run-queue and that's it.
+ *
+ * Now, if the new task added to the run-queue preempts the current
+ * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
+ * called on the nearest possible occasion:
+ *
+ * - If the kernel is preemptible (CONFIG_PREEMPT=y):
+ *
+ * - in syscall or exception context, at the next outermost
+ * preempt_enable(). (this might be as soon as the wake_up()'s
+ * spin_unlock()!)
+ *
+ * - in IRQ context, return from interrupt-handler to
+ * preemptible context
+ *
+ * - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
+ * then at the next:
+ *
+ * - cond_resched() call
+ * - explicit schedule() call
+ * - return from syscall or exception to user-space
+ * - return from interrupt-handler to user-space
+ *
+ * WARNING: must be called with preemption disabled!
+ */
+static void __sched notrace __schedule(bool preempt)
+{
+ struct task_struct *prev, *next, *idle;
+ unsigned long *switch_count;
+ bool deactivate = false;
+ struct rq *rq;
+ int cpu;
+
+ cpu = smp_processor_id();
+ rq = cpu_rq(cpu);
+ prev = rq->curr;
+
+ /*
+ * do_exit() calls schedule() with preemption disabled as an exception;
+ * however we must fix that up, otherwise the next task will see an
+ * inconsistent (higher) preempt count.
+ *
+ * It also keeps the schedule_debug() test below from complaining
+ * about this.
+ */
+ if (unlikely(prev->state == TASK_DEAD))
+ preempt_enable_no_resched_notrace();
+
+ schedule_debug(prev);
+
+ local_irq_disable();
+ rcu_note_context_switch();
+
+ /*
+ * Make sure that signal_pending_state()->signal_pending() below
+ * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
+ * done by the caller to avoid the race with signal_wake_up().
+ */
+ smp_mb__before_spinlock();
+ grq_lock();
+
+ switch_count = &prev->nivcsw;
+ if (!preempt && prev->state) {
+ if (unlikely(signal_pending_state(prev->state, prev))) {
+ prev->state = TASK_RUNNING;
+ } else {
+ deactivate = true;
+ prev->on_rq = 0;
+
+ /*
+ * If a worker is going to sleep, notify and
+ * ask workqueue whether it wants to wake up a
+ * task to maintain concurrency. If so, wake
+ * up the task.
+ */
+ if (prev->flags & PF_WQ_WORKER) {
+ struct task_struct *to_wakeup;
+
+ to_wakeup = wq_worker_sleeping(prev, cpu);
+ if (to_wakeup) {
+ /* This shouldn't happen, but does */
+ if (unlikely(to_wakeup == prev))
+ deactivate = false;
+ else
+ try_to_wake_up_local(to_wakeup);
+ }
+ }
+ }
+ switch_count = &prev->nvcsw;
+ }
+
+ update_clocks(rq);
+ update_cpu_clock_switch(rq, prev);
+ if (rq->clock - rq->last_tick > HALF_JIFFY_NS)
+ rq->dither = false;
+ else
+ rq->dither = true;
+
+ clear_tsk_need_resched(prev);
+ clear_preempt_need_resched();
+
+ idle = rq->idle;
+ if (idle != prev) {
+ /* Update all the information stored on struct rq */
+ prev->time_slice = rq->rq_time_slice;
+ prev->deadline = rq->rq_deadline;
+ check_deadline(prev);
+ prev->last_ran = rq->clock_task;
+
+ /* Task changed affinity off this CPU */
+ if (likely(!needs_other_cpu(prev, cpu))) {
+ if (!deactivate) {
+ if (!queued_notrunning()) {
+ /*
+ * We now know prev is the only thing that is
+ * awaiting CPU so we can bypass rechecking for
+ * the earliest deadline task and just run it
+ * again.
+ */
+ set_rq_task(rq, prev);
+ check_smt_siblings(cpu);
+ grq_unlock_irq();
+ goto rerun_prev_unlocked;
+ } else
+ swap_sticky(rq, cpu, prev);
+ }
+ }
+ return_task(prev, rq, deactivate);
+ }
+
+ if (unlikely(!queued_notrunning())) {
+ /*
+ * This CPU is now truly idle as opposed to when idle is
+ * scheduled as a high priority task in its own right.
+ */
+ next = idle;
+ schedstat_inc(rq, sched_goidle);
+ set_cpuidle_map(cpu);
+ } else {
+ next = earliest_deadline_task(rq, cpu, idle);
+ if (likely(next->prio != PRIO_LIMIT))
+ clear_cpuidle_map(cpu);
+ else
+ set_cpuidle_map(cpu);
+ }
+
+ if (likely(prev != next)) {
+ /*
+ * Don't reschedule the idle task or a deactivated task
+ */
+ if (prev != idle && !deactivate)
+ resched_suitable_idle(prev);
+ /*
+ * Don't stick tasks when a real time task is going to run as
+ * they may literally get stuck.
+ */
+ if (rt_task(next))
+ unstick_task(rq, prev);
+ set_rq_task(rq, next);
+ if (next != idle)
+ check_smt_siblings(cpu);
+ else
+ wake_smt_siblings(cpu);
+ grq.nr_switches++;
+ prev->on_cpu = false;
+ next->on_cpu = true;
+ rq->curr = next;
+ ++*switch_count;
+
+ trace_sched_switch(preempt, prev, next);
+ rq = context_switch(rq, prev, next); /* unlocks the grq */
+ cpu = cpu_of(rq);
+ idle = rq->idle;
+ } else {
+ check_smt_siblings(cpu);
+ grq_unlock_irq();
+ }
+
+rerun_prev_unlocked:
+ return;
+}
+
+static inline void sched_submit_work(struct task_struct *tsk)
+{
+ if (!tsk->state || tsk_is_pi_blocked(tsk) ||
+ preempt_count() ||
+ signal_pending_state(tsk->state, tsk))
+ return;
+
+ /*
+ * If we are going to sleep and we have plugged IO queued,
+ * make sure to submit it to avoid deadlocks.
+ */
+ if (blk_needs_flush_plug(tsk))
+ blk_schedule_flush_plug(tsk);
+}
+
+asmlinkage __visible void __sched schedule(void)
+{
+ struct task_struct *tsk = current;
+
+ sched_submit_work(tsk);
+ do {
+ preempt_disable();
+ __schedule(false);
+ sched_preempt_enable_no_resched();
+ } while (need_resched());
+}
+
+EXPORT_SYMBOL(schedule);
+
+#ifdef CONFIG_CONTEXT_TRACKING
+asmlinkage __visible void __sched schedule_user(void)
+{
+ /*
+ * If we come here after a random call to set_need_resched(),
+ * or we have been woken up remotely but the IPI has not yet arrived,
+ * we haven't yet exited the RCU idle mode. Do it here manually until
+ * we find a better solution.
+ *
+ * NB: There are buggy callers of this function. Ideally we
+ * should warn if prev_state != IN_USER, but that will trigger
+ * too frequently to make sense yet.
+ */
+ enum ctx_state prev_state = exception_enter();
+ schedule();
+ exception_exit(prev_state);
+}
+#endif
+
+/**
+ * schedule_preempt_disabled - called with preemption disabled
+ *
+ * Returns with preemption disabled. Note: preempt_count must be 1
+ */
+void __sched schedule_preempt_disabled(void)
+{
+ sched_preempt_enable_no_resched();
+ schedule();
+ preempt_disable();
+}
+
+static void __sched notrace preempt_schedule_common(void)
+{
+ do {
+ preempt_disable_notrace();
+ __schedule(true);
+ preempt_enable_no_resched_notrace();
+
+ /*
+ * Check again in case we missed a preemption opportunity
+ * between schedule and now.
+ */
+ } while (need_resched());
+}
+
+#ifdef CONFIG_PREEMPT
+/*
+ * this is the entry point to schedule() from in-kernel preemption
+ * off of preempt_enable. Kernel preemption off of return from interrupt
+ * is handled separately by preempt_schedule_irq() below.
+ */
+asmlinkage __visible void __sched notrace preempt_schedule(void)
+{
+ /*
+ * If there is a non-zero preempt_count or interrupts are disabled,
+ * we do not want to preempt the current task. Just return..
+ */
+ if (likely(!preemptible()))
+ return;
+
+ preempt_schedule_common();
+}
+NOKPROBE_SYMBOL(preempt_schedule);
+EXPORT_SYMBOL(preempt_schedule);
+
+/**
+ * preempt_schedule_notrace - preempt_schedule called by tracing
+ *
+ * The tracing infrastructure uses preempt_enable_notrace to prevent
+ * recursion and tracing preempt enabling caused by the tracing
+ * infrastructure itself. But as tracing can happen in areas coming
+ * from userspace or just about to enter userspace, a preempt enable
+ * can occur before user_exit() is called. This will cause the scheduler
+ * to be called when the system is still in usermode.
+ *
+ * To prevent this, the preempt_enable_notrace will use this function
+ * instead of preempt_schedule() to exit user context if needed before
+ * calling the scheduler.
+ */
+asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
+{
+ enum ctx_state prev_ctx;
+
+ if (likely(!preemptible()))
+ return;
+
+ do {
+ preempt_disable_notrace();
+ /*
+ * Needs preempt disabled in case user_exit() is traced
+ * and the tracer calls preempt_enable_notrace() causing
+ * an infinite recursion.
+ */
+ prev_ctx = exception_enter();
+ __schedule(true);
+ exception_exit(prev_ctx);
+
+ preempt_enable_no_resched_notrace();
+ } while (need_resched());
+}
+EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
+
+#endif /* CONFIG_PREEMPT */
+
+/*
+ * this is the entry point to schedule() from kernel preemption
+ * off of irq context.
+ * Note that this is called and returns with irqs disabled. This will
+ * protect us against recursive calling from irq.
+ */
+asmlinkage __visible void __sched preempt_schedule_irq(void)
+{
+ enum ctx_state prev_state;
+
+ /* Catch callers which need to be fixed */
+ BUG_ON(preempt_count() || !irqs_disabled());
+
+ prev_state = exception_enter();
+
+ do {
+ preempt_disable();
+ local_irq_enable();
+ __schedule(true);
+ local_irq_disable();
+ sched_preempt_enable_no_resched();
+ } while (need_resched());
+
+ exception_exit(prev_state);
+}
+
+int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
+ void *key)
+{
+ return try_to_wake_up(curr->private, mode, wake_flags);
+}
+EXPORT_SYMBOL(default_wake_function);
+
+#ifdef CONFIG_RT_MUTEXES
+
+/*
+ * rt_mutex_setprio - set the current priority of a task
+ * @p: task
+ * @prio: prio value (kernel-internal form)
+ *
+ * This function changes the 'effective' priority of a task. It does
+ * not touch ->normal_prio like __setscheduler().
+ *
+ * Used by the rt_mutex code to implement priority inheritance
+ * logic. Call site only calls if the priority of the task changed.
+ */
+void rt_mutex_setprio(struct task_struct *p, int prio)
+{
+ unsigned long flags;
+ int queued, oldprio;
+ struct rq *rq;
+
+ BUG_ON(prio < 0 || prio > MAX_PRIO);
+
+ rq = task_grq_lock(p, &flags);
+
+ /*
+ * Idle task boosting is a no-no in general. There is one
+ * exception, when PREEMPT_RT and NOHZ is active:
+ *
+ * The idle task calls get_next_timer_interrupt() and holds
+ * the timer wheel base->lock on the CPU and another CPU wants
+ * to access the timer (probably to cancel it). We can safely
+ * ignore the boosting request, as the idle CPU runs this code
+ * with interrupts disabled and will complete the lock
+ * protected section without being interrupted. So there is no
+ * real need to boost.
+ */
+ if (unlikely(p == rq->idle)) {
+ WARN_ON(p != rq->curr);
+ WARN_ON(p->pi_blocked_on);
+ goto out_unlock;
+ }
+
+ trace_sched_pi_setprio(p, prio);
+ oldprio = p->prio;
+ queued = task_queued(p);
+ if (queued)
+ dequeue_task(p);
+ p->prio = prio;
+ if (task_running(p) && prio > oldprio)
+ resched_task(p);
+ if (queued) {
+ enqueue_task(p, rq);
+ try_preempt(p, rq);
+ }
+
+out_unlock:
+ task_grq_unlock(&flags);
+}
+
+#endif
+
+/*
+ * Adjust the deadline for when the priority is to change, before it's
+ * changed.
+ */
+static inline void adjust_deadline(struct task_struct *p, int new_prio)
+{
+ p->deadline += static_deadline_diff(new_prio) - task_deadline_diff(p);
+}
+
+void set_user_nice(struct task_struct *p, long nice)
+{
+ int queued, new_static, old_static;
+ unsigned long flags;
+ struct rq *rq;
+
+ if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
+ return;
+ new_static = NICE_TO_PRIO(nice);
+ /*
+ * We have to be careful, if called from sys_setpriority(),
+ * the task might be in the middle of scheduling on another CPU.
+ */
+ rq = time_task_grq_lock(p, &flags);
+ /*
+ * The RT priorities are set via sched_setscheduler(), but we still
+ * allow the 'normal' nice value to be set - but as expected
+ * it won't have any effect on scheduling until the task returns to
+ * SCHED_NORMAL/SCHED_BATCH:
+ */
+ if (has_rt_policy(p)) {
+ p->static_prio = new_static;
+ goto out_unlock;
+ }
+ queued = task_queued(p);
+ if (queued)
+ dequeue_task(p);
+
+ adjust_deadline(p, new_static);
+ old_static = p->static_prio;
+ p->static_prio = new_static;
+ p->prio = effective_prio(p);
+
+ if (queued) {
+ enqueue_task(p, rq);
+ if (new_static < old_static)
+ try_preempt(p, rq);
+ } else if (task_running(p)) {
+ reset_rq_task(rq, p);
+ if (old_static < new_static)
+ resched_task(p);
+ }
+out_unlock:
+ task_grq_unlock(&flags);
+}
+EXPORT_SYMBOL(set_user_nice);
+
+/*
+ * can_nice - check if a task can reduce its nice value
+ * @p: task
+ * @nice: nice value
+ */
+int can_nice(const struct task_struct *p, const int nice)
+{
+ /* convert nice value [19,-20] to rlimit style value [1,40] */
+ int nice_rlim = nice_to_rlimit(nice);
+
+ return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
+ capable(CAP_SYS_NICE));
+}
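+
+/*
+ * Worked example for the conversion above: nice_to_rlimit() maps nice +19
+ * to 1 and nice -20 to 40, so a task with RLIMIT_NICE set to 30 may lower
+ * its nice value as far as -10 without CAP_SYS_NICE.
+ */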
+
+#ifdef __ARCH_WANT_SYS_NICE
+
+/*
+ * sys_nice - change the priority of the current process.
+ * @increment: priority increment
+ *
+ * sys_setpriority is a more generic, but much slower function that
+ * does similar things.
+ */
+SYSCALL_DEFINE1(nice, int, increment)
+{
+ long nice, retval;
+
+ /*
+ * Setpriority might change our priority at the same moment.
+ * We don't have to worry. Conceptually one call occurs first
+ * and we have a single winner.
+ */
+
+ increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
+ nice = task_nice(current) + increment;
+
+ nice = clamp_val(nice, MIN_NICE, MAX_NICE);
+ if (increment < 0 && !can_nice(current, nice))
+ return -EPERM;
+
+ retval = security_task_setnice(current, nice);
+ if (retval)
+ return retval;
+
+ set_user_nice(current, nice);
+ return 0;
+}
+
+#endif
+
+/**
+ * task_prio - return the priority value of a given task.
+ * @p: the task in question.
+ *
+ * Return: The priority value as seen by users in /proc.
+ * RT tasks are offset by -100. Normal tasks are centered around 1, value goes
+ * from 0 (SCHED_ISO) up to 82 (nice +19 SCHED_IDLEPRIO).
+ */
+int task_prio(const struct task_struct *p)
+{
+ int delta, prio = p->prio - MAX_RT_PRIO;
+
+ /* rt tasks and iso tasks */
+ if (prio <= 0)
+ goto out;
+
+ /* Convert to ms to avoid overflows */
+ delta = NS_TO_MS(p->deadline - grq.niffies);
+ delta = delta * 40 / ms_longest_deadline_diff();
+ if (delta > 0 && delta <= 80)
+ prio += delta;
+ if (idleprio_task(p))
+ prio += 40;
+out:
+ return prio;
+}
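+
+/*
+ * Rough example of the mapping above, assuming the usual ~10% per nice
+ * level prio_ratios: a nice 0 SCHED_NORMAL task with a freshly set deadline
+ * is about a sixth of the longest deadline offset away, giving delta ~= 6
+ * and a reported priority of about 7, while a fresh nice +19 task reports
+ * about 41, and SCHED_IDLEPRIO adds another 40 on top of that.
+ */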
+
+/**
+ * idle_cpu - is a given cpu idle currently?
+ * @cpu: the processor in question.
+ *
+ * Return: 1 if the CPU is currently idle. 0 otherwise.
+ */
+int idle_cpu(int cpu)
+{
+ return cpu_curr(cpu) == cpu_rq(cpu)->idle;
+}
+
+/**
+ * idle_task - return the idle task for a given cpu.
+ * @cpu: the processor in question.
+ *
+ * Return: The idle task for the cpu @cpu.
+ */
+struct task_struct *idle_task(int cpu)
+{
+ return cpu_rq(cpu)->idle;
+}
+
+/**
+ * find_process_by_pid - find a process with a matching PID value.
+ * @pid: the pid in question.
+ *
+ * The task of @pid, if found. %NULL otherwise.
+ */
+static inline struct task_struct *find_process_by_pid(pid_t pid)
+{
+ return pid ? find_task_by_vpid(pid) : current;
+}
+
+/* Actually do priority change: must hold grq lock. */
+static void __setscheduler(struct task_struct *p, struct rq *rq, int policy,
+ int prio, bool keep_boost)
+{
+ int oldrtprio, oldprio;
+
+ p->policy = policy;
+ oldrtprio = p->rt_priority;
+ p->rt_priority = prio;
+ p->normal_prio = normal_prio(p);
+ oldprio = p->prio;
+ /*
+ * Keep a potential priority boosting if called from
+ * sched_setscheduler().
+ */
+ if (keep_boost) {
+ /*
+ * Take priority boosted tasks into account. If the new
+ * effective priority is unchanged, we just store the new
+ * normal parameters and do not touch the scheduler class and
+ * the runqueue. This will be done when the task deboost
+ * itself.
+ */
+ p->prio = rt_mutex_get_effective_prio(p, p->normal_prio);
+ } else
+ p->prio = p->normal_prio;
+ if (task_running(p)) {
+ reset_rq_task(rq, p);
+ /* Resched only if we might now be preempted */
+ if (p->prio > oldprio || p->rt_priority > oldrtprio)
+ resched_task(p);
+ }
+}
+
+/*
+ * check the target process has a UID that matches the current process's
+ */
+static bool check_same_owner(struct task_struct *p)
+{
+ const struct cred *cred = current_cred(), *pcred;
+ bool match;
+
+ rcu_read_lock();
+ pcred = __task_cred(p);
+ match = (uid_eq(cred->euid, pcred->euid) ||
+ uid_eq(cred->euid, pcred->uid));
+ rcu_read_unlock();
+ return match;
+}
+
+static int
+__sched_setscheduler(struct task_struct *p, int policy,
+ const struct sched_param *param, bool user, bool pi)
+{
+ struct sched_param zero_param = { .sched_priority = 0 };
+ int queued, retval, oldpolicy = -1;
+ unsigned long flags, rlim_rtprio = 0;
+ int reset_on_fork;
+ struct rq *rq;
+
+ /* may grab non-irq protected spin_locks */
+ BUG_ON(in_interrupt());
+
+ if (is_rt_policy(policy) && !capable(CAP_SYS_NICE)) {
+ unsigned long lflags;
+
+ if (!lock_task_sighand(p, &lflags))
+ return -ESRCH;
+ rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
+ unlock_task_sighand(p, &lflags);
+ if (rlim_rtprio)
+ goto recheck;
+ /*
+ * If the caller requested an RT policy without having the
+ * necessary rights, we downgrade the policy to SCHED_ISO.
+ * We also set the parameter to zero to pass the checks.
+ */
+ policy = SCHED_ISO;
+ param = &zero_param;
+ }
+recheck:
+ /* double check policy once rq lock held */
+ if (policy < 0) {
+ reset_on_fork = p->sched_reset_on_fork;
+ policy = oldpolicy = p->policy;
+ } else {
+ reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
+ policy &= ~SCHED_RESET_ON_FORK;
+
+ if (!SCHED_RANGE(policy))
+ return -EINVAL;
+ }
+
+ /*
+ * Valid priorities for SCHED_FIFO and SCHED_RR are
+ * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and
+ * SCHED_BATCH is 0.
+ */
+ if (param->sched_priority < 0 ||
+ (p->mm && param->sched_priority > MAX_USER_RT_PRIO - 1) ||
+ (!p->mm && param->sched_priority > MAX_RT_PRIO - 1))
+ return -EINVAL;
+ if (is_rt_policy(policy) != (param->sched_priority != 0))
+ return -EINVAL;
+
+ /*
+ * Allow unprivileged RT tasks to decrease priority:
+ */
+ if (user && !capable(CAP_SYS_NICE)) {
+ if (is_rt_policy(policy)) {
+ unsigned long rlim_rtprio =
+ task_rlimit(p, RLIMIT_RTPRIO);
+
+ /* can't set/change the rt policy */
+ if (policy != p->policy && !rlim_rtprio)
+ return -EPERM;
+
+ /* can't increase priority */
+ if (param->sched_priority > p->rt_priority &&
+ param->sched_priority > rlim_rtprio)
+ return -EPERM;
+ } else {
+ switch (p->policy) {
+ /*
+ * Can only downgrade policies but not back to
+ * SCHED_NORMAL
+ */
+ case SCHED_ISO:
+ if (policy == SCHED_ISO)
+ goto out;
+ if (policy == SCHED_NORMAL)
+ return -EPERM;
+ break;
+ case SCHED_BATCH:
+ if (policy == SCHED_BATCH)
+ goto out;
+ if (policy != SCHED_IDLEPRIO)
+ return -EPERM;
+ break;
+ case SCHED_IDLEPRIO:
+ if (policy == SCHED_IDLEPRIO)
+ goto out;
+ return -EPERM;
+ default:
+ break;
+ }
+ }
+
+ /* can't change other user's priorities */
+ if (!check_same_owner(p))
+ return -EPERM;
+
+ /* Normal users shall not reset the sched_reset_on_fork flag */
+ if (p->sched_reset_on_fork && !reset_on_fork)
+ return -EPERM;
+ }
+
+ if (user) {
+ retval = security_task_setscheduler(p);
+ if (retval)
+ return retval;
+ }
+
+ /*
+ * make sure no PI-waiters arrive (or leave) while we are
+ * changing the priority of the task:
+ */
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
+ /*
+ * To be able to change p->policy safely, the global runqueue (grq)
+ * lock must be held.
+ */
+ rq = __task_grq_lock(p);
+
+ /*
+ * Changing the policy of the stop threads is a very bad idea
+ */
+ if (p == rq->stop) {
+ __task_grq_unlock();
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ return -EINVAL;
+ }
+
+ /*
+ * If not changing anything there's no need to proceed further:
+ */
+ if (unlikely(policy == p->policy && (!is_rt_policy(policy) ||
+ param->sched_priority == p->rt_priority))) {
+
+ __task_grq_unlock();
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ return 0;
+ }
+
+ /* recheck policy now with rq lock held */
+ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
+ policy = oldpolicy = -1;
+ __task_grq_unlock();
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ goto recheck;
+ }
+ update_clocks(rq);
+ p->sched_reset_on_fork = reset_on_fork;
+
+ queued = task_queued(p);
+ if (queued)
+ dequeue_task(p);
+ __setscheduler(p, rq, policy, param->sched_priority, pi);
+ if (queued) {
+ enqueue_task(p, rq);
+ try_preempt(p, rq);
+ }
+ __task_grq_unlock();
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
+ if (pi)
+ rt_mutex_adjust_pi(p);
+out:
+ return 0;
+}
+
+/**
+ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
+ * @p: the task in question.
+ * @policy: new policy.
+ * @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
+ *
+ * NOTE that the task may already be dead.
+ */
+int sched_setscheduler(struct task_struct *p, int policy,
+ const struct sched_param *param)
+{
+ return __sched_setscheduler(p, policy, param, true, true);
+}
+
+EXPORT_SYMBOL_GPL(sched_setscheduler);
+
+int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
+{
+ const struct sched_param param = { .sched_priority = attr->sched_priority };
+ int policy = attr->sched_policy;
+
+ return __sched_setscheduler(p, policy, &param, true, true);
+}
+EXPORT_SYMBOL_GPL(sched_setattr);
+
+/**
+ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
+ * @p: the task in question.
+ * @policy: new policy.
+ * @param: structure containing the new RT priority.
+ *
+ * Just like sched_setscheduler, only don't bother checking if the
+ * current context has permission. For example, this is needed in
+ * stop_machine(): we create temporary high priority worker threads,
+ * but our caller might not have that capability.
+ *
+ * Return: 0 on success. An error code otherwise.
+ */
+int sched_setscheduler_nocheck(struct task_struct *p, int policy,
+ const struct sched_param *param)
+{
+ return __sched_setscheduler(p, policy, param, false, true);
+}
+EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
+
+static int
+do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+{
+ struct sched_param lparam;
+ struct task_struct *p;
+ int retval;
+
+ if (!param || pid < 0)
+ return -EINVAL;
+ if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
+ return -EFAULT;
+
+ rcu_read_lock();
+ retval = -ESRCH;
+ p = find_process_by_pid(pid);
+ if (p != NULL)
+ retval = sched_setscheduler(p, policy, &lparam);
+ rcu_read_unlock();
+
+ return retval;
+}
+
+/*
+ * Mimics kernel/events/core.c perf_copy_attr().
+ */
+static int sched_copy_attr(struct sched_attr __user *uattr,
+ struct sched_attr *attr)
+{
+ u32 size;
+ int ret;
+
+ if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
+ return -EFAULT;
+
+ /*
+ * zero the full structure, so that a short copy will be nice.
+ */
+ memset(attr, 0, sizeof(*attr));
+
+ ret = get_user(size, &uattr->size);
+ if (ret)
+ return ret;
+
+ if (size > PAGE_SIZE) /* silly large */
+ goto err_size;
+
+ if (!size) /* abi compat */
+ size = SCHED_ATTR_SIZE_VER0;
+
+ if (size < SCHED_ATTR_SIZE_VER0)
+ goto err_size;
+
+ /*
+ * If we're handed a bigger struct than we know of,
+ * ensure all the unknown bits are 0 - i.e. new
+ * user-space does not rely on any kernel feature
+ * extensions we don't know about yet.
+ */
+ if (size > sizeof(*attr)) {
+ unsigned char __user *addr;
+ unsigned char __user *end;
+ unsigned char val;
+
+ addr = (void __user *)uattr + sizeof(*attr);
+ end = (void __user *)uattr + size;
+
+ for (; addr < end; addr++) {
+ ret = get_user(val, addr);
+ if (ret)
+ return ret;
+ if (val)
+ goto err_size;
+ }
+ size = sizeof(*attr);
+ }
+
+ ret = copy_from_user(attr, uattr, size);
+ if (ret)
+ return -EFAULT;
+
+ /*
+ * XXX: do we want to be lenient like existing syscalls; or do we want
+ * to be strict and return an error on out-of-bounds values?
+ */
+ attr->sched_nice = clamp(attr->sched_nice, -20, 19);
+
+ /* sched/core.c uses zero here but we already know ret is zero */
+ return 0;
+
+err_size:
+ put_user(sizeof(*attr), &uattr->size);
+ return -E2BIG;
+}
+
+/**
+ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
+ * @pid: the pid in question.
+ * @policy: new policy.
+ * @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
+ */
+asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
+ struct sched_param __user *param)
+{
+ /* negative values for policy are not valid */
+ if (policy < 0)
+ return -EINVAL;
+
+ return do_sched_setscheduler(pid, policy, param);
+}
+
+/*
+ * sched_setparam() passes in -1 for its policy, to let the functions
+ * it calls know not to change it.
+ */
+#define SETPARAM_POLICY -1
+
+/**
+ * sys_sched_setparam - set/change the RT priority of a thread
+ * @pid: the pid in question.
+ * @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
+ */
+SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
+{
+ return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
+}
+
+/**
+ * sys_sched_setattr - same as above, but with extended sched_attr
+ * @pid: the pid in question.
+ * @uattr: structure containing the extended parameters.
+ * @flags: for future extension.
+ */
+SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
+ unsigned int, flags)
+{
+ struct sched_attr attr;
+ struct task_struct *p;
+ int retval;
+
+ if (!uattr || pid < 0 || flags)
+ return -EINVAL;
+
+ retval = sched_copy_attr(uattr, &attr);
+ if (retval)
+ return retval;
+
+ if ((int)attr.sched_policy < 0)
+ return -EINVAL;
+
+ rcu_read_lock();
+ retval = -ESRCH;
+ p = find_process_by_pid(pid);
+ if (p != NULL)
+ retval = sched_setattr(p, &attr);
+ rcu_read_unlock();
+
+ return retval;
+}
+
+/**
+ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
+ * @pid: the pid in question.
+ *
+ * Return: On success, the policy of the thread. Otherwise, a negative error
+ * code.
+ */
+SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
+{
+ struct task_struct *p;
+ int retval = -EINVAL;
+
+ if (pid < 0)
+ goto out_nounlock;
+
+ retval = -ESRCH;
+ rcu_read_lock();
+ p = find_process_by_pid(pid);
+ if (p) {
+ retval = security_task_getscheduler(p);
+ if (!retval)
+ retval = p->policy;
+ }
+ rcu_read_unlock();
+
+out_nounlock:
+ return retval;
+}
+
+/**
+ * sys_sched_getparam - get the RT priority of a thread
+ * @pid: the pid in question.
+ * @param: structure containing the RT priority.
+ *
+ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
+ * code.
+ */
+SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
+{
+ struct sched_param lp = { .sched_priority = 0 };
+ struct task_struct *p;
+ int retval = -EINVAL;
+
+ if (!param || pid < 0)
+ goto out_nounlock;
+
+ rcu_read_lock();
+ p = find_process_by_pid(pid);
+ retval = -ESRCH;
+ if (!p)
+ goto out_unlock;
+
+ retval = security_task_getscheduler(p);
+ if (retval)
+ goto out_unlock;
+
+ if (has_rt_policy(p))
+ lp.sched_priority = p->rt_priority;
+ rcu_read_unlock();
+
+ /*
+ * This one might sleep, we cannot do it with a spinlock held ...
+ */
+ retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
+
+out_nounlock:
+ return retval;
+
+out_unlock:
+ rcu_read_unlock();
+ return retval;
+}
+
+static int sched_read_attr(struct sched_attr __user *uattr,
+ struct sched_attr *attr,
+ unsigned int usize)
+{
+ int ret;
+
+ if (!access_ok(VERIFY_WRITE, uattr, usize))
+ return -EFAULT;
+
+ /*
+ * If we're handed a smaller struct than we know of,
+ * ensure all the unknown bits are 0 - i.e. old
+ * user-space does not get incomplete information.
+ */
+ if (usize < sizeof(*attr)) {
+ unsigned char *addr;
+ unsigned char *end;
+
+ addr = (void *)attr + usize;
+ end = (void *)attr + sizeof(*attr);
+
+ for (; addr < end; addr++) {
+ if (*addr)
+ return -EFBIG;
+ }
+
+ attr->size = usize;
+ }
+
+ ret = copy_to_user(uattr, attr, attr->size);
+ if (ret)
+ return -EFAULT;
+
+ /* sched/core.c uses zero here but we already know ret is zero */
+ return ret;
+}
+
+/**
+ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
+ * @pid: the pid in question.
+ * @uattr: structure containing the extended parameters.
+ * @size: sizeof(attr) for fwd/bwd comp.
+ * @flags: for future extension.
+ */
+SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
+ unsigned int, size, unsigned int, flags)
+{
+ struct sched_attr attr = {
+ .size = sizeof(struct sched_attr),
+ };
+ struct task_struct *p;
+ int retval;
+
+ if (!uattr || pid < 0 || size > PAGE_SIZE ||
+ size < SCHED_ATTR_SIZE_VER0 || flags)
+ return -EINVAL;
+
+ rcu_read_lock();
+ p = find_process_by_pid(pid);
+ retval = -ESRCH;
+ if (!p)
+ goto out_unlock;
+
+ retval = security_task_getscheduler(p);
+ if (retval)
+ goto out_unlock;
+
+ attr.sched_policy = p->policy;
+ if (rt_task(p))
+ attr.sched_priority = p->rt_priority;
+ else
+ attr.sched_nice = task_nice(p);
+
+ rcu_read_unlock();
+
+ retval = sched_read_attr(uattr, &attr, size);
+ return retval;
+
+out_unlock:
+ rcu_read_unlock();
+ return retval;
+}
+
+long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+{
+ cpumask_var_t cpus_allowed, new_mask;
+ struct task_struct *p;
+ int retval;
+
+ get_online_cpus();
+ rcu_read_lock();
+
+ p = find_process_by_pid(pid);
+ if (!p) {
+ rcu_read_unlock();
+ put_online_cpus();
+ return -ESRCH;
+ }
+
+ /* Prevent p going away */
+ get_task_struct(p);
+ rcu_read_unlock();
+
+ if (p->flags & PF_NO_SETAFFINITY) {
+ retval = -EINVAL;
+ goto out_put_task;
+ }
+ if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
+ retval = -ENOMEM;
+ goto out_put_task;
+ }
+ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
+ retval = -ENOMEM;
+ goto out_free_cpus_allowed;
+ }
+ retval = -EPERM;
+ if (!check_same_owner(p)) {
+ rcu_read_lock();
+ if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
+ rcu_read_unlock();
+ goto out_unlock;
+ }
+ rcu_read_unlock();
+ }
+
+ retval = security_task_setscheduler(p);
+ if (retval)
+ goto out_unlock;
+
+ cpuset_cpus_allowed(p, cpus_allowed);
+ cpumask_and(new_mask, in_mask, cpus_allowed);
+again:
+ retval = __set_cpus_allowed_ptr(p, new_mask, true);
+
+ if (!retval) {
+ cpuset_cpus_allowed(p, cpus_allowed);
+ if (!cpumask_subset(new_mask, cpus_allowed)) {
+ /*
+ * We must have raced with a concurrent cpuset
+ * update. Just reset the cpus_allowed to the
+ * cpuset's cpus_allowed
+ */
+ cpumask_copy(new_mask, cpus_allowed);
+ goto again;
+ }
+ }
+out_unlock:
+ free_cpumask_var(new_mask);
+out_free_cpus_allowed:
+ free_cpumask_var(cpus_allowed);
+out_put_task:
+ put_task_struct(p);
+ put_online_cpus();
+ return retval;
+}
+
+static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
+ cpumask_t *new_mask)
+{
+ if (len < sizeof(cpumask_t)) {
+ memset(new_mask, 0, sizeof(cpumask_t));
+ } else if (len > sizeof(cpumask_t)) {
+ len = sizeof(cpumask_t);
+ }
+ return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
+}
+
+
+/**
+ * sys_sched_setaffinity - set the cpu affinity of a process
+ * @pid: pid of the process
+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
+ * @user_mask_ptr: user-space pointer to the new cpu mask
+ *
+ * Return: 0 on success. An error code otherwise.
+ */
+SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
+ unsigned long __user *, user_mask_ptr)
+{
+ cpumask_var_t new_mask;
+ int retval;
+
+ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
+ if (retval == 0)
+ retval = sched_setaffinity(pid, new_mask);
+ free_cpumask_var(new_mask);
+ return retval;
+}
+
+long sched_getaffinity(pid_t pid, cpumask_t *mask)
+{
+ struct task_struct *p;
+ unsigned long flags;
+ int retval;
+
+ get_online_cpus();
+ rcu_read_lock();
+
+ retval = -ESRCH;
+ p = find_process_by_pid(pid);
+ if (!p)
+ goto out_unlock;
+
+ retval = security_task_getscheduler(p);
+ if (retval)
+ goto out_unlock;
+
+ grq_lock_irqsave(&flags);
+ cpumask_and(mask, tsk_cpus_allowed(p), cpu_active_mask);
+ grq_unlock_irqrestore(&flags);
+
+out_unlock:
+ rcu_read_unlock();
+ put_online_cpus();
+
+ return retval;
+}
+
+/**
+ * sys_sched_getaffinity - get the cpu affinity of a process
+ * @pid: pid of the process
+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
+ * @user_mask_ptr: user-space pointer to hold the current cpu mask
+ *
+ * Return: 0 on success. An error code otherwise.
+ */
+SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
+ unsigned long __user *, user_mask_ptr)
+{
+ int ret;
+ cpumask_var_t mask;
+
+ if ((len * BITS_PER_BYTE) < nr_cpu_ids)
+ return -EINVAL;
+ if (len & (sizeof(unsigned long)-1))
+ return -EINVAL;
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ ret = sched_getaffinity(pid, mask);
+ if (ret == 0) {
+ size_t retlen = min_t(size_t, len, cpumask_size());
+
+ if (copy_to_user(user_mask_ptr, mask, retlen))
+ ret = -EFAULT;
+ else
+ ret = retlen;
+ }
+ free_cpumask_var(mask);
+
+ return ret;
+}
+
+/**
+ * sys_sched_yield - yield the current processor to other threads.
+ *
+ * This function yields the current CPU to other tasks. It does this by
+ * scheduling away the current task. If it still has the earliest deadline
+ * it will be scheduled again as the next task.
+ *
+ * Return: 0.
+ */
+SYSCALL_DEFINE0(sched_yield)
+{
+ struct task_struct *p;
+
+ p = current;
+ grq_lock_irq();
+ schedstat_inc(task_rq(p), yld_count);
+ requeue_task(p);
+
+ /*
+ * Since we are going to call schedule() anyway, there's
+ * no need to preempt or enable interrupts:
+ */
+ __release(grq.lock);
+ spin_release(&grq.lock.dep_map, 1, _THIS_IP_);
+ do_raw_spin_unlock(&grq.lock);
+ sched_preempt_enable_no_resched();
+
+ schedule();
+
+ return 0;
+}
+
+int __sched _cond_resched(void)
+{
+ if (should_resched(0)) {
+ preempt_schedule_common();
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(_cond_resched);
+
+/*
+ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
+ * call schedule, and on return reacquire the lock.
+ *
+ * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
+ * operations here to prevent schedule() from being called twice (once via
+ * spin_unlock(), once by hand).
+ */
+int __cond_resched_lock(spinlock_t *lock)
+{
+ int resched = should_resched(PREEMPT_LOCK_OFFSET);
+ int ret = 0;
+
+ lockdep_assert_held(lock);
+
+ if (spin_needbreak(lock) || resched) {
+ spin_unlock(lock);
+ if (resched)
+ preempt_schedule_common();
+ else
+ cpu_relax();
+ ret = 1;
+ spin_lock(lock);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(__cond_resched_lock);
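+
+/*
+ * Typical (illustrative) use of the helper above via the cond_resched_lock()
+ * wrapper, inside a long-running loop that holds a spinlock; the names here
+ * are placeholders:
+ *
+ *	spin_lock(&lock);
+ *	while (more_work(&queue)) {
+ *		do_one_item(&queue);
+ *		cond_resched_lock(&lock);
+ *	}
+ *	spin_unlock(&lock);
+ *
+ * Dropping the lock means the protected state may change underneath the
+ * caller, so the pattern is only safe when the loop can tolerate that.
+ */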
+
+int __sched __cond_resched_softirq(void)
+{
+ BUG_ON(!in_softirq());
+
+ if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
+ local_bh_enable();
+ preempt_schedule_common();
+ local_bh_disable();
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(__cond_resched_softirq);
+
+/**
+ * yield - yield the current processor to other threads.
+ *
+ * Do not ever use this function, there's a 99% chance you're doing it wrong.
+ *
+ * The scheduler is at all times free to pick the calling task as the most
+ * eligible task to run, if removing the yield() call from your code breaks
+ * it, it's already broken.
+ *
+ * Typical broken usage is:
+ *
+ * while (!event)
+ * yield();
+ *
+ * where one assumes that yield() will let 'the other' process run that will
+ * make event true. If the current task is a SCHED_FIFO task that will never
+ * happen. Never use yield() as a progress guarantee!!
+ *
+ * If you want to use yield() to wait for something, use wait_event().
+ * If you want to use yield() to be 'nice' for others, use cond_resched().
+ * If you still want to use yield(), do not!
+ */
+void __sched yield(void)
+{
+ set_current_state(TASK_RUNNING);
+ sys_sched_yield();
+}
+EXPORT_SYMBOL(yield);
+
+/**
+ * yield_to - yield the current processor to another thread in
+ * your thread group, or accelerate that thread toward the
+ * processor it's on.
+ * @p: target task
+ * @preempt: whether task preemption is allowed or not
+ *
+ * It's the caller's job to ensure that the target task struct
+ * can't go away on us before we can do any checks.
+ *
+ * Return:
+ * true (>0) if we indeed boosted the target task.
+ * false (0) if we failed to boost the target.
+ * -ESRCH if there's no task to yield to.
+ */
+int __sched yield_to(struct task_struct *p, bool preempt)
+{
+ struct rq *rq, *p_rq;
+ unsigned long flags;
+ int yielded = 0;
+
+ rq = this_rq();
+ grq_lock_irqsave(&flags);
+ if (task_running(p) || p->state) {
+ yielded = -ESRCH;
+ goto out_unlock;
+ }
+
+ p_rq = task_rq(p);
+ yielded = 1;
+ if (p->deadline > rq->rq_deadline)
+ p->deadline = rq->rq_deadline;
+ p->time_slice += rq->rq_time_slice;
+ rq->rq_time_slice = 0;
+ if (p->time_slice > timeslice())
+ p->time_slice = timeslice();
+ if (preempt && rq != p_rq)
+ resched_curr(p_rq);
+out_unlock:
+ grq_unlock_irqrestore(&flags);
+
+ if (yielded > 0)
+ schedule();
+ return yielded;
+}
+EXPORT_SYMBOL_GPL(yield_to);
+
+/*
+ * This task is about to go to sleep on IO. Increment rq->nr_iowait so
+ * that process accounting knows that this is a task in IO wait state.
+ *
+ * But don't do that if it is a deliberate, throttling IO wait (this task
+ * has set its backing_dev_info: the queue against which it should throttle)
+ */
+
+long __sched io_schedule_timeout(long timeout)
+{
+ int old_iowait = current->in_iowait;
+ struct rq *rq;
+ long ret;
+
+ current->in_iowait = 1;
+ blk_schedule_flush_plug(current);
+
+ delayacct_blkio_start();
+ rq = raw_rq();
+ atomic_inc(&rq->nr_iowait);
+ ret = schedule_timeout(timeout);
+ current->in_iowait = old_iowait;
+ atomic_dec(&rq->nr_iowait);
+ delayacct_blkio_end();
+
+ return ret;
+}
+EXPORT_SYMBOL(io_schedule_timeout);
+
+/**
+ * sys_sched_get_priority_max - return maximum RT priority.
+ * @policy: scheduling class.
+ *
+ * Return: On success, this syscall returns the maximum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
+ */
+SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
+{
+ int ret = -EINVAL;
+
+ switch (policy) {
+ case SCHED_FIFO:
+ case SCHED_RR:
+ ret = MAX_USER_RT_PRIO-1;
+ break;
+ case SCHED_NORMAL:
+ case SCHED_BATCH:
+ case SCHED_ISO:
+ case SCHED_IDLEPRIO:
+ ret = 0;
+ break;
+ }
+ return ret;
+}
+
+/**
+ * sys_sched_get_priority_min - return minimum RT priority.
+ * @policy: scheduling class.
+ *
+ * Return: On success, this syscall returns the minimum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
+ */
+SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
+{
+ int ret = -EINVAL;
+
+ switch (policy) {
+ case SCHED_FIFO:
+ case SCHED_RR:
+ ret = 1;
+ break;
+ case SCHED_NORMAL:
+ case SCHED_BATCH:
+ case SCHED_ISO:
+ case SCHED_IDLEPRIO:
+ ret = 0;
+ break;
+ }
+ return ret;
+}
+
+/**
+ * sys_sched_rr_get_interval - return the default timeslice of a process.
+ * @pid: pid of the process.
+ * @interval: userspace pointer to the timeslice value.
+ *
+ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
+ * an error code.
+ */
+SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
+ struct timespec __user *, interval)
+{
+ struct task_struct *p;
+ unsigned int time_slice;
+ unsigned long flags;
+ int retval;
+ struct timespec t;
+
+ if (pid < 0)
+ return -EINVAL;
+
+ retval = -ESRCH;
+ rcu_read_lock();
+ p = find_process_by_pid(pid);
+ if (!p)
+ goto out_unlock;
+
+ retval = security_task_getscheduler(p);
+ if (retval)
+ goto out_unlock;
+
+ grq_lock_irqsave(&flags);
+ time_slice = p->policy == SCHED_FIFO ? 0 : MS_TO_NS(task_timeslice(p));
+ grq_unlock_irqrestore(&flags);
+
+ rcu_read_unlock();
+ t = ns_to_timespec(time_slice);
+ retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
+ return retval;
+
+out_unlock:
+ rcu_read_unlock();
+ return retval;
+}
+
+static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
+
+void sched_show_task(struct task_struct *p)
+{
+ unsigned long free = 0;
+ int ppid;
+ unsigned long state = p->state;
+
+ if (state)
+ state = __ffs(state) + 1;
+ printk(KERN_INFO "%-15.15s %c", p->comm,
+ state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
+#if BITS_PER_LONG == 32
+ if (state == TASK_RUNNING)
+ printk(KERN_CONT " running ");
+ else
+ printk(KERN_CONT " %08lx ", thread_saved_pc(p));
+#else
+ if (state == TASK_RUNNING)
+ printk(KERN_CONT " running task ");
+ else
+ printk(KERN_CONT " %016lx ", thread_saved_pc(p));
+#endif
+#ifdef CONFIG_DEBUG_STACK_USAGE
+ free = stack_not_used(p);
+#endif
+ ppid = 0;
+ rcu_read_lock();
+ if (pid_alive(p))
+ ppid = task_pid_nr(rcu_dereference(p->real_parent));
+ rcu_read_unlock();
+ printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
+ task_pid_nr(p), ppid,
+ (unsigned long)task_thread_info(p)->flags);
+
+ print_worker_info(KERN_INFO, p);
+ show_stack(p, NULL);
+}
+
+void show_state_filter(unsigned long state_filter)
+{
+ struct task_struct *g, *p;
+
+#if BITS_PER_LONG == 32
+ printk(KERN_INFO
+ " task PC stack pid father\n");
+#else
+ printk(KERN_INFO
+ " task PC stack pid father\n");
+#endif
+ rcu_read_lock();
+ for_each_process_thread(g, p) {
+ /*
+ * reset the NMI-timeout, listing all tasks on a slow
+ * console might take a lot of time:
+ */
+ touch_nmi_watchdog();
+ if (!state_filter || (p->state & state_filter))
+ sched_show_task(p);
+ }
+
+ touch_all_softlockup_watchdogs();
+
+ rcu_read_unlock();
+ /*
+ * Only show locks if all tasks are dumped:
+ */
+ if (!state_filter)
+ debug_show_all_locks();
+}
+
+void dump_cpu_task(int cpu)
+{
+ pr_info("Task dump for CPU %d:\n", cpu);
+ sched_show_task(cpu_curr(cpu));
+}
+
+#ifdef CONFIG_SMP
+void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
+{
+ cpumask_copy(&p->cpus_allowed, new_mask);
+ p->nr_cpus_allowed = cpumask_weight(new_mask);
+}
+
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+ cpumask_copy(tsk_cpus_allowed(p), new_mask);
+}
+#endif
+
+/**
+ * init_idle - set up an idle thread for a given CPU
+ * @idle: task in question
+ * @cpu: cpu the idle task belongs to
+ *
+ * NOTE: this function does not set the idle thread's NEED_RESCHED
+ * flag, to make booting more robust.
+ */
+void init_idle(struct task_struct *idle, int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&idle->pi_lock, flags);
+ time_lock_grq(rq);
+ idle->last_ran = rq->clock_task;
+ idle->state = TASK_RUNNING;
+ /* Setting prio to illegal value shouldn't matter when never queued */
+ idle->prio = PRIO_LIMIT;
+
+ kasan_unpoison_task_stack(idle);
+
+#ifdef CONFIG_SMP
+ /*
+ * It's possible that init_idle() gets called multiple times on a task,
+ * in that case do_set_cpus_allowed() will not do the right thing.
+ *
+ * And since this is boot we can forgo the serialisation.
+ */
+ set_cpus_allowed_common(idle, cpumask_of(cpu));
+#ifdef CONFIG_SMT_NICE
+ idle->smt_bias = 0;
+#endif
+#endif
+ set_rq_task(rq, idle);
+
+ /* Silence PROVE_RCU */
+ rcu_read_lock();
+ set_task_cpu(idle, cpu);
+ rcu_read_unlock();
+
+ rq->curr = rq->idle = idle;
+ idle->on_cpu = 1;
+ grq_unlock();
+ raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
+
+ /* Set the preempt count _outside_ the spinlocks! */
+ init_idle_preempt_count(idle, cpu);
+
+ ftrace_graph_init_idle_task(idle, cpu);
+#ifdef CONFIG_SMP
+ sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
+#endif
+}
+
+int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
+ const struct cpumask __maybe_unused *trial)
+{
+ return 1;
+}
+
+int task_can_attach(struct task_struct *p,
+ const struct cpumask *cs_cpus_allowed)
+{
+ int ret = 0;
+
+ /*
+ * Kthreads which disallow setaffinity shouldn't be moved
+ * to a new cpuset; we don't want to change their cpu
+ * affinity and isolating such threads by their set of
+ * allowed nodes is unnecessary. Thus, cpusets are not
+ * applicable for such threads. This prevents checking for
+ * success of set_cpus_allowed_ptr() on all attached tasks
+ * before cpus_allowed may be changed.
+ */
+ if (p->flags & PF_NO_SETAFFINITY)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+void wake_q_add(struct wake_q_head *head, struct task_struct *task)
+{
+ struct wake_q_node *node = &task->wake_q;
+
+ /*
+ * Atomically grab the task, if ->wake_q is !nil already it means
+ * it's already queued (either by us or someone else) and will get the
+ * wakeup due to that.
+ *
+ * This cmpxchg() implies a full barrier, which pairs with the write
+ * barrier implied by the wakeup in wake_up_q().
+ */
+ if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
+ return;
+
+ get_task_struct(task);
+
+ /*
+ * The head is context local, there can be no concurrency.
+ */
+ *head->lastp = node;
+ head->lastp = &node->next;
+}
+
+void wake_up_q(struct wake_q_head *head)
+{
+ struct wake_q_node *node = head->first;
+
+ while (node != WAKE_Q_TAIL) {
+ struct task_struct *task;
+
+ task = container_of(node, struct task_struct, wake_q);
+ BUG_ON(!task);
+ /* task can safely be re-inserted now */
+ node = node->next;
+ task->wake_q.next = NULL;
+
+ /*
+ * wake_up_process() implies a wmb() to pair with the queueing
+ * in wake_q_add() so as not to miss wakeups.
+ */
+ wake_up_process(task);
+ put_task_struct(task);
+ }
+}
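+
+/*
+ * Illustrative use of the wake queue pair above (assuming the WAKE_Q()
+ * initialiser from linux/sched.h; the other names are placeholders):
+ * wakeups are batched while a lock is held and issued after dropping it:
+ *
+ *	WAKE_Q(wake_q);
+ *
+ *	spin_lock(&some_lock);
+ *	while ((w = pick_next_waiter()) != NULL)
+ *		wake_q_add(&wake_q, w->task);
+ *	spin_unlock(&some_lock);
+ *	wake_up_q(&wake_q);
+ */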
+
+void resched_cpu(int cpu)
+{
+ unsigned long flags;
+
+ grq_lock_irqsave(&flags);
+ resched_task(cpu_curr(cpu));
+ grq_unlock_irqrestore(&flags);
+}
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_NO_HZ_COMMON
+void nohz_balance_enter_idle(int cpu)
+{
+}
+
+void select_nohz_load_balancer(int stop_tick)
+{
+}
+
+void set_cpu_sd_state_idle(void) {}
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+/**
+ * lowest_flag_domain - Return lowest sched_domain containing flag.
+ * @cpu: The cpu whose lowest level of sched domain is to
+ * be returned.
+ * @flag: The flag to check for the lowest sched_domain
+ * for the given cpu.
+ *
+ * Returns the lowest sched_domain of a cpu which contains the given flag.
+ */
+static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
+{
+ struct sched_domain *sd;
+
+ for_each_domain(cpu, sd)
+ if (sd && (sd->flags & flag))
+ break;
+
+ return sd;
+}
+
+/**
+ * for_each_flag_domain - Iterates over sched_domains containing the flag.
+ * @cpu: The cpu whose domains we're iterating over.
+ * @sd: variable holding the value of the power_savings_sd
+ * for cpu.
+ * @flag: The flag to filter the sched_domains to be iterated.
+ *
+ * Iterates over all the scheduler domains for a given cpu that has the 'flag'
+ * set, starting from the lowest sched_domain to the highest.
+ */
+#define for_each_flag_domain(cpu, sd, flag) \
+ for (sd = lowest_flag_domain(cpu, flag); \
+ (sd && (sd->flags & flag)); sd = sd->parent)
+
+#endif /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
+
+/*
+ * In the semi idle case, use the nearest busy cpu for migrating timers
+ * from an idle cpu. This is good for power-savings.
+ *
+ * We don't do a similar optimization for a completely idle system, as
+ * selecting an idle cpu would add more delay to the timers than intended
+ * (that cpu's timer base may not be up to date with respect to jiffies etc.).
+ */
+int get_nohz_timer_target(void)
+{
+ int i, cpu = smp_processor_id();
+ struct sched_domain *sd;
+
+ if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
+ return cpu;
+
+ rcu_read_lock();
+ for_each_domain(cpu, sd) {
+ for_each_cpu(i, sched_domain_span(sd)) {
+ if (!idle_cpu(i) && is_housekeeping_cpu(cpu)) {
+ cpu = i;
+ goto unlock;
+ }
+ }
+ }
+
+ if (!is_housekeeping_cpu(cpu))
+ cpu = housekeeping_any_cpu();
+unlock:
+ rcu_read_unlock();
+ return cpu;
+}
+
+/*
+ * When add_timer_on() enqueues a timer into the timer wheel of an
+ * idle CPU then this timer might expire before the next timer event
+ * which is scheduled to wake up that CPU. In case of a completely
+ * idle system the next event might even be infinite time into the
+ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
+ * leaves the inner idle loop so the newly added timer is taken into
+ * account when the CPU goes back to idle and evaluates the timer
+ * wheel for the next timer event.
+ */
+void wake_up_idle_cpu(int cpu)
+{
+ if (cpu == smp_processor_id())
+ return;
+
+ set_tsk_need_resched(cpu_rq(cpu)->idle);
+ smp_send_reschedule(cpu);
+}
+
+void wake_up_nohz_cpu(int cpu)
+{
+ wake_up_idle_cpu(cpu);
+}
+#endif /* CONFIG_NO_HZ_COMMON */
+
+/*
+ * Change a given task's CPU affinity. Migrate the thread to a
+ * proper CPU and schedule it away if the CPU it's executing on
+ * is removed from the allowed bitmask.
+ *
+ * NOTE: the caller must have a valid reference to the task, the
+ * task must not exit() & deallocate itself prematurely. The
+ * call is not atomic; no spinlocks may be held.
+ */
+static int __set_cpus_allowed_ptr(struct task_struct *p,
+ const struct cpumask *new_mask, bool check)
+{
+ bool running_wrong = false;
+ bool queued = false;
+ unsigned long flags;
+ struct rq *rq;
+ int ret = 0;
+
+ rq = task_grq_lock(p, &flags);
+
+ /*
+	 * Must re-check here, to close a race against __kthread_bind();
+	 * sched_setaffinity() is not guaranteed to observe the flag.
+ */
+ if (check && (p->flags & PF_NO_SETAFFINITY)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (cpumask_equal(tsk_cpus_allowed(p), new_mask))
+ goto out;
+
+ if (!cpumask_intersects(new_mask, cpu_active_mask)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ queued = task_queued(p);
+
+ do_set_cpus_allowed(p, new_mask);
+
+ /* Can the task run on the task's current CPU? If so, we're done */
+ if (cpumask_test_cpu(task_cpu(p), new_mask))
+ goto out;
+
+ if (task_running(p)) {
+ /* Task is running on the wrong cpu now, reschedule it. */
+ if (rq == this_rq()) {
+ set_tsk_need_resched(p);
+ running_wrong = true;
+ } else
+ resched_task(p);
+ } else
+ set_task_cpu(p, cpumask_any_and(cpu_active_mask, new_mask));
+
+out:
+ if (queued)
+ try_preempt(p, rq);
+ task_grq_unlock(&flags);
+
+ if (running_wrong)
+ preempt_schedule_common();
+
+ return ret;
+}
+
+int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+{
+ return __set_cpus_allowed_ptr(p, new_mask, false);
+}
+EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
+
+#ifdef CONFIG_HOTPLUG_CPU
+/* Run through the task list and find tasks affined to the dead cpu, then
+ * remove that cpu from their allowed mask, enable cpu0 and set the zerobound
+ * flag. */
+static void bind_zero(int src_cpu)
+{
+ struct task_struct *p, *t;
+ int bound = 0;
+
+ if (src_cpu == 0)
+ return;
+
+ do_each_thread(t, p) {
+ if (cpumask_test_cpu(src_cpu, tsk_cpus_allowed(p))) {
+ cpumask_clear_cpu(src_cpu, tsk_cpus_allowed(p));
+ cpumask_set_cpu(0, tsk_cpus_allowed(p));
+ p->zerobound = true;
+ bound++;
+ }
+ clear_sticky(p);
+ } while_each_thread(t, p);
+
+ if (bound) {
+ printk(KERN_INFO "Removed affinity for %d processes to cpu %d\n",
+ bound, src_cpu);
+ }
+}
+
+/* Find processes with the zerobound flag and reenable their affinity for the
+ * CPU coming alive. */
+static void unbind_zero(int src_cpu)
+{
+ int unbound = 0, zerobound = 0;
+ struct task_struct *p, *t;
+
+ if (src_cpu == 0)
+ return;
+
+ do_each_thread(t, p) {
+ if (!p->mm)
+ p->zerobound = false;
+ if (p->zerobound) {
+ unbound++;
+ cpumask_set_cpu(src_cpu, tsk_cpus_allowed(p));
+ /* Once every CPU affinity has been re-enabled, remove
+ * the zerobound flag */
+ if (cpumask_subset(cpu_possible_mask, tsk_cpus_allowed(p))) {
+ p->zerobound = false;
+ zerobound++;
+ }
+ }
+ } while_each_thread(t, p);
+
+ if (unbound) {
+ printk(KERN_INFO "Added affinity for %d processes to cpu %d\n",
+ unbound, src_cpu);
+ }
+ if (zerobound) {
+ printk(KERN_INFO "Released forced binding to cpu0 for %d processes\n",
+ zerobound);
+ }
+}
+
+/*
+ * Ensures that the idle task is using init_mm right before its cpu goes
+ * offline.
+ */
+void idle_task_exit(void)
+{
+ struct mm_struct *mm = current->active_mm;
+
+ BUG_ON(cpu_online(smp_processor_id()));
+
+ if (mm != &init_mm) {
+ switch_mm(mm, &init_mm, current);
+ finish_arch_post_lock_switch();
+ }
+ mmdrop(mm);
+}
+#else /* CONFIG_HOTPLUG_CPU */
+static void unbind_zero(int src_cpu) {}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+void sched_set_stop_task(int cpu, struct task_struct *stop)
+{
+ struct sched_param stop_param = { .sched_priority = STOP_PRIO };
+ struct sched_param start_param = { .sched_priority = 0 };
+ struct task_struct *old_stop = cpu_rq(cpu)->stop;
+
+ if (stop) {
+ /*
+		 * Make it appear like a SCHED_FIFO task; it's something
+		 * userspace knows about and won't get confused by.
+ *
+ * Also, it will make PI more or less work without too
+ * much confusion -- but then, stop work should not
+ * rely on PI working anyway.
+ */
+ sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param);
+ }
+
+ cpu_rq(cpu)->stop = stop;
+
+ if (old_stop) {
+ /*
+ * Reset it back to a normal scheduling policy so that
+ * it can die in pieces.
+ */
+ sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param);
+ }
+}
+
+
+#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
+
+static struct ctl_table sd_ctl_dir[] = {
+ {
+ .procname = "sched_domain",
+ .mode = 0555,
+ },
+ {}
+};
+
+static struct ctl_table sd_ctl_root[] = {
+ {
+ .procname = "kernel",
+ .mode = 0555,
+ .child = sd_ctl_dir,
+ },
+ {}
+};
+
+static struct ctl_table *sd_alloc_ctl_entry(int n)
+{
+ struct ctl_table *entry =
+ kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
+
+ return entry;
+}
+
+static void sd_free_ctl_entry(struct ctl_table **tablep)
+{
+ struct ctl_table *entry;
+
+ /*
+ * In the intermediate directories, both the child directory and
+ * procname are dynamically allocated and could fail but the mode
+ * will always be set. In the lowest directory the names are
+ * static strings and all have proc handlers.
+ */
+ for (entry = *tablep; entry->mode; entry++) {
+ if (entry->child)
+ sd_free_ctl_entry(&entry->child);
+ if (entry->proc_handler == NULL)
+ kfree(entry->procname);
+ }
+
+ kfree(*tablep);
+ *tablep = NULL;
+}
+
+static void
+set_table_entry(struct ctl_table *entry,
+ const char *procname, void *data, int maxlen,
+ mode_t mode, proc_handler *proc_handler)
+{
+ entry->procname = procname;
+ entry->data = data;
+ entry->maxlen = maxlen;
+ entry->mode = mode;
+ entry->proc_handler = proc_handler;
+}
+
+static struct ctl_table *
+sd_alloc_ctl_domain_table(struct sched_domain *sd)
+{
+ struct ctl_table *table = sd_alloc_ctl_entry(14);
+
+ if (table == NULL)
+ return NULL;
+
+ set_table_entry(&table[0], "min_interval", &sd->min_interval,
+ sizeof(long), 0644, proc_doulongvec_minmax);
+ set_table_entry(&table[1], "max_interval", &sd->max_interval,
+ sizeof(long), 0644, proc_doulongvec_minmax);
+ set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
+ sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
+ sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
+ sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
+ sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
+ sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
+ sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
+ sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[9], "cache_nice_tries",
+ &sd->cache_nice_tries,
+ sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[10], "flags", &sd->flags,
+ sizeof(int), 0644, proc_dointvec_minmax);
+ set_table_entry(&table[11], "max_newidle_lb_cost",
+ &sd->max_newidle_lb_cost,
+ sizeof(long), 0644, proc_doulongvec_minmax);
+ set_table_entry(&table[12], "name", sd->name,
+ CORENAME_MAX_SIZE, 0444, proc_dostring);
+ /* &table[13] is terminator */
+
+ return table;
+}
+
+static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
+{
+ struct ctl_table *entry, *table;
+ struct sched_domain *sd;
+ int domain_num = 0, i;
+ char buf[32];
+
+ for_each_domain(cpu, sd)
+ domain_num++;
+ entry = table = sd_alloc_ctl_entry(domain_num + 1);
+ if (table == NULL)
+ return NULL;
+
+ i = 0;
+ for_each_domain(cpu, sd) {
+ snprintf(buf, 32, "domain%d", i);
+ entry->procname = kstrdup(buf, GFP_KERNEL);
+ entry->mode = 0555;
+ entry->child = sd_alloc_ctl_domain_table(sd);
+ entry++;
+ i++;
+ }
+ return table;
+}
+
+static struct ctl_table_header *sd_sysctl_header;
+static void register_sched_domain_sysctl(void)
+{
+ int i, cpu_num = num_possible_cpus();
+ struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
+ char buf[32];
+
+ WARN_ON(sd_ctl_dir[0].child);
+ sd_ctl_dir[0].child = entry;
+
+ if (entry == NULL)
+ return;
+
+ for_each_possible_cpu(i) {
+ snprintf(buf, 32, "cpu%d", i);
+ entry->procname = kstrdup(buf, GFP_KERNEL);
+ entry->mode = 0555;
+ entry->child = sd_alloc_ctl_cpu_table(i);
+ entry++;
+ }
+
+ WARN_ON(sd_sysctl_header);
+ sd_sysctl_header = register_sysctl_table(sd_ctl_root);
+}
+
+/* may be called multiple times per register */
+static void unregister_sched_domain_sysctl(void)
+{
+ unregister_sysctl_table(sd_sysctl_header);
+ sd_sysctl_header = NULL;
+ if (sd_ctl_dir[0].child)
+ sd_free_ctl_entry(&sd_ctl_dir[0].child);
+}
+#else /* CONFIG_SCHED_DEBUG && CONFIG_SYSCTL */
+static void register_sched_domain_sysctl(void)
+{
+}
+static void unregister_sched_domain_sysctl(void)
+{
+}
+#endif /* CONFIG_SCHED_DEBUG && CONFIG_SYSCTL */
+
+static void set_rq_online(struct rq *rq)
+{
+ if (!rq->online) {
+ cpumask_set_cpu(cpu_of(rq), rq->rd->online);
+ rq->online = true;
+ }
+}
+
+static void set_rq_offline(struct rq *rq)
+{
+ if (rq->online) {
+ cpumask_clear_cpu(cpu_of(rq), rq->rd->online);
+ rq->online = false;
+ }
+}
+
+/*
+ * migration_call - callback that gets triggered when a CPU is brought up or taken down.
+ */
+static int
+migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ int cpu = (long)hcpu;
+ unsigned long flags;
+ struct rq *rq = cpu_rq(cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+ struct task_struct *idle = rq->idle;
+#endif
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_STARTING:
+ return NOTIFY_OK;
+ case CPU_UP_PREPARE:
+ break;
+
+ case CPU_ONLINE:
+ /* Update our root-domain */
+ grq_lock_irqsave(&flags);
+ if (rq->rd) {
+ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+
+ set_rq_online(rq);
+ }
+ unbind_zero(cpu);
+ grq.noc = num_online_cpus();
+ grq_unlock_irqrestore(&flags);
+ break;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ case CPU_DEAD:
+ grq_lock_irq();
+ set_rq_task(rq, idle);
+ update_clocks(rq);
+ grq_unlock_irq();
+ break;
+
+ case CPU_DYING:
+ /* Update our root-domain */
+ grq_lock_irqsave(&flags);
+ if (rq->rd) {
+ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+ set_rq_offline(rq);
+ }
+ bind_zero(cpu);
+ grq.noc = num_online_cpus();
+ grq_unlock_irqrestore(&flags);
+ break;
+#endif
+ }
+ return NOTIFY_OK;
+}
+
+/*
+ * Register at high priority so that task migration (migrate_all_tasks)
+ * happens before everything else. This has to be lower priority than
+ * the notifier in the perf_counter subsystem, though.
+ */
+static struct notifier_block migration_notifier = {
+ .notifier_call = migration_call,
+ .priority = CPU_PRI_MIGRATION,
+};
+
+static int sched_cpu_active(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ int cpu = (long)hcpu;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_STARTING:
+ return NOTIFY_OK;
+ case CPU_ONLINE:
+ /*
+ * At this point a starting CPU has marked itself as online via
+ * set_cpu_online(). But it might not yet have marked itself
+ * as active, which is essential from here on.
+ */
+ set_cpu_active(cpu, true);
+ stop_machine_unpark(cpu);
+ return NOTIFY_OK;
+
+ case CPU_DOWN_FAILED:
+ set_cpu_active(cpu, true);
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static int sched_cpu_inactive(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_DOWN_PREPARE:
+ set_cpu_active((long)hcpu, false);
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+int __init migration_init(void)
+{
+ void *cpu = (void *)(long)smp_processor_id();
+ int err;
+
+ /* Initialise migration for the boot CPU */
+ err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
+ BUG_ON(err == NOTIFY_BAD);
+ migration_call(&migration_notifier, CPU_ONLINE, cpu);
+ register_cpu_notifier(&migration_notifier);
+
+ /* Register cpu active notifiers */
+ cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
+ cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
+
+ return 0;
+}
+early_initcall(migration_init);
+
+static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
+
+#ifdef CONFIG_SCHED_DEBUG
+
+static __read_mostly int sched_debug_enabled;
+
+static int __init sched_debug_setup(char *str)
+{
+ sched_debug_enabled = 1;
+
+ return 0;
+}
+early_param("sched_debug", sched_debug_setup);
+
+static inline bool sched_debug(void)
+{
+ return sched_debug_enabled;
+}
+
+static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
+ struct cpumask *groupmask)
+{
+ cpumask_clear(groupmask);
+
+ printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
+
+ if (!(sd->flags & SD_LOAD_BALANCE)) {
+ printk("does not load-balance\n");
+ if (sd->parent)
+ printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
+ " has parent");
+ return -1;
+ }
+
+ printk(KERN_CONT "span %*pbl level %s\n",
+ cpumask_pr_args(sched_domain_span(sd)), sd->name);
+
+ if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
+ printk(KERN_ERR "ERROR: domain->span does not contain "
+ "CPU%d\n", cpu);
+ }
+
+ printk(KERN_CONT "\n");
+
+ if (!cpumask_equal(sched_domain_span(sd), groupmask))
+ printk(KERN_ERR "ERROR: groups don't span domain->span\n");
+
+ if (sd->parent &&
+ !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
+ printk(KERN_ERR "ERROR: parent span is not a superset "
+ "of domain->span\n");
+ return 0;
+}
+
+static void sched_domain_debug(struct sched_domain *sd, int cpu)
+{
+ int level = 0;
+
+ if (!sched_debug_enabled)
+ return;
+
+ if (!sd) {
+ printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
+ return;
+ }
+
+ printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
+
+ for (;;) {
+ if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
+ break;
+ level++;
+ sd = sd->parent;
+ if (!sd)
+ break;
+ }
+}
+#else /* !CONFIG_SCHED_DEBUG */
+# define sched_domain_debug(sd, cpu) do { } while (0)
+static inline bool sched_debug(void)
+{
+ return false;
+}
+#endif /* CONFIG_SCHED_DEBUG */
+
+static int sd_degenerate(struct sched_domain *sd)
+{
+ if (cpumask_weight(sched_domain_span(sd)) == 1)
+ return 1;
+
+ /* Following flags don't use groups */
+ if (sd->flags & (SD_WAKE_AFFINE))
+ return 0;
+
+ return 1;
+}
+
+static int
+sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
+{
+ unsigned long cflags = sd->flags, pflags = parent->flags;
+
+ if (sd_degenerate(parent))
+ return 1;
+
+ if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
+ return 0;
+
+ if (~cflags & pflags)
+ return 0;
+
+ return 1;
+}
+
+static void free_rootdomain(struct rcu_head *rcu)
+{
+ struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
+
+ cpupri_cleanup(&rd->cpupri);
+ free_cpumask_var(rd->rto_mask);
+ free_cpumask_var(rd->online);
+ free_cpumask_var(rd->span);
+ kfree(rd);
+}
+
+static void rq_attach_root(struct rq *rq, struct root_domain *rd)
+{
+ struct root_domain *old_rd = NULL;
+ unsigned long flags;
+
+ grq_lock_irqsave(&flags);
+
+ if (rq->rd) {
+ old_rd = rq->rd;
+
+ if (cpumask_test_cpu(rq->cpu, old_rd->online))
+ set_rq_offline(rq);
+
+ cpumask_clear_cpu(rq->cpu, old_rd->span);
+
+ /*
+		 * If we don't want to free the old_rd yet then
+ * set old_rd to NULL to skip the freeing later
+ * in this function:
+ */
+ if (!atomic_dec_and_test(&old_rd->refcount))
+ old_rd = NULL;
+ }
+
+ atomic_inc(&rd->refcount);
+ rq->rd = rd;
+
+ cpumask_set_cpu(rq->cpu, rd->span);
+ if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
+ set_rq_online(rq);
+
+ grq_unlock_irqrestore(&flags);
+
+ if (old_rd)
+ call_rcu_sched(&old_rd->rcu, free_rootdomain);
+}
+
+static int init_rootdomain(struct root_domain *rd)
+{
+ memset(rd, 0, sizeof(*rd));
+
+ if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
+ goto out;
+ if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
+ goto free_span;
+ if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+ goto free_online;
+
+ if (cpupri_init(&rd->cpupri) != 0)
+ goto free_rto_mask;
+ return 0;
+
+free_rto_mask:
+ free_cpumask_var(rd->rto_mask);
+free_online:
+ free_cpumask_var(rd->online);
+free_span:
+ free_cpumask_var(rd->span);
+out:
+ return -ENOMEM;
+}
+
+static void init_defrootdomain(void)
+{
+ init_rootdomain(&def_root_domain);
+
+ atomic_set(&def_root_domain.refcount, 1);
+}
+
+static struct root_domain *alloc_rootdomain(void)
+{
+ struct root_domain *rd;
+
+ rd = kmalloc(sizeof(*rd), GFP_KERNEL);
+ if (!rd)
+ return NULL;
+
+ if (init_rootdomain(rd) != 0) {
+ kfree(rd);
+ return NULL;
+ }
+
+ return rd;
+}
+
+static void free_sched_domain(struct rcu_head *rcu)
+{
+ struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
+
+ kfree(sd);
+}
+
+static void destroy_sched_domain(struct sched_domain *sd, int cpu)
+{
+ call_rcu(&sd->rcu, free_sched_domain);
+}
+
+static void destroy_sched_domains(struct sched_domain *sd, int cpu)
+{
+ for (; sd; sd = sd->parent)
+ destroy_sched_domain(sd, cpu);
+}
+
+/*
+ * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
+ * hold the hotplug lock.
+ */
+static void
+cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ struct sched_domain *tmp;
+
+ /* Remove the sched domains which do not contribute to scheduling. */
+ for (tmp = sd; tmp; ) {
+ struct sched_domain *parent = tmp->parent;
+ if (!parent)
+ break;
+
+ if (sd_parent_degenerate(tmp, parent)) {
+ tmp->parent = parent->parent;
+ if (parent->parent)
+ parent->parent->child = tmp;
+ /*
+ * Transfer SD_PREFER_SIBLING down in case of a
+ * degenerate parent; the spans match for this
+ * so the property transfers.
+ */
+ if (parent->flags & SD_PREFER_SIBLING)
+ tmp->flags |= SD_PREFER_SIBLING;
+ destroy_sched_domain(parent, cpu);
+ } else
+ tmp = tmp->parent;
+ }
+
+ if (sd && sd_degenerate(sd)) {
+ tmp = sd;
+ sd = sd->parent;
+ destroy_sched_domain(tmp, cpu);
+ if (sd)
+ sd->child = NULL;
+ }
+
+ sched_domain_debug(sd, cpu);
+
+ rq_attach_root(rq, rd);
+ tmp = rq->sd;
+ rcu_assign_pointer(rq->sd, sd);
+ destroy_sched_domains(tmp, cpu);
+}
+
+/* Setup the mask of cpus configured for isolated domains */
+static int __init isolated_cpu_setup(char *str)
+{
+ alloc_bootmem_cpumask_var(&cpu_isolated_map);
+ cpulist_parse(str, cpu_isolated_map);
+ return 1;
+}
+
+__setup("isolcpus=", isolated_cpu_setup);
+
+struct s_data {
+ struct sched_domain ** __percpu sd;
+ struct root_domain *rd;
+};
+
+enum s_alloc {
+ sa_rootdomain,
+ sa_sd,
+ sa_sd_storage,
+ sa_none,
+};
+
+/*
+ * Initializers for schedule domains
+ * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
+ */
+
+static int default_relax_domain_level = -1;
+int sched_domain_level_max;
+
+static int __init setup_relax_domain_level(char *str)
+{
+ if (kstrtoint(str, 0, &default_relax_domain_level))
+ pr_warn("Unable to set relax_domain_level\n");
+
+ return 1;
+}
+__setup("relax_domain_level=", setup_relax_domain_level);
+
+static void set_domain_attribute(struct sched_domain *sd,
+ struct sched_domain_attr *attr)
+{
+ int request;
+
+ if (!attr || attr->relax_domain_level < 0) {
+ if (default_relax_domain_level < 0)
+ return;
+ else
+ request = default_relax_domain_level;
+ } else
+ request = attr->relax_domain_level;
+ if (request < sd->level) {
+ /* turn off idle balance on this domain */
+ sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
+ } else {
+ /* turn on idle balance on this domain */
+ sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
+ }
+}
+
+static void __sdt_free(const struct cpumask *cpu_map);
+static int __sdt_alloc(const struct cpumask *cpu_map);
+
+static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
+ const struct cpumask *cpu_map)
+{
+ switch (what) {
+ case sa_rootdomain:
+ if (!atomic_read(&d->rd->refcount))
+ free_rootdomain(&d->rd->rcu); /* fall through */
+ case sa_sd:
+ free_percpu(d->sd); /* fall through */
+ case sa_sd_storage:
+ __sdt_free(cpu_map); /* fall through */
+ case sa_none:
+ break;
+ }
+}
+
+static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
+ const struct cpumask *cpu_map)
+{
+ memset(d, 0, sizeof(*d));
+
+ if (__sdt_alloc(cpu_map))
+ return sa_sd_storage;
+ d->sd = alloc_percpu(struct sched_domain *);
+ if (!d->sd)
+ return sa_sd_storage;
+ d->rd = alloc_rootdomain();
+ if (!d->rd)
+ return sa_sd;
+ return sa_rootdomain;
+}
+
+/*
+ * NULL the sd_data elements we've used to build the sched_domain
+ * structure so that the subsequent __free_domain_allocs()
+ * will not free the data we're using.
+ */
+static void claim_allocations(int cpu, struct sched_domain *sd)
+{
+ struct sd_data *sdd = sd->private;
+
+ WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
+ *per_cpu_ptr(sdd->sd, cpu) = NULL;
+}
+
+#ifdef CONFIG_NUMA
+static int sched_domains_numa_levels;
+static int *sched_domains_numa_distance;
+static struct cpumask ***sched_domains_numa_masks;
+static int sched_domains_curr_level;
+#endif
+
+/*
+ * SD_flags allowed in topology descriptions.
+ *
+ * SD_SHARE_CPUCAPACITY - describes SMT topologies
+ * SD_SHARE_PKG_RESOURCES - describes shared caches
+ * SD_NUMA - describes NUMA topologies
+ * SD_SHARE_POWERDOMAIN - describes shared power domain
+ *
+ * Odd one out:
+ * SD_ASYM_PACKING - describes SMT quirks
+ */
+#define TOPOLOGY_SD_FLAGS \
+ (SD_SHARE_CPUCAPACITY | \
+ SD_SHARE_PKG_RESOURCES | \
+ SD_NUMA | \
+ SD_ASYM_PACKING | \
+ SD_SHARE_POWERDOMAIN)
+
+static struct sched_domain *
+sd_init(struct sched_domain_topology_level *tl, int cpu)
+{
+ struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
+ int sd_weight, sd_flags = 0;
+
+#ifdef CONFIG_NUMA
+ /*
+ * Ugly hack to pass state to sd_numa_mask()...
+ */
+ sched_domains_curr_level = tl->numa_level;
+#endif
+
+ sd_weight = cpumask_weight(tl->mask(cpu));
+
+ if (tl->sd_flags)
+ sd_flags = (*tl->sd_flags)();
+ if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
+ "wrong sd_flags in topology description\n"))
+ sd_flags &= ~TOPOLOGY_SD_FLAGS;
+
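+	/*
+	 * The 0*FLAG / 1*FLAG products in the initializer below keep every
+	 * flag visible while enabling only the defaults; the topology-specific
+	 * sd_flags computed above are OR'd in on top.
+	 */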
+ *sd = (struct sched_domain){
+ .min_interval = sd_weight,
+ .max_interval = 2*sd_weight,
+ .busy_factor = 32,
+ .imbalance_pct = 125,
+
+ .cache_nice_tries = 0,
+ .busy_idx = 0,
+ .idle_idx = 0,
+ .newidle_idx = 0,
+ .wake_idx = 0,
+ .forkexec_idx = 0,
+
+ .flags = 1*SD_LOAD_BALANCE
+ | 1*SD_BALANCE_NEWIDLE
+ | 1*SD_BALANCE_EXEC
+ | 1*SD_BALANCE_FORK
+ | 0*SD_BALANCE_WAKE
+ | 1*SD_WAKE_AFFINE
+ | 0*SD_SHARE_CPUCAPACITY
+ | 0*SD_SHARE_PKG_RESOURCES
+ | 0*SD_SERIALIZE
+ | 0*SD_PREFER_SIBLING
+ | 0*SD_NUMA
+ | sd_flags
+ ,
+
+ .last_balance = jiffies,
+ .balance_interval = sd_weight,
+ .smt_gain = 0,
+ .max_newidle_lb_cost = 0,
+ .next_decay_max_lb_cost = jiffies,
+#ifdef CONFIG_SCHED_DEBUG
+ .name = tl->name,
+#endif
+ };
+
+ /*
+ * Convert topological properties into behaviour.
+ */
+
+ if (sd->flags & SD_SHARE_CPUCAPACITY) {
+ sd->flags |= SD_PREFER_SIBLING;
+ sd->imbalance_pct = 110;
+ sd->smt_gain = 1178; /* ~15% */
+
+ } else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
+ sd->imbalance_pct = 117;
+ sd->cache_nice_tries = 1;
+ sd->busy_idx = 2;
+
+#ifdef CONFIG_NUMA
+ } else if (sd->flags & SD_NUMA) {
+ sd->cache_nice_tries = 2;
+ sd->busy_idx = 3;
+ sd->idle_idx = 2;
+
+ sd->flags |= SD_SERIALIZE;
+ if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
+ sd->flags &= ~(SD_BALANCE_EXEC |
+ SD_BALANCE_FORK |
+ SD_WAKE_AFFINE);
+ }
+
+#endif
+ } else {
+ sd->flags |= SD_PREFER_SIBLING;
+ sd->cache_nice_tries = 1;
+ sd->busy_idx = 2;
+ sd->idle_idx = 1;
+ }
+
+ sd->private = &tl->data;
+
+ return sd;
+}
+
+/*
+ * Topology list, bottom-up.
+ */
+static struct sched_domain_topology_level default_topology[] = {
+#ifdef CONFIG_SCHED_SMT
+ { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
+#endif
+#ifdef CONFIG_SCHED_MC
+ { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
+#endif
+ { cpu_cpu_mask, SD_INIT_NAME(DIE) },
+ { NULL, },
+};
+
+static struct sched_domain_topology_level *sched_domain_topology =
+ default_topology;
+
+#define for_each_sd_topology(tl) \
+ for (tl = sched_domain_topology; tl->mask; tl++)
+
+void set_sched_topology(struct sched_domain_topology_level *tl)
+{
+ sched_domain_topology = tl;
+}
+
+#ifdef CONFIG_NUMA
+
+static const struct cpumask *sd_numa_mask(int cpu)
+{
+ return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
+}
+
+static void sched_numa_warn(const char *str)
+{
+ static int done = false;
+	int i, j;
+
+ if (done)
+ return;
+
+ done = true;
+
+ printk(KERN_WARNING "ERROR: %s\n\n", str);
+
+ for (i = 0; i < nr_node_ids; i++) {
+ printk(KERN_WARNING " ");
+ for (j = 0; j < nr_node_ids; j++)
+ printk(KERN_CONT "%02d ", node_distance(i,j));
+ printk(KERN_CONT "\n");
+ }
+ printk(KERN_WARNING "\n");
+}
+
+static bool find_numa_distance(int distance)
+{
+ int i;
+
+ if (distance == node_distance(0, 0))
+ return true;
+
+ for (i = 0; i < sched_domains_numa_levels; i++) {
+ if (sched_domains_numa_distance[i] == distance)
+ return true;
+ }
+
+ return false;
+}
+
+static void sched_init_numa(void)
+{
+ int next_distance, curr_distance = node_distance(0, 0);
+ struct sched_domain_topology_level *tl;
+ int level = 0;
+ int i, j, k;
+
+ sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
+ if (!sched_domains_numa_distance)
+ return;
+
+ /*
+ * O(nr_nodes^2) deduplicating selection sort -- in order to find the
+ * unique distances in the node_distance() table.
+ *
+ * Assumes node_distance(0,j) includes all distances in
+ * node_distance(i,j) in order to avoid cubic time.
+ */
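+	/*
+	 * For example, on a machine whose node_distance() table only contains
+	 * the values 10 (local), 20 and 30, the loops below record two levels:
+	 * sched_domains_numa_distance[] = { 20, 30 } and level = 2.
+	 */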
+ next_distance = curr_distance;
+ for (i = 0; i < nr_node_ids; i++) {
+ for (j = 0; j < nr_node_ids; j++) {
+ for (k = 0; k < nr_node_ids; k++) {
+ int distance = node_distance(i, k);
+
+ if (distance > curr_distance &&
+ (distance < next_distance ||
+ next_distance == curr_distance))
+ next_distance = distance;
+
+ /*
+				 * While this is not a strong assumption, it would be
+				 * nice to know about cases where node A is connected
+				 * to B but B is not equally connected to A.
+ */
+ if (sched_debug() && node_distance(k, i) != distance)
+ sched_numa_warn("Node-distance not symmetric");
+
+ if (sched_debug() && i && !find_numa_distance(distance))
+ sched_numa_warn("Node-0 not representative");
+ }
+ if (next_distance != curr_distance) {
+ sched_domains_numa_distance[level++] = next_distance;
+ sched_domains_numa_levels = level;
+ curr_distance = next_distance;
+ } else break;
+ }
+
+ /*
+ * In case of sched_debug() we verify the above assumption.
+ */
+ if (!sched_debug())
+ break;
+ }
+ /*
+ * 'level' contains the number of unique distances, excluding the
+ * identity distance node_distance(i,i).
+ *
+ * The sched_domains_numa_distance[] array includes the actual distance
+ * numbers.
+ */
+
+ /*
+ * Here, we should temporarily reset sched_domains_numa_levels to 0.
+ * If it fails to allocate memory for the sched_domains_numa_masks[][] array,
+ * the array will contain fewer than 'level' members. This could be
+ * dangerous when we use it to iterate over the sched_domains_numa_masks[][]
+ * array in other functions.
+ *
+ * We reset it to 'level' at the end of this function.
+ */
+ sched_domains_numa_levels = 0;
+
+ sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
+ if (!sched_domains_numa_masks)
+ return;
+
+ /*
+ * Now for each level, construct a mask per node which contains all
+ * cpus of nodes that are that many hops away from us.
+ */
+ for (i = 0; i < level; i++) {
+ sched_domains_numa_masks[i] =
+ kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
+ if (!sched_domains_numa_masks[i])
+ return;
+
+ for (j = 0; j < nr_node_ids; j++) {
+ struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
+ if (!mask)
+ return;
+
+ sched_domains_numa_masks[i][j] = mask;
+
+ for_each_node(k) {
+ if (node_distance(j, k) > sched_domains_numa_distance[i])
+ continue;
+
+ cpumask_or(mask, mask, cpumask_of_node(k));
+ }
+ }
+ }
+
+ /* Compute default topology size */
+ for (i = 0; sched_domain_topology[i].mask; i++);
+
+ tl = kzalloc((i + level + 1) *
+ sizeof(struct sched_domain_topology_level), GFP_KERNEL);
+ if (!tl)
+ return;
+
+ /*
+ * Copy the default topology bits..
+ */
+ for (i = 0; sched_domain_topology[i].mask; i++)
+ tl[i] = sched_domain_topology[i];
+
+ /*
+ * .. and append 'j' levels of NUMA goodness.
+ */
+ for (j = 0; j < level; i++, j++) {
+ tl[i] = (struct sched_domain_topology_level){
+ .mask = sd_numa_mask,
+ .sd_flags = cpu_numa_flags,
+ .flags = SDTL_OVERLAP,
+ .numa_level = j,
+ SD_INIT_NAME(NUMA)
+ };
+ }
+
+ sched_domain_topology = tl;
+
+ sched_domains_numa_levels = level;
+}
+
+static void sched_domains_numa_masks_set(int cpu)
+{
+ int i, j;
+ int node = cpu_to_node(cpu);
+
+ for (i = 0; i < sched_domains_numa_levels; i++) {
+ for (j = 0; j < nr_node_ids; j++) {
+ if (node_distance(j, node) <= sched_domains_numa_distance[i])
+ cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
+ }
+ }
+}
+
+static void sched_domains_numa_masks_clear(int cpu)
+{
+ int i, j;
+ for (i = 0; i < sched_domains_numa_levels; i++) {
+ for (j = 0; j < nr_node_ids; j++)
+ cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
+ }
+}
+
+/*
+ * Update sched_domains_numa_masks[level][node] array when new cpus
+ * are onlined.
+ */
+static int sched_domains_numa_masks_update(struct notifier_block *nfb,
+ unsigned long action,
+ void *hcpu)
+{
+ int cpu = (long)hcpu;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+ sched_domains_numa_masks_set(cpu);
+ break;
+
+ case CPU_DEAD:
+ sched_domains_numa_masks_clear(cpu);
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+#else
+static inline void sched_init_numa(void)
+{
+}
+
+static int sched_domains_numa_masks_update(struct notifier_block *nfb,
+ unsigned long action,
+ void *hcpu)
+{
+ return 0;
+}
+#endif /* CONFIG_NUMA */
+
+static int __sdt_alloc(const struct cpumask *cpu_map)
+{
+ struct sched_domain_topology_level *tl;
+ int j;
+
+ for_each_sd_topology(tl) {
+ struct sd_data *sdd = &tl->data;
+
+ sdd->sd = alloc_percpu(struct sched_domain *);
+ if (!sdd->sd)
+ return -ENOMEM;
+
+ for_each_cpu(j, cpu_map) {
+ struct sched_domain *sd;
+
+ sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
+ GFP_KERNEL, cpu_to_node(j));
+ if (!sd)
+ return -ENOMEM;
+
+ *per_cpu_ptr(sdd->sd, j) = sd;
+ }
+ }
+
+ return 0;
+}
+
+static void __sdt_free(const struct cpumask *cpu_map)
+{
+ struct sched_domain_topology_level *tl;
+ int j;
+
+ for_each_sd_topology(tl) {
+ struct sd_data *sdd = &tl->data;
+
+ for_each_cpu(j, cpu_map) {
+ struct sched_domain *sd;
+
+ if (sdd->sd) {
+ sd = *per_cpu_ptr(sdd->sd, j);
+ kfree(*per_cpu_ptr(sdd->sd, j));
+ }
+ }
+ free_percpu(sdd->sd);
+ sdd->sd = NULL;
+ }
+}
+
+struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
+ const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+ struct sched_domain *child, int cpu)
+{
+ struct sched_domain *sd = sd_init(tl, cpu);
+ if (!sd)
+ return child;
+
+ cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
+ if (child) {
+ sd->level = child->level + 1;
+ sched_domain_level_max = max(sched_domain_level_max, sd->level);
+ child->parent = sd;
+ sd->child = child;
+
+ if (!cpumask_subset(sched_domain_span(child),
+ sched_domain_span(sd))) {
+ pr_err("BUG: arch topology borken\n");
+#ifdef CONFIG_SCHED_DEBUG
+ pr_err(" the %s domain not a subset of the %s domain\n",
+ child->name, sd->name);
+#endif
+ /* Fixup, ensure @sd has at least @child cpus. */
+ cpumask_or(sched_domain_span(sd),
+ sched_domain_span(sd),
+ sched_domain_span(child));
+ }
+
+ }
+ set_domain_attribute(sd, attr);
+
+ return sd;
+}
+
+/*
+ * Build sched domains for a given set of cpus and attach the sched domains
+ * to the individual cpus
+ */
+static int build_sched_domains(const struct cpumask *cpu_map,
+ struct sched_domain_attr *attr)
+{
+ enum s_alloc alloc_state;
+ struct sched_domain *sd;
+ struct s_data d;
+ int i, ret = -ENOMEM;
+
+ alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
+ if (alloc_state != sa_rootdomain)
+ goto error;
+
+ /* Set up domains for cpus specified by the cpu_map. */
+ for_each_cpu(i, cpu_map) {
+ struct sched_domain_topology_level *tl;
+
+ sd = NULL;
+ for_each_sd_topology(tl) {
+ sd = build_sched_domain(tl, cpu_map, attr, sd, i);
+ if (tl == sched_domain_topology)
+ *per_cpu_ptr(d.sd, i) = sd;
+ if (tl->flags & SDTL_OVERLAP)
+ sd->flags |= SD_OVERLAP;
+ if (cpumask_equal(cpu_map, sched_domain_span(sd)))
+ break;
+ }
+ }
+
+	/* Claim the allocations used to build the domains so they are not freed below */
+ for (i = nr_cpumask_bits-1; i >= 0; i--) {
+ if (!cpumask_test_cpu(i, cpu_map))
+ continue;
+
+ for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
+ claim_allocations(i, sd);
+ }
+ }
+
+ /* Attach the domains */
+ rcu_read_lock();
+ for_each_cpu(i, cpu_map) {
+ sd = *per_cpu_ptr(d.sd, i);
+ cpu_attach_domain(sd, d.rd, i);
+ }
+ rcu_read_unlock();
+
+ ret = 0;
+error:
+ __free_domain_allocs(&d, alloc_state, cpu_map);
+ return ret;
+}
+
+static cpumask_var_t *doms_cur; /* current sched domains */
+static int ndoms_cur; /* number of sched domains in 'doms_cur' */
+static struct sched_domain_attr *dattr_cur;
+				/* attributes of custom domains in 'doms_cur' */
+
+/*
+ * Special case: If a kmalloc of a doms_cur partition (array of
+ * cpumask) fails, then fallback to a single sched domain,
+ * as determined by the single cpumask fallback_doms.
+ */
+static cpumask_var_t fallback_doms;
+
+/*
+ * arch_update_cpu_topology lets virtualized architectures update the
+ * cpu core maps. It is supposed to return 1 if the topology changed
+ * or 0 if it stayed the same.
+ */
+int __weak arch_update_cpu_topology(void)
+{
+ return 0;
+}
+
+cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
+{
+ int i;
+ cpumask_var_t *doms;
+
+ doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
+ if (!doms)
+ return NULL;
+ for (i = 0; i < ndoms; i++) {
+ if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
+ free_sched_domains(doms, i);
+ return NULL;
+ }
+ }
+ return doms;
+}
+
+void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
+{
+ unsigned int i;
+ for (i = 0; i < ndoms; i++)
+ free_cpumask_var(doms[i]);
+ kfree(doms);
+}
+
+/*
+ * Set up scheduler domains and groups. Callers must hold the hotplug lock.
+ * For now this just excludes isolated cpus, but could be used to
+ * exclude other special cases in the future.
+ */
+static int init_sched_domains(const struct cpumask *cpu_map)
+{
+ int err;
+
+ arch_update_cpu_topology();
+ ndoms_cur = 1;
+ doms_cur = alloc_sched_domains(ndoms_cur);
+ if (!doms_cur)
+ doms_cur = &fallback_doms;
+ cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
+ err = build_sched_domains(doms_cur[0], NULL);
+ register_sched_domain_sysctl();
+
+ return err;
+}
+
+/*
+ * Detach sched domains from a group of cpus specified in cpu_map
+ * These cpus will now be attached to the NULL domain
+ */
+static void detach_destroy_domains(const struct cpumask *cpu_map)
+{
+ int i;
+
+ rcu_read_lock();
+ for_each_cpu(i, cpu_map)
+ cpu_attach_domain(NULL, &def_root_domain, i);
+ rcu_read_unlock();
+}
+
+/* handle null as "default" */
+static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
+ struct sched_domain_attr *new, int idx_new)
+{
+ struct sched_domain_attr tmp;
+
+ /* fast path */
+ if (!new && !cur)
+ return 1;
+
+ tmp = SD_ATTR_INIT;
+ return !memcmp(cur ? (cur + idx_cur) : &tmp,
+ new ? (new + idx_new) : &tmp,
+ sizeof(struct sched_domain_attr));
+}
+
+/*
+ * Partition sched domains as specified by the 'ndoms_new'
+ * cpumasks in the array doms_new[] of cpumasks. This compares
+ * doms_new[] to the current sched domain partitioning, doms_cur[].
+ * It destroys each deleted domain and builds each new domain.
+ *
+ * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
+ * The masks don't intersect (don't overlap). We should set up one
+ * sched domain for each mask. CPUs not in any of the cpumasks will
+ * not be load balanced. If the same cpumask appears both in the
+ * current 'doms_cur' domains and in the new 'doms_new', we can leave
+ * it as it is.
+ *
+ * The passed-in 'doms_new' should be allocated using
+ * alloc_sched_domains. This routine takes ownership of it and will
+ * free_sched_domains it when done with it. If the caller failed the
+ * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
+ * and partition_sched_domains() will fall back to the single partition
+ * 'fallback_doms'; this also forces the domains to be rebuilt.
+ *
+ * If doms_new == NULL it will be replaced with cpu_online_mask.
+ * ndoms_new == 0 is a special case for destroying existing domains,
+ * and it will not create the default domain.
+ *
+ * Call with hotplug lock held
+ */
+void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
+ struct sched_domain_attr *dattr_new)
+{
+ int i, j, n;
+ int new_topology;
+
+ mutex_lock(&sched_domains_mutex);
+
+ /* always unregister in case we don't destroy any domains */
+ unregister_sched_domain_sysctl();
+
+ /* Let architecture update cpu core mappings. */
+ new_topology = arch_update_cpu_topology();
+
+ n = doms_new ? ndoms_new : 0;
+
+ /* Destroy deleted domains */
+ for (i = 0; i < ndoms_cur; i++) {
+ for (j = 0; j < n && !new_topology; j++) {
+ if (cpumask_equal(doms_cur[i], doms_new[j])
+ && dattrs_equal(dattr_cur, i, dattr_new, j))
+ goto match1;
+ }
+ /* no match - a current sched domain not in new doms_new[] */
+ detach_destroy_domains(doms_cur[i]);
+match1:
+ ;
+ }
+
+ n = ndoms_cur;
+ if (doms_new == NULL) {
+ n = 0;
+ doms_new = &fallback_doms;
+ cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
+ WARN_ON_ONCE(dattr_new);
+ }
+
+ /* Build new domains */
+ for (i = 0; i < ndoms_new; i++) {
+ for (j = 0; j < n && !new_topology; j++) {
+ if (cpumask_equal(doms_new[i], doms_cur[j])
+ && dattrs_equal(dattr_new, i, dattr_cur, j))
+ goto match2;
+ }
+ /* no match - add a new doms_new */
+ build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
+match2:
+ ;
+ }
+
+ /* Remember the new sched domains */
+ if (doms_cur != &fallback_doms)
+ free_sched_domains(doms_cur, ndoms_cur);
+ kfree(dattr_cur); /* kfree(NULL) is safe */
+ doms_cur = doms_new;
+ dattr_cur = dattr_new;
+ ndoms_cur = ndoms_new;
+
+ register_sched_domain_sysctl();
+
+ mutex_unlock(&sched_domains_mutex);
+}
+
+static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
+
+/*
+ * Update cpusets according to cpu_active mask. If cpusets are
+ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
+ * around partition_sched_domains().
+ *
+ * If we come here as part of a suspend/resume, don't touch cpusets because we
+ * want to restore it back to its original state upon resume anyway.
+ */
+static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
+ void *hcpu)
+{
+ switch (action) {
+ case CPU_ONLINE_FROZEN:
+ case CPU_DOWN_FAILED_FROZEN:
+
+ /*
+ * num_cpus_frozen tracks how many CPUs are involved in suspend
+ * resume sequence. As long as this is not the last online
+ * operation in the resume sequence, just build a single sched
+ * domain, ignoring cpusets.
+ */
+ num_cpus_frozen--;
+ if (likely(num_cpus_frozen)) {
+ partition_sched_domains(1, NULL, NULL);
+ break;
+ }
+
+ /*
+ * This is the last CPU online operation. So fall through and
+ * restore the original sched domains by considering the
+ * cpuset configurations.
+ */
+
+ case CPU_ONLINE:
+ cpuset_update_active_cpus(true);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+ return NOTIFY_OK;
+}
+
+static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
+ void *hcpu)
+{
+ switch (action) {
+ case CPU_DOWN_PREPARE:
+ cpuset_update_active_cpus(false);
+ break;
+ case CPU_DOWN_PREPARE_FROZEN:
+ num_cpus_frozen++;
+ partition_sched_domains(1, NULL, NULL);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+ return NOTIFY_OK;
+}
+
+#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
+/*
+ * Cheaper version of the below functions in case support for SMT and MC is
+ * compiled in but CPUs have no siblings.
+ */
+static bool sole_cpu_idle(int cpu)
+{
+ return rq_idle(cpu_rq(cpu));
+}
+#endif
+#ifdef CONFIG_SCHED_SMT
+static const cpumask_t *thread_cpumask(int cpu)
+{
+ return topology_sibling_cpumask(cpu);
+}
+/* All this CPU's SMT siblings are idle */
+static bool siblings_cpu_idle(int cpu)
+{
+ return cpumask_subset(thread_cpumask(cpu), &grq.cpu_idle_map);
+}
+#endif
+#ifdef CONFIG_SCHED_MC
+static const cpumask_t *core_cpumask(int cpu)
+{
+ return topology_core_cpumask(cpu);
+}
+/* All this CPU's shared cache siblings are idle */
+static bool cache_cpu_idle(int cpu)
+{
+ return cpumask_subset(core_cpumask(cpu), &grq.cpu_idle_map);
+}
+#endif
+
+enum sched_domain_level {
+ SD_LV_NONE = 0,
+ SD_LV_SIBLING,
+ SD_LV_MC,
+ SD_LV_BOOK,
+ SD_LV_CPU,
+ SD_LV_NODE,
+ SD_LV_ALLNODES,
+ SD_LV_MAX
+};
+
+void __init sched_init_smp(void)
+{
+ struct sched_domain *sd;
+ int cpu, other_cpu;
+
+ cpumask_var_t non_isolated_cpus;
+
+ alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
+ alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
+
+ sched_init_numa();
+
+ /*
+ * There's no userspace yet to cause hotplug operations; hence all the
+ * cpu masks are stable and all blatant races in the below code cannot
+ * happen.
+ */
+ mutex_lock(&sched_domains_mutex);
+ init_sched_domains(cpu_active_mask);
+ cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
+ if (cpumask_empty(non_isolated_cpus))
+ cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
+ mutex_unlock(&sched_domains_mutex);
+
+ hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
+ hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
+ hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
+
+ /* Move init over to a non-isolated CPU */
+ if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
+ BUG();
+ free_cpumask_var(non_isolated_cpus);
+
+ mutex_lock(&sched_domains_mutex);
+ grq_lock_irq();
+ /*
+ * Set up the relative cache distance of each online cpu from each
+ * other in a simple array for quick lookup. Locality is determined
+	 * by the closest sched_domain that CPUs are separated by. CPUs that
+	 * share cache through SMT or MC are treated as local. CPUs within the
+	 * same node that do not share cache are treated as not local, and CPUs
+	 * not even in the same domain (different nodes) are treated as very
+	 * distant.
+ */
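+	/* The resulting cpu_locality values are: 0 = same CPU, 1 = SMT sibling,
+	 * 2 = shared cache (MC), 3 = same node, 4 = different node (the default
+	 * set in sched_init()). */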
+ for_each_online_cpu(cpu) {
+ struct rq *rq = cpu_rq(cpu);
+
+ /* First check if this cpu is in the same node */
+ for_each_domain(cpu, sd) {
+ if (sd->level > SD_LV_NODE)
+ continue;
+ /* Set locality to local node if not already found lower */
+ for_each_cpu(other_cpu, sched_domain_span(sd)) {
+ if (rq->cpu_locality[other_cpu] > 3)
+ rq->cpu_locality[other_cpu] = 3;
+ }
+ }
+
+ /*
+ * Each runqueue has its own function in case it doesn't have
+ * siblings of its own allowing mixed topologies.
+ */
+#ifdef CONFIG_SCHED_MC
+ for_each_cpu(other_cpu, core_cpumask(cpu)) {
+ if (rq->cpu_locality[other_cpu] > 2)
+ rq->cpu_locality[other_cpu] = 2;
+ }
+ if (cpumask_weight(core_cpumask(cpu)) > 1)
+ rq->cache_idle = cache_cpu_idle;
+#endif
+#ifdef CONFIG_SCHED_SMT
+ for_each_cpu(other_cpu, thread_cpumask(cpu))
+ rq->cpu_locality[other_cpu] = 1;
+ if (cpumask_weight(thread_cpumask(cpu)) > 1)
+ rq->siblings_idle = siblings_cpu_idle;
+#endif
+ }
+ grq_unlock_irq();
+ mutex_unlock(&sched_domains_mutex);
+
+ for_each_online_cpu(cpu) {
+ struct rq *rq = cpu_rq(cpu);
+ for_each_online_cpu(other_cpu) {
+ if (other_cpu <= cpu)
+ continue;
+ printk(KERN_DEBUG "BFS LOCALITY CPU %d to %d: %d\n", cpu, other_cpu, rq->cpu_locality[other_cpu]);
+ }
+ }
+}
+#else
+void __init sched_init_smp(void)
+{
+}
+#endif /* CONFIG_SMP */
+
+int in_sched_functions(unsigned long addr)
+{
+ return in_lock_functions(addr) ||
+ (addr >= (unsigned long)__sched_text_start
+ && addr < (unsigned long)__sched_text_end);
+}
+
+void __init sched_init(void)
+{
+#ifdef CONFIG_SMP
+ int cpu_ids;
+#endif
+ int i;
+ struct rq *rq;
+
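+	/*
+	 * Initialise the nice level weightings: prio_ratios[0] is the baseline
+	 * of 128 and each successive level is roughly 10% larger (x 11 / 10).
+	 */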
+ prio_ratios[0] = 128;
+ for (i = 1 ; i < NICE_WIDTH ; i++)
+ prio_ratios[i] = prio_ratios[i - 1] * 11 / 10;
+
+ raw_spin_lock_init(&grq.lock);
+ grq.nr_running = grq.nr_uninterruptible = grq.nr_switches = 0;
+ grq.niffies = 0;
+ grq.last_jiffy = jiffies;
+ raw_spin_lock_init(&grq.iso_lock);
+ grq.iso_ticks = 0;
+ grq.iso_refractory = false;
+ grq.noc = 1;
+#ifdef CONFIG_SMP
+ init_defrootdomain();
+ grq.qnr = grq.idle_cpus = 0;
+ cpumask_clear(&grq.cpu_idle_map);
+#else
+ uprq = &per_cpu(runqueues, 0);
+#endif
+ for_each_possible_cpu(i) {
+ rq = cpu_rq(i);
+ rq->grq_lock = &grq.lock;
+ rq->user_pc = rq->nice_pc = rq->softirq_pc = rq->system_pc =
+ rq->iowait_pc = rq->idle_pc = 0;
+ rq->dither = false;
+#ifdef CONFIG_SMP
+ rq->sticky_task = NULL;
+ rq->last_niffy = 0;
+ rq->sd = NULL;
+ rq->rd = NULL;
+ rq->online = false;
+ rq->cpu = i;
+ rq_attach_root(rq, &def_root_domain);
+#endif
+ atomic_set(&rq->nr_iowait, 0);
+ }
+
+#ifdef CONFIG_SMP
+ cpu_ids = i;
+ /*
+ * Set the base locality for cpu cache distance calculation to
+ * "distant" (3). Make sure the distance from a CPU to itself is 0.
+ */
+ for_each_possible_cpu(i) {
+ int j;
+
+ rq = cpu_rq(i);
+#ifdef CONFIG_SCHED_SMT
+ rq->siblings_idle = sole_cpu_idle;
+#endif
+#ifdef CONFIG_SCHED_MC
+ rq->cache_idle = sole_cpu_idle;
+#endif
+ rq->cpu_locality = kmalloc(cpu_ids * sizeof(int *), GFP_ATOMIC);
+ for_each_possible_cpu(j) {
+ if (i == j)
+ rq->cpu_locality[j] = 0;
+ else
+ rq->cpu_locality[j] = 4;
+ }
+ }
+#endif
+
+ for (i = 0; i < PRIO_LIMIT; i++)
+ INIT_LIST_HEAD(grq.queue + i);
+ /* delimiter for bitsearch */
+ __set_bit(PRIO_LIMIT, grq.prio_bitmap);
+
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+ INIT_HLIST_HEAD(&init_task.preempt_notifiers);
+#endif
+
+ /*
+ * The boot idle thread does lazy MMU switching as well:
+ */
+ atomic_inc(&init_mm.mm_count);
+ enter_lazy_tlb(&init_mm, current);
+
+ /*
+ * Make us the idle thread. Technically, schedule() should not be
+ * called from this thread, however somewhere below it might be,
+ * but because we are the idle thread, we just pick up running again
+ * when this runqueue becomes "idle".
+ */
+ init_idle(current, smp_processor_id());
+
+#ifdef CONFIG_SMP
+ zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
+ /* May be allocated at isolcpus cmdline parse time */
+ if (cpu_isolated_map == NULL)
+ zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
+ idle_thread_set_boot_cpu();
+#endif /* SMP */
+}
+
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+static inline int preempt_count_equals(int preempt_offset)
+{
+ int nested = preempt_count() + rcu_preempt_depth();
+
+ return (nested == preempt_offset);
+}
+
+void __might_sleep(const char *file, int line, int preempt_offset)
+{
+ /*
+	 * Blocking primitives will set (and therefore destroy) current->state;
+	 * since we will exit with TASK_RUNNING, make sure we enter with it,
+ * otherwise we will destroy state.
+ */
+ WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
+ "do not call blocking ops when !TASK_RUNNING; "
+ "state=%lx set at [<%p>] %pS\n",
+ current->state,
+ (void *)current->task_state_change,
+ (void *)current->task_state_change);
+
+ ___might_sleep(file, line, preempt_offset);
+}
+EXPORT_SYMBOL(__might_sleep);
+
+void ___might_sleep(const char *file, int line, int preempt_offset)
+{
+ static unsigned long prev_jiffy; /* ratelimiting */
+
+ rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
+ if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
+ !is_idle_task(current)) ||
+ system_state != SYSTEM_RUNNING || oops_in_progress)
+ return;
+ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
+ return;
+ prev_jiffy = jiffies;
+
+ printk(KERN_ERR
+ "BUG: sleeping function called from invalid context at %s:%d\n",
+ file, line);
+ printk(KERN_ERR
+ "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
+ in_atomic(), irqs_disabled(),
+ current->pid, current->comm);
+
+ if (task_stack_end_corrupted(current))
+ printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
+
+ debug_show_held_locks(current);
+ if (irqs_disabled())
+ print_irqtrace_events(current);
+#ifdef CONFIG_DEBUG_PREEMPT
+ if (!preempt_count_equals(preempt_offset)) {
+ pr_err("Preemption disabled at:");
+ print_ip_sym(current->preempt_disable_ip);
+ pr_cont("\n");
+ }
+#endif
+ dump_stack();
+}
+EXPORT_SYMBOL(___might_sleep);
+#endif
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static inline void normalise_rt_tasks(void)
+{
+ struct task_struct *g, *p;
+ unsigned long flags;
+ struct rq *rq;
+ int queued;
+
+ read_lock(&tasklist_lock);
+ for_each_process_thread(g, p) {
+ /*
+ * Only normalize user tasks:
+ */
+ if (p->flags & PF_KTHREAD)
+ continue;
+
+ if (!rt_task(p) && !iso_task(p))
+ continue;
+
+ rq = task_grq_lock(p, &flags);
+ queued = task_queued(p);
+ if (queued)
+ dequeue_task(p);
+ __setscheduler(p, rq, SCHED_NORMAL, 0, false);
+ if (queued) {
+ enqueue_task(p, rq);
+ try_preempt(p, rq);
+ }
+
+ task_grq_unlock(&flags);
+ }
+ read_unlock(&tasklist_lock);
+}
+
+void normalize_rt_tasks(void)
+{
+ normalise_rt_tasks();
+}
+#endif /* CONFIG_MAGIC_SYSRQ */
+
+#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
+/*
+ * These functions are only useful for the IA64 MCA handling, or kdb.
+ *
+ * They can only be called when the whole system has been
+ * stopped - every CPU needs to be quiescent, and no scheduling
+ * activity can take place. Using them for anything else would
+ * be a serious bug, and as a result, they aren't even visible
+ * under any other configuration.
+ */
+
+/**
+ * curr_task - return the current task for a given cpu.
+ * @cpu: the processor in question.
+ *
+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
+ *
+ * Return: The current task for @cpu.
+ */
+struct task_struct *curr_task(int cpu)
+{
+ return cpu_curr(cpu);
+}
+
+#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
+
+#ifdef CONFIG_IA64
+/**
+ * set_curr_task - set the current task for a given cpu.
+ * @cpu: the processor in question.
+ * @p: the task pointer to set.
+ *
+ * Description: This function must only be used when non-maskable interrupts
+ * are serviced on a separate stack. It allows the architecture to switch the
+ * notion of the current task on a cpu in a non-blocking manner. This function
+ * must be called with all CPUs synchronised and interrupts disabled; the
+ * caller must save the original value of the current task (see
+ * curr_task() above) and restore that value before re-enabling interrupts and
+ * restarting the system.
+ *
+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
+ */
+void set_curr_task(int cpu, struct task_struct *p)
+{
+ cpu_curr(cpu) = p;
+}
+
+#endif
+
+/*
+ * Use precise platform statistics if available:
+ */
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+{
+ *ut = p->utime;
+ *st = p->stime;
+}
+EXPORT_SYMBOL_GPL(task_cputime_adjusted);
+
+void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+{
+ struct task_cputime cputime;
+
+ thread_group_cputime(p, &cputime);
+
+ *ut = cputime.utime;
+ *st = cputime.stime;
+}
+
+void vtime_account_system_irqsafe(struct task_struct *tsk)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ vtime_account_system(tsk);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(vtime_account_system_irqsafe);
+
+#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
+void vtime_task_switch(struct task_struct *prev)
+{
+ if (is_idle_task(prev))
+ vtime_account_idle(prev);
+ else
+ vtime_account_system(prev);
+
+ vtime_account_user(prev);
+ arch_vtime_task_switch(prev);
+}
+#endif
+
+#else
+/*
+ * Perform (stime * rtime) / total, but avoid multiplication overflow by
+ * losing precision when the numbers are big.
+ */
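+/*
+ * For example, with stime = 30, rtime = 120 and total = 40 (stime + utime),
+ * the scaled result is 30 * 120 / 40 = 90, i.e. stime's share of the actual
+ * runtime; the loop below only shifts the operands so that this product and
+ * division fit in 32x32->64 arithmetic.
+ */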
+static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
+{
+ u64 scaled;
+
+ for (;;) {
+ /* Make sure "rtime" is the bigger of stime/rtime */
+ if (stime > rtime) {
+ u64 tmp = rtime; rtime = stime; stime = tmp;
+ }
+
+ /* Make sure 'total' fits in 32 bits */
+ if (total >> 32)
+ goto drop_precision;
+
+ /* Does rtime (and thus stime) fit in 32 bits? */
+ if (!(rtime >> 32))
+ break;
+
+ /* Can we just balance rtime/stime rather than dropping bits? */
+ if (stime >> 31)
+ goto drop_precision;
+
+ /* We can grow stime and shrink rtime and try to make them both fit */
+ stime <<= 1;
+ rtime >>= 1;
+ continue;
+
+drop_precision:
+ /* We drop from rtime, it has more bits than stime */
+ rtime >>= 1;
+ total >>= 1;
+ }
+
+ /*
+ * Make sure gcc understands that this is a 32x32->64 multiply,
+ * followed by a 64/32->64 divide.
+ */
+ scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
+ return (__force cputime_t) scaled;
+}
+
+/*
+ * Adjust tick based cputime random precision against scheduler
+ * runtime accounting.
+ */
+static void cputime_adjust(struct task_cputime *curr,
+ struct prev_cputime *prev,
+ cputime_t *ut, cputime_t *st)
+{
+ cputime_t rtime, stime, utime, total;
+
+ stime = curr->stime;
+ total = stime + curr->utime;
+
+ /*
+	 * Tick based cputime accounting depends on whether a task's random
+	 * scheduling timeslices happen to be interrupted by the timer or not.
+	 * Depending on these circumstances, the number of these interrupts
+	 * may over- or under-estimate the real user and system cputime, so the
+	 * tick based values only match them with variable precision.
+	 *
+	 * Fix this by scaling these tick based values against the total
+	 * runtime accounted by the scheduler.
+ */
+ rtime = nsecs_to_cputime(curr->sum_exec_runtime);
+
+ /*
+ * Update userspace visible utime/stime values only if actual execution
+	 * time is bigger than what has already been exported. Note that this
+	 * can happen if we previously provided bigger values due to scaling
+	 * inaccuracy on big numbers.
+ */
+ if (prev->stime + prev->utime >= rtime)
+ goto out;
+
+ if (total) {
+ stime = scale_stime((__force u64)stime,
+ (__force u64)rtime, (__force u64)total);
+ utime = rtime - stime;
+ } else {
+ stime = rtime;
+ utime = 0;
+ }
+
+ /*
+ * If the tick based count grows faster than the scheduler one,
+ * the result of the scaling may go backward.
+ * Let's enforce monotonicity.
+ */
+ prev->stime = max(prev->stime, stime);
+ prev->utime = max(prev->utime, utime);
+
+out:
+ *ut = prev->utime;
+ *st = prev->stime;
+}
+
+void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+{
+ struct task_cputime cputime = {
+ .sum_exec_runtime = tsk_seruntime(p),
+ };
+
+ task_cputime(p, &cputime.utime, &cputime.stime);
+ cputime_adjust(&cputime, &p->prev_cputime, ut, st);
+}
+EXPORT_SYMBOL_GPL(task_cputime_adjusted);
+
+/*
+ * Must be called with siglock held.
+ */
+void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+{
+ struct task_cputime cputime;
+
+ thread_group_cputime(p, &cputime);
+ cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
+}
+#endif
+
+void init_idle_bootup_task(struct task_struct *idle)
+{}
+
+#ifdef CONFIG_SCHED_DEBUG
+void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
+{}
+
+void proc_sched_set_task(struct task_struct *p)
+{}
+#endif
+
+#ifdef CONFIG_SMP
+#define SCHED_LOAD_SHIFT (10)
+#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)
+
+unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
+{
+ return SCHED_LOAD_SCALE;
+}
+
+unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
+{
+ unsigned long weight = cpumask_weight(sched_domain_span(sd));
+ unsigned long smt_gain = sd->smt_gain;
+
+ smt_gain /= weight;
+
+ return smt_gain;
+}
+#endif
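
For reference, the precision-reduction loop in scale_stime() above can be exercised outside the kernel. A minimal sketch, assuming nothing beyond stdint (the names are mine and the cputime_t casts are dropped; total must be non-zero, as the kernel caller guarantees):

#include <stdint.h>
#include <stdio.h>

/* Sketch of scale_stime(): compute stime * rtime / total without
 * overflowing 64 bits, trading precision for range when needed. */
static uint64_t scale_stime_sketch(uint64_t stime, uint64_t rtime, uint64_t total)
{
	for (;;) {
		/* keep rtime the larger of the pair */
		if (stime > rtime) {
			uint64_t tmp = rtime; rtime = stime; stime = tmp;
		}
		/* 'total' must fit in 32 bits */
		if (total >> 32)
			goto drop_precision;
		/* does rtime (and thus stime) already fit in 32 bits? */
		if (!(rtime >> 32))
			break;
		/* balance the pair instead of dropping bits, if possible */
		if (stime >> 31)
			goto drop_precision;
		stime <<= 1;
		rtime >>= 1;
		continue;
drop_precision:
		/* drop from rtime and total; rtime has more bits to spare */
		rtime >>= 1;
		total >>= 1;
	}
	/* 32x32->64 multiply, then 64/32 divide */
	return (uint32_t)stime * (uint64_t)(uint32_t)rtime / (uint32_t)total;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)scale_stime_sketch(3, 10, 13));
	return 0;
}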
diff --git a/kernel/sched/bfs_sched.h b/kernel/sched/bfs_sched.h
new file mode 100644
index 000000000..5d97f7919
--- /dev/null
+++ b/kernel/sched/bfs_sched.h
@@ -0,0 +1,181 @@
+#include <linux/sched.h>
+#include <linux/cpuidle.h>
+#include <linux/stop_machine.h>
+
+#ifndef BFS_SCHED_H
+#define BFS_SCHED_H
+
+/*
+ * This is the main, per-CPU runqueue data structure.
+ * This data should only be modified by the local cpu.
+ */
+struct rq {
+ struct task_struct *curr, *idle, *stop;
+ struct mm_struct *prev_mm;
+
+ /* Pointer to grq spinlock */
+ raw_spinlock_t *grq_lock;
+
+ /* Stored data about rq->curr to work outside grq lock */
+ u64 rq_deadline;
+ unsigned int rq_policy;
+ int rq_time_slice;
+ u64 rq_last_ran;
+ int rq_prio;
+ bool rq_running; /* There is a task running */
+ int soft_affined; /* Running or queued tasks with this set as their rq */
+#ifdef CONFIG_SMT_NICE
+ struct mm_struct *rq_mm;
+ int rq_smt_bias; /* Policy/nice level bias across smt siblings */
+#endif
+ /* Accurate timekeeping data */
+ u64 timekeep_clock;
+ unsigned long user_pc, nice_pc, irq_pc, softirq_pc, system_pc,
+ iowait_pc, idle_pc;
+ atomic_t nr_iowait;
+
+#ifdef CONFIG_SMP
+ int cpu; /* cpu of this runqueue */
+ bool online;
+ bool scaling; /* This CPU is managed by a scaling CPU freq governor */
+ struct task_struct *sticky_task;
+
+ struct root_domain *rd;
+ struct sched_domain *sd;
+ int *cpu_locality; /* CPU relative cache distance */
+#ifdef CONFIG_SCHED_SMT
+ bool (*siblings_idle)(int cpu);
+ /* See if all smt siblings are idle */
+#endif /* CONFIG_SCHED_SMT */
+#ifdef CONFIG_SCHED_MC
+ bool (*cache_idle)(int cpu);
+ /* See if all cache siblings are idle */
+#endif /* CONFIG_SCHED_MC */
+ u64 last_niffy; /* Last time this RQ updated grq.niffies */
+#endif /* CONFIG_SMP */
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ u64 prev_irq_time;
+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+#ifdef CONFIG_PARAVIRT
+ u64 prev_steal_time;
+#endif /* CONFIG_PARAVIRT */
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+ u64 prev_steal_time_rq;
+#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */
+
+ u64 clock, old_clock, last_tick;
+ u64 clock_task;
+ bool dither;
+
+#ifdef CONFIG_SCHEDSTATS
+
+ /* latency stats */
+ struct sched_info rq_sched_info;
+ unsigned long long rq_cpu_time;
+ /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
+
+ /* sys_sched_yield() stats */
+ unsigned int yld_count;
+
+ /* schedule() stats */
+ unsigned int sched_switch;
+ unsigned int sched_count;
+ unsigned int sched_goidle;
+
+ /* try_to_wake_up() stats */
+ unsigned int ttwu_count;
+ unsigned int ttwu_local;
+#endif /* CONFIG_SCHEDSTATS */
+#ifdef CONFIG_CPU_IDLE
+ /* Must be inspected within an RCU read lock section */
+ struct cpuidle_state *idle_state;
+#endif
+};
+
+#ifdef CONFIG_SMP
+struct rq *cpu_rq(int cpu);
+#endif
+
+#ifndef CONFIG_SMP
+extern struct rq *uprq;
+#define cpu_rq(cpu) (uprq)
+#define this_rq() (uprq)
+#define raw_rq() (uprq)
+#define task_rq(p) (uprq)
+#define cpu_curr(cpu) ((uprq)->curr)
+#else /* CONFIG_SMP */
+DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+#define this_rq() this_cpu_ptr(&runqueues)
+#define raw_rq() raw_cpu_ptr(&runqueues)
+#endif /* CONFIG_SMP */
+
+static inline u64 __rq_clock_broken(struct rq *rq)
+{
+ return READ_ONCE(rq->clock);
+}
+
+static inline u64 rq_clock(struct rq *rq)
+{
+ lockdep_assert_held(rq->grq_lock);
+ return rq->clock;
+}
+
+static inline u64 rq_clock_task(struct rq *rq)
+{
+ lockdep_assert_held(rq->grq_lock);
+ return rq->clock_task;
+}
+
+extern struct mutex sched_domains_mutex;
+
+#define rcu_dereference_check_sched_domain(p) \
+ rcu_dereference_check((p), \
+ lockdep_is_held(&sched_domains_mutex))
+
+/*
+ * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
+ * See detach_destroy_domains: synchronize_sched for details.
+ *
+ * The domain tree of any CPU may only be accessed from within
+ * preempt-disabled sections.
+ */
+#define for_each_domain(cpu, __sd) \
+ for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
+
+static inline void sched_ttwu_pending(void) { }
+
+static inline int task_on_rq_queued(struct task_struct *p)
+{
+ return p->on_rq;
+}
+
+#ifdef CONFIG_SMP
+
+extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
+
+#endif
+
+#ifdef CONFIG_CPU_IDLE
+static inline void idle_set_state(struct rq *rq,
+ struct cpuidle_state *idle_state)
+{
+ rq->idle_state = idle_state;
+}
+
+static inline struct cpuidle_state *idle_get_state(struct rq *rq)
+{
+ WARN_ON(!rcu_read_lock_held());
+ return rq->idle_state;
+}
+#else
+static inline void idle_set_state(struct rq *rq,
+ struct cpuidle_state *idle_state)
+{
+}
+
+static inline struct cpuidle_state *idle_get_state(struct rq *rq)
+{
+ return NULL;
+}
+#endif
+#endif /* BFS_SCHED_H */
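
One point worth noting about the header above: UP builds alias every runqueue accessor to the single uprq, while SMP builds go through the per-CPU runqueues array, so code written against cpu_rq()/this_rq() compiles unchanged for both. A stand-alone mock of that split, purely illustrative (CONFIG_SMP here is just a local #define, not the real Kconfig symbol):

#include <stdbool.h>
#include <stdio.h>

/* Stand-alone mock of the UP/SMP accessor split: on UP every accessor
 * collapses onto one global runqueue, on SMP it indexes a per-CPU array. */
struct rq { bool rq_running; };

#ifndef CONFIG_SMP
static struct rq uprq_storage;
static struct rq *uprq = &uprq_storage;
#define cpu_rq(cpu)	(uprq)
#define this_rq()	(uprq)
#else
static struct rq runqueues[4];		/* stand-in for the per-CPU data */
#define cpu_rq(cpu)	(&runqueues[cpu])
#define this_rq()	(&runqueues[0])
#endif

int main(void)
{
	cpu_rq(0)->rq_running = true;
	printf("cpu 0 has a running task: %d\n", this_rq()->rq_running);
	return 0;
}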
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 05114b15b..544c98aac 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5378,6 +5378,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
case CPU_UP_PREPARE:
rq->calc_load_update = calc_load_update;
+ account_reset_rq(rq);
break;
case CPU_ONLINE:
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 544a7133c..bbf813929 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -13,7 +13,11 @@
#include <trace/events/power.h>
+#ifdef CONFIG_SCHED_BFS
+#include "bfs_sched.h"
+#else
#include "sched.h"
+#endif
/**
* sched_idle_set_state - Record idle state for the current CPU.
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ef5875fff..60c5b6ba4 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1773,3 +1773,16 @@ static inline u64 irq_time_read(int cpu)
}
#endif /* CONFIG_64BIT */
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+
+static inline void account_reset_rq(struct rq *rq)
+{
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ rq->prev_irq_time = 0;
+#endif
+#ifdef CONFIG_PARAVIRT
+ rq->prev_steal_time = 0;
+#endif
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+ rq->prev_steal_time_rq = 0;
+#endif
+}
diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
index 87e2c9f0c..7466a0bb2 100644
--- a/kernel/sched/stats.c
+++ b/kernel/sched/stats.c
@@ -4,7 +4,11 @@
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
+#ifndef CONFIG_SCHED_BFS
#include "sched.h"
+#else
+#include "bfs_sched.h"
+#endif
/*
* bump this up when changing the output format or the meaning of an existing
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index f5102fabe..c4887773a 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -125,7 +125,13 @@ static int __maybe_unused one = 1;
static int __maybe_unused two = 2;
static int __maybe_unused four = 4;
static unsigned long one_ul = 1;
-static int one_hundred = 100;
+static int __maybe_unused one_hundred = 100;
+#ifdef CONFIG_SCHED_BFS
+extern int rr_interval;
+extern int sched_interactive;
+extern int sched_iso_cpu;
+static int __read_mostly one_thousand = 1000;
+#endif
#ifdef CONFIG_PRINTK
static int ten_thousand = 10000;
#endif
@@ -260,7 +266,7 @@ static struct ctl_table sysctl_base_table[] = {
{ }
};
-#ifdef CONFIG_SCHED_DEBUG
+#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_SCHED_BFS)
static int min_sched_granularity_ns = 100000; /* 100 usecs */
static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
static int min_wakeup_granularity_ns; /* 0 usecs */
@@ -277,6 +283,7 @@ static int max_extfrag_threshold = 1000;
#endif
static struct ctl_table kern_table[] = {
+#ifndef CONFIG_SCHED_BFS
{
.procname = "sched_child_runs_first",
.data = &sysctl_sched_child_runs_first,
@@ -445,6 +452,7 @@ static struct ctl_table kern_table[] = {
.extra1 = &one,
},
#endif
+#endif /* !CONFIG_SCHED_BFS */
#ifdef CONFIG_PROVE_LOCKING
{
.procname = "prove_locking",
@@ -1002,6 +1010,35 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec,
},
#endif
+#ifdef CONFIG_SCHED_BFS
+ {
+ .procname = "rr_interval",
+ .data = &rr_interval,
+ .maxlen = sizeof (int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .extra1 = &one,
+ .extra2 = &one_thousand,
+ },
+ {
+ .procname = "interactive",
+ .data = &sched_interactive,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ {
+ .procname = "iso_cpu",
+ .data = &sched_iso_cpu,
+ .maxlen = sizeof (int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one_hundred,
+ },
+#endif
#if defined(CONFIG_S390) && defined(CONFIG_SMP)
{
.procname = "spin_retry",
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 7e7746a42..10a1d7dc9 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1321,7 +1321,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
}
mnt = task_active_pid_ns(current)->proc_mnt;
- file = file_open_root(mnt->mnt_root, mnt, pathname, flags);
+ file = file_open_root(mnt->mnt_root, mnt, pathname, flags, 0);
result = PTR_ERR(file);
if (IS_ERR(file))
goto out_putname;
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 53fa971d0..bce3211e7 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -118,3 +118,4 @@ void task_work_run(void)
} while (work);
}
}
+EXPORT_SYMBOL_GPL(task_work_run);
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 4008d9f95..6931b6e3c 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -89,7 +89,7 @@ config NO_HZ_IDLE
config NO_HZ_FULL
bool "Full dynticks system (tickless)"
# NO_HZ_COMMON dependency
- depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
+ depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS && !SCHED_BFS
# We need at least one periodic CPU for timekeeping
depends on SMP
depends on HAVE_CONTEXT_TRACKING
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index f5e86d282..7a48442ed 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -448,7 +448,7 @@ static void cleanup_timers(struct list_head *head)
*/
void posix_cpu_timers_exit(struct task_struct *tsk)
{
- add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
+ add_device_randomness((const void*) &tsk_seruntime(tsk),
sizeof(unsigned long long));
cleanup_timers(tsk->cpu_timers);
@@ -878,7 +878,7 @@ static void check_thread_timers(struct task_struct *tsk,
tsk_expires->virt_exp = expires_to_cputime(expires);
tsk_expires->sched_exp = check_timers_list(++timers, firing,
- tsk->se.sum_exec_runtime);
+ tsk_seruntime(tsk));
/*
* Check for the special case thread timers.
@@ -889,7 +889,7 @@ static void check_thread_timers(struct task_struct *tsk,
READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
if (hard != RLIM_INFINITY &&
- tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
+ tsk_rttimeout(tsk) > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
/*
* At the hard limit, we just die.
* No need to calculate anything else now.
@@ -897,7 +897,7 @@ static void check_thread_timers(struct task_struct *tsk,
__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
return;
}
- if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
+ if (tsk_rttimeout(tsk) > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
/*
* At the soft limit, send a SIGXCPU every second.
*/
@@ -1144,7 +1144,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
struct task_cputime task_sample;
task_cputime(tsk, &task_sample.utime, &task_sample.stime);
- task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
+ task_sample.sum_exec_runtime = tsk_seruntime(tsk);
if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
return 1;
}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d9293402e..8305cbb2d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4949,7 +4949,10 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
spd.nr_pages = i;
- ret = splice_to_pipe(pipe, &spd);
+ if (i)
+ ret = splice_to_pipe(pipe, &spd);
+ else
+ ret = 0;
out:
splice_shrink_spd(&spd);
return ret;
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index e4e56589e..be3222b7d 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -109,8 +109,12 @@ static int func_prolog_dec(struct trace_array *tr,
return 0;
local_save_flags(*flags);
- /* slight chance to get a false positive on tracing_cpu */
- if (!irqs_disabled_flags(*flags))
+ /*
+ * Slight chance to get a false positive on tracing_cpu,
+ * although I'm starting to think there isn't a chance.
+ * Leave this for now just to be paranoid.
+ */
+ if (!irqs_disabled_flags(*flags) && !preempt_count())
return 0;
*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index 060df67db..f96f0383f 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -296,6 +296,9 @@ static int t_show(struct seq_file *m, void *v)
const char *str = *fmt;
int i;
+ if (!*fmt)
+ return 0;
+
seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt);
/*
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index b0f86ea77..287cf721c 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -1039,10 +1039,15 @@ static int trace_wakeup_test_thread(void *data)
{
/* Make this a -deadline thread */
static const struct sched_attr attr = {
+#ifdef CONFIG_SCHED_BFS
+ /* No deadline on BFS, use RR */
+ .sched_policy = SCHED_RR,
+#else
.sched_policy = SCHED_DEADLINE,
.sched_runtime = 100000ULL,
.sched_deadline = 10000000ULL,
.sched_period = 10000000ULL
+#endif
};
struct wakeup_test_data *x = data;
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index b3ace6ebb..9acb29f28 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -923,6 +923,9 @@ static int proc_watchdog_common(int which, struct ctl_table *table, int write,
* both lockup detectors are disabled if proc_watchdog_update()
* returns an error.
*/
+ if (old == new)
+ goto out;
+
err = proc_watchdog_update();
}
out:
@@ -967,7 +970,7 @@ int proc_soft_watchdog(struct ctl_table *table, int write,
int proc_watchdog_thresh(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- int err, old;
+ int err, old, new;
get_online_cpus();
mutex_lock(&watchdog_proc_mutex);
@@ -987,6 +990,10 @@ int proc_watchdog_thresh(struct ctl_table *table, int write,
/*
* Update the sample period. Restore on failure.
*/
+ new = ACCESS_ONCE(watchdog_thresh);
+ if (old == new)
+ goto out;
+
set_sample_period();
err = proc_watchdog_update();
if (err) {
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 8bfd1aca7..897979c8e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1258,7 +1258,7 @@ config TORTURE_TEST
config RCU_TORTURE_TEST
tristate "torture tests for RCU"
- depends on DEBUG_KERNEL
+ depends on DEBUG_KERNEL && !SCHED_BFS
select TORTURE_TEST
select SRCU
select TASKS_RCU
diff --git a/mm/Makefile b/mm/Makefile
index 2ed43191f..e3a53f5ba 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -21,7 +21,7 @@ obj-y := filemap.o mempool.o oom_kill.o \
mm_init.o mmu_context.o percpu.o slab_common.o \
compaction.o vmacache.o \
interval_tree.o list_lru.o workingset.o \
- debug.o $(mmu-y)
+ prfile.o debug.o $(mmu-y)
obj-y += init-mm.o
diff --git a/mm/filemap.c b/mm/filemap.c
index da7a35d83..f800f8749 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2229,7 +2229,7 @@ int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
int ret = VM_FAULT_LOCKED;
sb_start_pagefault(inode->i_sb);
- file_update_time(vma->vm_file);
+ vma_file_update_time(vma);
lock_page(page);
if (page->mapping != inode->i_mapping) {
unlock_page(page);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d06cae2de..caf3bf73b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1262,7 +1262,7 @@ static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
return limit;
}
-static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
+static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
int order)
{
struct oom_control oc = {
@@ -1340,6 +1340,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
}
unlock:
mutex_unlock(&oom_lock);
+ return chosen;
}
#if MAX_NUMNODES > 1
@@ -5051,6 +5052,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
+ unsigned long nr_pages;
unsigned long high;
int err;
@@ -5061,6 +5063,11 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
memcg->high = high;
+ nr_pages = page_counter_read(&memcg->memory);
+ if (nr_pages > high)
+ try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
+ GFP_KERNEL, true);
+
memcg_wb_domain_size_changed(memcg);
return nbytes;
}
@@ -5082,6 +5089,8 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
+ unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
+ bool drained = false;
unsigned long max;
int err;
@@ -5090,9 +5099,36 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
if (err)
return err;
- err = mem_cgroup_resize_limit(memcg, max);
- if (err)
- return err;
+ xchg(&memcg->memory.limit, max);
+
+ for (;;) {
+ unsigned long nr_pages = page_counter_read(&memcg->memory);
+
+ if (nr_pages <= max)
+ break;
+
+ if (signal_pending(current)) {
+ err = -EINTR;
+ break;
+ }
+
+ if (!drained) {
+ drain_all_stock(memcg);
+ drained = true;
+ continue;
+ }
+
+ if (nr_reclaims) {
+ if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
+ GFP_KERNEL, true))
+ nr_reclaims--;
+ continue;
+ }
+
+ mem_cgroup_events(memcg, MEMCG_OOM, 1);
+ if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
+ break;
+ }
memcg_wb_domain_size_changed(memcg);
return nbytes;
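
The new memory.max write path above follows an enforce-then-converge pattern: publish the lower limit first, then push usage below it with progressively heavier tools (drain per-CPU stock once, retry direct reclaim a bounded number of times, finally invoke the memcg OOM killer). A stand-alone sketch of that control flow, with stub helpers in place of the memcg internals (the stubs and numbers are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

#define RECLAIM_RETRIES 5

/* Stubs standing in for page_counter_read(), drain_all_stock(),
 * try_to_free_mem_cgroup_pages() and mem_cgroup_out_of_memory(). */
static unsigned long usage = 120;
static unsigned long read_usage(void) { return usage; }
static void drain_caches(void) { usage -= 5; }
static bool reclaim(unsigned long want) { usage -= (want < 10 ? want : 10); return true; }
static bool oom_kill(void) { usage = 0; return true; }

static int enforce_max(unsigned long max)
{
	int retries = RECLAIM_RETRIES;
	bool drained = false;

	for (;;) {
		unsigned long nr = read_usage();

		if (nr <= max)
			return 0;
		if (!drained) {		/* cheap first: flush cached charges */
			drain_caches();
			drained = true;
			continue;
		}
		if (retries) {		/* bounded direct reclaim */
			if (!reclaim(nr - max))
				retries--;
			continue;
		}
		if (!oom_kill())	/* last resort; stop if nothing to kill */
			return -1;
	}
}

int main(void)
{
	printf("enforce_max -> %d, usage now %lu\n", enforce_max(50), usage);
	return 0;
}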
diff --git a/mm/memory.c b/mm/memory.c
index 8132787ae..3f7de66ed 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2042,7 +2042,7 @@ static inline int wp_page_reuse(struct mm_struct *mm,
}
if (!page_mkwrite)
- file_update_time(vma->vm_file);
+ vma_file_update_time(vma);
}
return VM_FAULT_WRITE;
diff --git a/mm/mmap.c b/mm/mmap.c
index 76d1ec291..fdd163e22 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -290,7 +290,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
if (vma->vm_ops && vma->vm_ops->close)
vma->vm_ops->close(vma);
if (vma->vm_file)
- fput(vma->vm_file);
+ vma_fput(vma);
mpol_put(vma_policy(vma));
kmem_cache_free(vm_area_cachep, vma);
return next;
@@ -909,7 +909,7 @@ again: remove_next = 1 + (end > next->vm_end);
if (remove_next) {
if (file) {
uprobe_munmap(next, next->vm_start, next->vm_end);
- fput(file);
+ vma_fput(vma);
}
if (next->anon_vma)
anon_vma_merge(vma, next);
@@ -1683,8 +1683,8 @@ out:
return addr;
unmap_and_free_vma:
+ vma_fput(vma);
vma->vm_file = NULL;
- fput(file);
/* Undo any partial mapping done by a device driver. */
unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
@@ -2479,7 +2479,7 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
goto out_free_mpol;
if (new->vm_file)
- get_file(new->vm_file);
+ vma_get_file(new);
if (new->vm_ops && new->vm_ops->open)
new->vm_ops->open(new);
@@ -2498,7 +2498,7 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
if (new->vm_ops && new->vm_ops->close)
new->vm_ops->close(new);
if (new->vm_file)
- fput(new->vm_file);
+ vma_fput(new);
unlink_anon_vmas(new);
out_free_mpol:
mpol_put(vma_policy(new));
@@ -2640,7 +2640,6 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
struct vm_area_struct *vma;
unsigned long populate = 0;
unsigned long ret = -EINVAL;
- struct file *file;
pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. "
"See Documentation/vm/remap_file_pages.txt.\n",
@@ -2708,10 +2707,10 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
}
}
- file = get_file(vma->vm_file);
+ vma_get_file(vma);
ret = do_mmap_pgoff(vma->vm_file, start, size,
prot, flags, pgoff, &populate);
- fput(file);
+ vma_fput(vma);
out:
up_write(&mm->mmap_sem);
if (populate)
@@ -2982,7 +2981,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
if (anon_vma_clone(new_vma, vma))
goto out_free_mempol;
if (new_vma->vm_file)
- get_file(new_vma->vm_file);
+ vma_get_file(new_vma);
if (new_vma->vm_ops && new_vma->vm_ops->open)
new_vma->vm_ops->open(new_vma);
vma_link(mm, new_vma, prev, rb_link, rb_parent);
diff --git a/mm/nommu.c b/mm/nommu.c
index fbf6f0f1d..1a4a06d09 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -671,7 +671,7 @@ static void __put_nommu_region(struct vm_region *region)
up_write(&nommu_region_sem);
if (region->vm_file)
- fput(region->vm_file);
+ vmr_fput(region);
/* IO memory and memory shared directly out of the pagecache
* from ramfs/tmpfs mustn't be released here */
@@ -829,7 +829,7 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
if (vma->vm_ops && vma->vm_ops->close)
vma->vm_ops->close(vma);
if (vma->vm_file)
- fput(vma->vm_file);
+ vma_fput(vma);
put_nommu_region(vma->vm_region);
kmem_cache_free(vm_area_cachep, vma);
}
@@ -1355,7 +1355,7 @@ unsigned long do_mmap(struct file *file,
goto error_just_free;
}
}
- fput(region->vm_file);
+ vmr_fput(region);
kmem_cache_free(vm_region_jar, region);
region = pregion;
result = start;
@@ -1430,10 +1430,10 @@ error_just_free:
up_write(&nommu_region_sem);
error:
if (region->vm_file)
- fput(region->vm_file);
+ vmr_fput(region);
kmem_cache_free(vm_region_jar, region);
if (vma->vm_file)
- fput(vma->vm_file);
+ vma_fput(vma);
kmem_cache_free(vm_area_cachep, vma);
return ret;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 58bc48250..497866021 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -661,34 +661,28 @@ static inline void __free_one_page(struct page *page,
unsigned long combined_idx;
unsigned long uninitialized_var(buddy_idx);
struct page *buddy;
- unsigned int max_order = MAX_ORDER;
+ unsigned int max_order;
+
+ max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
VM_BUG_ON(!zone_is_initialized(zone));
VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
VM_BUG_ON(migratetype == -1);
- if (is_migrate_isolate(migratetype)) {
- /*
- * We restrict max order of merging to prevent merge
- * between freepages on isolate pageblock and normal
- * pageblock. Without this, pageblock isolation
- * could cause incorrect freepage accounting.
- */
- max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
- } else {
+ if (likely(!is_migrate_isolate(migratetype)))
__mod_zone_freepage_state(zone, 1 << order, migratetype);
- }
- page_idx = pfn & ((1 << max_order) - 1);
+ page_idx = pfn & ((1 << MAX_ORDER) - 1);
VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
VM_BUG_ON_PAGE(bad_range(zone, page), page);
+continue_merging:
while (order < max_order - 1) {
buddy_idx = __find_buddy_index(page_idx, order);
buddy = page + (buddy_idx - page_idx);
if (!page_is_buddy(page, buddy, order))
- break;
+ goto done_merging;
/*
* Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
* merge with it and move up one order.
@@ -705,6 +699,32 @@ static inline void __free_one_page(struct page *page,
page_idx = combined_idx;
order++;
}
+ if (max_order < MAX_ORDER) {
+ /* If we are here, it means order is >= pageblock_order.
+ * We want to prevent merge between freepages on isolate
+ * pageblock and normal pageblock. Without this, pageblock
+ * isolation could cause incorrect freepage or CMA accounting.
+ *
+ * We don't want to hit this code for the more frequent
+ * low-order merging.
+ */
+ if (unlikely(has_isolate_pageblock(zone))) {
+ int buddy_mt;
+
+ buddy_idx = __find_buddy_index(page_idx, order);
+ buddy = page + (buddy_idx - page_idx);
+ buddy_mt = get_pageblock_migratetype(buddy);
+
+ if (migratetype != buddy_mt
+ && (is_migrate_isolate(migratetype) ||
+ is_migrate_isolate(buddy_mt)))
+ goto done_merging;
+ }
+ max_order++;
+ goto continue_merging;
+ }
+
+done_merging:
set_page_order(page, order);
/*
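
The __free_one_page() rework above restricts the common merging loop to a pageblock and only widens max_order once the cheap path has finished. The index arithmetic the loop relies on is an XOR with the order bit, followed by an AND to form the combined index; a tiny stand-alone illustration (the helper name is mine, mirroring __find_buddy_index()):

#include <stdio.h>

/* The buddy of a page at 'page_idx' and 'order' differs only in bit
 * 'order', so merging two buddies clears that bit. */
static unsigned long find_buddy_index(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1UL << order);
}

int main(void)
{
	unsigned long idx = 12;		/* 0b1100 */
	unsigned int order;

	for (order = 0; order < 4; order++) {
		unsigned long buddy = find_buddy_index(idx, order);
		unsigned long combined = buddy & idx;

		printf("order %u: buddy of %lu is %lu, merged index %lu\n",
		       order, idx, buddy, combined);
		idx = combined;		/* move up one order, as the loop does */
	}
	return 0;
}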
diff --git a/mm/prfile.c b/mm/prfile.c
new file mode 100644
index 000000000..b323b8af9
--- /dev/null
+++ b/mm/prfile.c
@@ -0,0 +1,86 @@
+/*
+ * Mainly for aufs, which mmap(2)s a different file and wants to print a
+ * different path in /proc/PID/maps.
+ * Call these functions via macros defined in linux/mm.h.
+ *
+ * See Documentation/filesystems/aufs/design/06mmap.txt
+ *
+ * Copyright (c) 2014 Junjiro R. Okajima
+ * Copyright (c) 2014 Ian Campbell
+ */
+
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+
+/* #define PRFILE_TRACE */
+static inline void prfile_trace(struct file *f, struct file *pr,
+ const char func[], int line, const char func2[])
+{
+#ifdef PRFILE_TRACE
+ if (pr)
+ pr_info("%s:%d: %s, %s\n", func, line, func2,
+ f ? (char *)f->f_path.dentry->d_name.name : "(null)");
+#endif
+}
+
+void vma_do_file_update_time(struct vm_area_struct *vma, const char func[],
+ int line)
+{
+ struct file *f = vma->vm_file, *pr = vma->vm_prfile;
+
+ prfile_trace(f, pr, func, line, __func__);
+ file_update_time(f);
+ if (f && pr)
+ file_update_time(pr);
+}
+
+struct file *vma_do_pr_or_file(struct vm_area_struct *vma, const char func[],
+ int line)
+{
+ struct file *f = vma->vm_file, *pr = vma->vm_prfile;
+
+ prfile_trace(f, pr, func, line, __func__);
+ return (f && pr) ? pr : f;
+}
+
+void vma_do_get_file(struct vm_area_struct *vma, const char func[], int line)
+{
+ struct file *f = vma->vm_file, *pr = vma->vm_prfile;
+
+ prfile_trace(f, pr, func, line, __func__);
+ get_file(f);
+ if (f && pr)
+ get_file(pr);
+}
+
+void vma_do_fput(struct vm_area_struct *vma, const char func[], int line)
+{
+ struct file *f = vma->vm_file, *pr = vma->vm_prfile;
+
+ prfile_trace(f, pr, func, line, __func__);
+ fput(f);
+ if (f && pr)
+ fput(pr);
+}
+
+#ifndef CONFIG_MMU
+struct file *vmr_do_pr_or_file(struct vm_region *region, const char func[],
+ int line)
+{
+ struct file *f = region->vm_file, *pr = region->vm_prfile;
+
+ prfile_trace(f, pr, func, line, __func__);
+ return (f && pr) ? pr : f;
+}
+
+void vmr_do_fput(struct vm_region *region, const char func[], int line)
+{
+ struct file *f = region->vm_file, *pr = region->vm_prfile;
+
+ prfile_trace(f, pr, func, line, __func__);
+ fput(f);
+ if (f && pr)
+ fput(pr);
+}
+#endif /* !CONFIG_MMU */
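
The earlier mm/mmap.c and mm/nommu.c hunks call vma_fput(), vma_get_file() and vma_file_update_time() instead of touching vma->vm_file directly; per the header comment these are macros in linux/mm.h that funnel into the *_do_* helpers above together with call-site information. A hedged sketch of what that macro layer presumably looks like (the authoritative definitions ship in the include/linux/mm.h part of the aufs patch, not in this hunk):

/* Sketch only: call-site macros assumed by the hunks above. */
#define vma_file_update_time(vma)	vma_do_file_update_time(vma, __func__, __LINE__)
#define vma_pr_or_file(vma)		vma_do_pr_or_file(vma, __func__, __LINE__)
#define vma_get_file(vma)		vma_do_get_file(vma, __func__, __LINE__)
#define vma_fput(vma)			vma_do_fput(vma, __func__, __LINE__)

With such a layer in place, a call like vma_fput(vma) in mm/mmap.c drops both vm_file and, when present, the aufs vm_prfile reference, while the optional trace hook records which caller did it.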
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 5a5089cb6..1363b8ffd 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -5979,6 +5979,10 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
MGMT_STATUS_INVALID_PARAMS);
+ if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
+ MGMT_STATUS_INVALID_PARAMS);
+
flags = __le32_to_cpu(cp->flags);
timeout = __le16_to_cpu(cp->timeout);
duration = __le16_to_cpu(cp->duration);
diff --git a/scripts/coccinelle/iterators/use_after_iter.cocci b/scripts/coccinelle/iterators/use_after_iter.cocci
index f085f5968..ce8cc9c00 100644
--- a/scripts/coccinelle/iterators/use_after_iter.cocci
+++ b/scripts/coccinelle/iterators/use_after_iter.cocci
@@ -123,7 +123,7 @@ list_remove_head(x,c,...)
|
sizeof(<+...c...+>)
|
-&c->member
+ &c->member
|
c = E
|
diff --git a/scripts/gdb/linux/modules.py b/scripts/gdb/linux/modules.py
index 25db8cff4..0a35d6dbf 100644
--- a/scripts/gdb/linux/modules.py
+++ b/scripts/gdb/linux/modules.py
@@ -73,10 +73,11 @@ class LxLsmod(gdb.Command):
" " if utils.get_long_type().sizeof == 8 else ""))
for module in module_list():
+ layout = module['core_layout']
gdb.write("{address} {name:<19} {size:>8} {ref}".format(
- address=str(module['module_core']).split()[0],
+ address=str(layout['base']).split()[0],
name=module['name'].string(),
- size=str(module['core_size']),
+ size=str(layout['size']),
ref=str(module['refcnt']['counter'])))
source_list = module['source_list']
diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py
index 627750cb4..9a0f8923f 100644
--- a/scripts/gdb/linux/symbols.py
+++ b/scripts/gdb/linux/symbols.py
@@ -108,7 +108,7 @@ lx-symbols command."""
def load_module_symbols(self, module):
module_name = module['name'].string()
- module_addr = str(module['module_core']).split()[0]
+ module_addr = str(module['core_layout']['base']).split()[0]
module_file = self._get_module_file(module_name)
if not module_file and not self.module_files_updated:
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index d79cba4ce..ebced77de 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -96,13 +96,15 @@ savedefconfig: $(obj)/conf
defconfig: $(obj)/conf
ifeq ($(KBUILD_DEFCONFIG),)
$< $(silent) --defconfig $(Kconfig)
-else ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG)),)
+else
+ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG)),)
@$(kecho) "*** Default configuration is based on '$(KBUILD_DEFCONFIG)'"
$(Q)$< $(silent) --defconfig=arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG) $(Kconfig)
else
@$(kecho) "*** Default configuration is based on target '$(KBUILD_DEFCONFIG)'"
$(Q)$(MAKE) -f $(srctree)/Makefile $(KBUILD_DEFCONFIG)
endif
+endif
%_defconfig: $(obj)/conf
$(Q)$< $(silent) --defconfig=arch/$(SRCARCH)/configs/$@ $(Kconfig)
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index 71004daef..fe44d68e9 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -131,11 +131,11 @@ echo 'rm -rf $RPM_BUILD_ROOT'
echo ""
echo "%post"
echo "if [ -x /sbin/installkernel -a -r /boot/vmlinuz-$KERNELRELEASE -a -r /boot/System.map-$KERNELRELEASE ]; then"
-echo "cp /boot/vmlinuz-$KERNELRELEASE /boot/vmlinuz-$KERNELRELEASE-rpm"
-echo "cp /boot/System.map-$KERNELRELEASE /boot/System.map-$KERNELRELEASE-rpm"
+echo "cp /boot/vmlinuz-$KERNELRELEASE /boot/.vmlinuz-$KERNELRELEASE-rpm"
+echo "cp /boot/System.map-$KERNELRELEASE /boot/.System.map-$KERNELRELEASE-rpm"
echo "rm -f /boot/vmlinuz-$KERNELRELEASE /boot/System.map-$KERNELRELEASE"
-echo "/sbin/installkernel $KERNELRELEASE /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm"
-echo "rm -f /boot/vmlinuz-$KERNELRELEASE-rpm /boot/System.map-$KERNELRELEASE-rpm"
+echo "/sbin/installkernel $KERNELRELEASE /boot/.vmlinuz-$KERNELRELEASE-rpm /boot/.System.map-$KERNELRELEASE-rpm"
+echo "rm -f /boot/.vmlinuz-$KERNELRELEASE-rpm /boot/.System.map-$KERNELRELEASE-rpm"
echo "fi"
echo ""
echo "%files"
diff --git a/security/commoncap.c b/security/commoncap.c
index 48071ed7c..50a1a40f0 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -1058,12 +1058,14 @@ int cap_mmap_addr(unsigned long addr)
}
return ret;
}
+EXPORT_SYMBOL_GPL(cap_mmap_addr);
int cap_mmap_file(struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags)
{
return 0;
}
+EXPORT_SYMBOL_GPL(cap_mmap_file);
#ifdef CONFIG_SECURITY
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 03c1652c9..f88c84bf1 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -7,6 +7,7 @@
#include <linux/device_cgroup.h>
#include <linux/cgroup.h>
#include <linux/ctype.h>
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
@@ -849,6 +850,7 @@ int __devcgroup_inode_permission(struct inode *inode, int mask)
return __devcgroup_check_permission(type, imajor(inode), iminor(inode),
access);
}
+EXPORT_SYMBOL_GPL(__devcgroup_inode_permission);
int devcgroup_inode_mknod(int mode, dev_t dev)
{
diff --git a/security/security.c b/security/security.c
index e8ffd92ae..51fa026d4 100644
--- a/security/security.c
+++ b/security/security.c
@@ -433,6 +433,7 @@ int security_path_rmdir(struct path *dir, struct dentry *dentry)
return 0;
return call_int_hook(path_rmdir, 0, dir, dentry);
}
+EXPORT_SYMBOL_GPL(security_path_rmdir);
int security_path_unlink(struct path *dir, struct dentry *dentry)
{
@@ -449,6 +450,7 @@ int security_path_symlink(struct path *dir, struct dentry *dentry,
return 0;
return call_int_hook(path_symlink, 0, dir, dentry, old_name);
}
+EXPORT_SYMBOL_GPL(security_path_symlink);
int security_path_link(struct dentry *old_dentry, struct path *new_dir,
struct dentry *new_dentry)
@@ -457,6 +459,7 @@ int security_path_link(struct dentry *old_dentry, struct path *new_dir,
return 0;
return call_int_hook(path_link, 0, old_dentry, new_dir, new_dentry);
}
+EXPORT_SYMBOL_GPL(security_path_link);
int security_path_rename(struct path *old_dir, struct dentry *old_dentry,
struct path *new_dir, struct dentry *new_dentry,
@@ -484,6 +487,7 @@ int security_path_truncate(struct path *path)
return 0;
return call_int_hook(path_truncate, 0, path);
}
+EXPORT_SYMBOL_GPL(security_path_truncate);
int security_path_chmod(struct path *path, umode_t mode)
{
@@ -491,6 +495,7 @@ int security_path_chmod(struct path *path, umode_t mode)
return 0;
return call_int_hook(path_chmod, 0, path, mode);
}
+EXPORT_SYMBOL_GPL(security_path_chmod);
int security_path_chown(struct path *path, kuid_t uid, kgid_t gid)
{
@@ -498,6 +503,7 @@ int security_path_chown(struct path *path, kuid_t uid, kgid_t gid)
return 0;
return call_int_hook(path_chown, 0, path, uid, gid);
}
+EXPORT_SYMBOL_GPL(security_path_chown);
int security_path_chroot(struct path *path)
{
@@ -583,6 +589,7 @@ int security_inode_readlink(struct dentry *dentry)
return 0;
return call_int_hook(inode_readlink, 0, dentry);
}
+EXPORT_SYMBOL_GPL(security_inode_readlink);
int security_inode_follow_link(struct dentry *dentry, struct inode *inode,
bool rcu)
@@ -598,6 +605,7 @@ int security_inode_permission(struct inode *inode, int mask)
return 0;
return call_int_hook(inode_permission, 0, inode, mask);
}
+EXPORT_SYMBOL_GPL(security_inode_permission);
int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
{
@@ -736,6 +744,7 @@ int security_file_permission(struct file *file, int mask)
return fsnotify_perm(file, mask);
}
+EXPORT_SYMBOL_GPL(security_file_permission);
int security_file_alloc(struct file *file)
{
@@ -795,6 +804,7 @@ int security_mmap_file(struct file *file, unsigned long prot,
return ret;
return ima_file_mmap(file, prot);
}
+EXPORT_SYMBOL_GPL(security_mmap_file);
int security_mmap_addr(unsigned long addr)
{
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 6b5a811e0..3a9b66c6e 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -322,7 +322,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
char name[16];
snd_pcm_debug_name(substream, name, sizeof(name));
pcm_err(substream->pcm,
- "BUG: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
+ "invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
name, pos, runtime->buffer_size,
runtime->period_size);
}
diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
index e361024ea..d1a4d6973 100644
--- a/sound/hda/hdac_device.c
+++ b/sound/hda/hdac_device.c
@@ -611,6 +611,22 @@ int snd_hdac_power_up_pm(struct hdac_device *codec)
}
EXPORT_SYMBOL_GPL(snd_hdac_power_up_pm);
+/* like snd_hdac_power_up_pm(), but only increments the pm count when the
+ * codec is already powered up. Returns -1 if not powered up, 1 if the
+ * count was incremented, or 0 if unchanged. Only used in hdac_regmap.c
+ */
+int snd_hdac_keep_power_up(struct hdac_device *codec)
+{
+ if (!atomic_inc_not_zero(&codec->in_pm)) {
+ int ret = pm_runtime_get_if_in_use(&codec->dev);
+ if (!ret)
+ return -1;
+ if (ret < 0)
+ return 0;
+ }
+ return 1;
+}
+
/**
* snd_hdac_power_down_pm - power down the codec
* @codec: the codec object
diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
index eb8f7c30c..bdbcd6b75 100644
--- a/sound/hda/hdac_regmap.c
+++ b/sound/hda/hdac_regmap.c
@@ -21,13 +21,16 @@
#include <sound/hdaudio.h>
#include <sound/hda_regmap.h>
-#ifdef CONFIG_PM
-#define codec_is_running(codec) \
- (atomic_read(&(codec)->in_pm) || \
- !pm_runtime_suspended(&(codec)->dev))
-#else
-#define codec_is_running(codec) true
-#endif
+static int codec_pm_lock(struct hdac_device *codec)
+{
+ return snd_hdac_keep_power_up(codec);
+}
+
+static void codec_pm_unlock(struct hdac_device *codec, int lock)
+{
+ if (lock == 1)
+ snd_hdac_power_down_pm(codec);
+}
#define get_verb(reg) (((reg) >> 8) & 0xfff)
@@ -238,20 +241,28 @@ static int hda_reg_read(void *context, unsigned int reg, unsigned int *val)
struct hdac_device *codec = context;
int verb = get_verb(reg);
int err;
+ int pm_lock = 0;
- if (!codec_is_running(codec) && verb != AC_VERB_GET_POWER_STATE)
- return -EAGAIN;
+ if (verb != AC_VERB_GET_POWER_STATE) {
+ pm_lock = codec_pm_lock(codec);
+ if (pm_lock < 0)
+ return -EAGAIN;
+ }
reg |= (codec->addr << 28);
- if (is_stereo_amp_verb(reg))
- return hda_reg_read_stereo_amp(codec, reg, val);
- if (verb == AC_VERB_GET_PROC_COEF)
- return hda_reg_read_coef(codec, reg, val);
+ if (is_stereo_amp_verb(reg)) {
+ err = hda_reg_read_stereo_amp(codec, reg, val);
+ goto out;
+ }
+ if (verb == AC_VERB_GET_PROC_COEF) {
+ err = hda_reg_read_coef(codec, reg, val);
+ goto out;
+ }
if ((verb & 0x700) == AC_VERB_SET_AMP_GAIN_MUTE)
reg &= ~AC_AMP_FAKE_MUTE;
err = snd_hdac_exec_verb(codec, reg, 0, val);
if (err < 0)
- return err;
+ goto out;
/* special handling for asymmetric reads */
if (verb == AC_VERB_GET_POWER_STATE) {
if (*val & AC_PWRST_ERROR)
@@ -259,7 +270,9 @@ static int hda_reg_read(void *context, unsigned int reg, unsigned int *val)
else /* take only the actual state */
*val = (*val >> 4) & 0x0f;
}
- return 0;
+ out:
+ codec_pm_unlock(codec, pm_lock);
+ return err;
}
static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
@@ -267,6 +280,7 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
struct hdac_device *codec = context;
unsigned int verb;
int i, bytes, err;
+ int pm_lock = 0;
if (codec->caps_overwriting)
return 0;
@@ -275,14 +289,21 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
reg |= (codec->addr << 28);
verb = get_verb(reg);
- if (!codec_is_running(codec) && verb != AC_VERB_SET_POWER_STATE)
- return codec->lazy_cache ? 0 : -EAGAIN;
+ if (verb != AC_VERB_SET_POWER_STATE) {
+ pm_lock = codec_pm_lock(codec);
+ if (pm_lock < 0)
+ return codec->lazy_cache ? 0 : -EAGAIN;
+ }
- if (is_stereo_amp_verb(reg))
- return hda_reg_write_stereo_amp(codec, reg, val);
+ if (is_stereo_amp_verb(reg)) {
+ err = hda_reg_write_stereo_amp(codec, reg, val);
+ goto out;
+ }
- if (verb == AC_VERB_SET_PROC_COEF)
- return hda_reg_write_coef(codec, reg, val);
+ if (verb == AC_VERB_SET_PROC_COEF) {
+ err = hda_reg_write_coef(codec, reg, val);
+ goto out;
+ }
switch (verb & 0xf00) {
case AC_VERB_SET_AMP_GAIN_MUTE:
@@ -319,10 +340,12 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
reg |= (verb + i) << 8 | ((val >> (8 * i)) & 0xff);
err = snd_hdac_exec_verb(codec, reg, 0, NULL);
if (err < 0)
- return err;
+ goto out;
}
- return 0;
+ out:
+ codec_pm_unlock(codec, pm_lock);
+ return err;
}
static const struct regmap_config hda_regmap_cfg = {
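
The regmap rework above hinges on the tri-state return of snd_hdac_keep_power_up(): -1 means the codec is not powered (the register access bails out with -EAGAIN), 1 means a PM reference was taken and must be dropped in codec_pm_unlock(), and 0 means the codec was already in_pm so nothing needs undoing. A small stand-alone model of that lock/unlock pairing, with a fake power flag in place of the real runtime-PM calls:

#include <stdio.h>

/* Mimics codec_pm_lock()/codec_pm_unlock(): only a return value of 1
 * obliges the caller to drop a reference afterwards. */
static int powered = 1;
static int refs;

static int pm_lock(void)
{
	if (!powered)
		return -1;	/* not powered: caller should return -EAGAIN */
	refs++;
	return 1;		/* took a reference */
}

static void pm_unlock(int lock)
{
	if (lock == 1)
		refs--;		/* drop only what pm_lock() took */
}

int main(void)
{
	int lock = pm_lock();

	if (lock < 0) {
		puts("codec suspended, would return -EAGAIN");
		return 0;
	}
	puts("verb executed");
	pm_unlock(lock);
	printf("refs back to %d\n", refs);
	return 0;
}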
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index c1c855a6c..a47e8ae0e 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -174,8 +174,12 @@ static void cs_automute(struct hda_codec *codec)
snd_hda_gen_update_outputs(codec);
if (spec->gpio_eapd_hp || spec->gpio_eapd_speaker) {
- spec->gpio_data = spec->gen.hp_jack_present ?
- spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
+ if (spec->gen.automute_speaker)
+ spec->gpio_data = spec->gen.hp_jack_present ?
+ spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
+ else
+ spec->gpio_data =
+ spec->gpio_eapd_hp | spec->gpio_eapd_speaker;
snd_hda_codec_write(codec, 0x01, 0,
AC_VERB_SET_GPIO_DATA, spec->gpio_data);
}
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 6122b8ca8..56fefbd85 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -204,8 +204,13 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
- if (codec->core.vendor_id != 0x14f150f2)
+ switch (codec->core.vendor_id) {
+ case 0x14f150f2: /* CX20722 */
+ case 0x14f150f4: /* CX20724 */
+ break;
+ default:
return;
+ }
/* Turn the CX20722 codec into D3 to avoid spurious noises
from the internal speaker during (and after) reboot */
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index bcbc4ee10..e68fa449e 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -152,13 +152,17 @@ struct hdmi_spec {
struct hda_pcm_stream pcm_playback;
/* i915/powerwell (Haswell+/Valleyview+) specific */
+ bool use_acomp_notifier; /* use i915 eld_notify callback for hotplug */
struct i915_audio_component_audio_ops i915_audio_ops;
bool i915_bound; /* was i915 bound in this driver? */
};
#ifdef CONFIG_SND_HDA_I915
-#define codec_has_acomp(codec) \
- ((codec)->bus->core.audio_component != NULL)
+static inline bool codec_has_acomp(struct hda_codec *codec)
+{
+ struct hdmi_spec *spec = codec->spec;
+ return spec->use_acomp_notifier;
+}
#else
#define codec_has_acomp(codec) false
#endif
@@ -1562,6 +1566,7 @@ static void update_eld(struct hda_codec *codec,
eld->eld_size) != 0)
eld_changed = true;
+ pin_eld->monitor_present = eld->monitor_present;
pin_eld->eld_valid = eld->eld_valid;
pin_eld->eld_size = eld->eld_size;
if (eld->eld_valid)
@@ -1665,11 +1670,10 @@ static void sync_eld_via_acomp(struct hda_codec *codec,
int size;
mutex_lock(&per_pin->lock);
+ eld->monitor_present = false;
size = snd_hdac_acomp_get_eld(&codec->bus->core, per_pin->pin_nid,
&eld->monitor_present, eld->eld_buffer,
ELD_MAX_SIZE);
- if (size < 0)
- goto unlock;
if (size > 0) {
size = min(size, ELD_MAX_SIZE);
if (snd_hdmi_parse_eld(codec, &eld->info,
@@ -1873,7 +1877,8 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
/* Call sync_audio_rate to set the N/CTS/M manually if necessary */
/* Todo: add DP1.2 MST audio support later */
- snd_hdac_sync_audio_rate(&codec->bus->core, pin_nid, runtime->rate);
+ if (codec_has_acomp(codec))
+ snd_hdac_sync_audio_rate(&codec->bus->core, pin_nid, runtime->rate);
non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
mutex_lock(&per_pin->lock);
@@ -2432,6 +2437,10 @@ static void intel_pin_eld_notify(void *audio_ptr, int port)
struct hda_codec *codec = audio_ptr;
int pin_nid = port + 0x04;
+ /* we assume only from port-B to port-D */
+ if (port < 1 || port > 3)
+ return;
+
/* skip notification during system suspend (but not in runtime PM);
* the state will be updated at resume
*/
@@ -2456,11 +2465,24 @@ static int patch_generic_hdmi(struct hda_codec *codec)
codec->spec = spec;
hdmi_array_init(spec, 4);
- /* Try to bind with i915 for any Intel codecs (if not done yet) */
- if (!codec_has_acomp(codec) &&
- (codec->core.vendor_id >> 16) == 0x8086)
- if (!snd_hdac_i915_init(&codec->bus->core))
- spec->i915_bound = true;
+#ifdef CONFIG_SND_HDA_I915
+ /* Try to bind with i915 for Intel HSW+ codecs (if not done yet) */
+ if ((codec->core.vendor_id >> 16) == 0x8086 &&
+ is_haswell_plus(codec)) {
+#if 0
+ /* on-demand binding leads to an unbalanced refcount when
+ * both i915 and hda drivers are probed concurrently;
+ * disabled temporarily for now
+ */
+ if (!codec->bus->core.audio_component)
+ if (!snd_hdac_i915_init(&codec->bus->core))
+ spec->i915_bound = true;
+#endif
+ /* use i915 audio component notifier for hotplug */
+ if (codec->bus->core.audio_component)
+ spec->use_acomp_notifier = true;
+ }
+#endif
if (is_haswell_plus(codec)) {
intel_haswell_enable_all_pins(codec, true);
@@ -3659,6 +3681,7 @@ HDA_CODEC_ENTRY(0x10de0070, "GPU 70 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch),
HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 93d2156b6..4f5ca0b9c 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5556,6 +5556,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 42bcbac80..ccdab29a8 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -2879,6 +2879,7 @@ static void intel8x0_measure_ac97_clock(struct intel8x0 *chip)
static struct snd_pci_quirk intel8x0_clock_list[] = {
SND_PCI_QUIRK(0x0e11, 0x008a, "AD1885", 41000),
+ SND_PCI_QUIRK(0x1014, 0x0581, "AD1981B", 48000),
SND_PCI_QUIRK(0x1028, 0x00be, "AD1885", 44100),
SND_PCI_QUIRK(0x1028, 0x0177, "AD1980", 48000),
SND_PCI_QUIRK(0x1028, 0x01ad, "AD1981B", 48000),
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 2ed260b10..7ccbcaf6a 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -285,6 +285,8 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip, int iface,
unsigned char data[3];
int err, crate;
+ if (get_iface_desc(alts)->bNumEndpoints < 1)
+ return -EINVAL;
ep = get_endpoint(alts, 0)->bEndpointAddress;
/* if endpoint doesn't have sampling rate control, bail out */
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 7b1cb365f..c07a7eda4 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -438,6 +438,9 @@ exit_clear:
*
* New endpoints will be added to chip->ep_list and must be freed by
* calling snd_usb_endpoint_free().
+ *
+ * For SND_USB_ENDPOINT_TYPE_SYNC, the caller needs to guarantee that
+ * bNumEndpoints > 1 beforehand.
*/
struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
struct usb_host_interface *alts,
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 279025650..f6c3bf79a 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -1519,7 +1519,11 @@ static int snd_microii_spdif_default_get(struct snd_kcontrol *kcontrol,
/* use known values for that card: interface#1 altsetting#1 */
iface = usb_ifnum_to_if(chip->dev, 1);
+ if (!iface || iface->num_altsetting < 2)
+ return -EINVAL;
alts = &iface->altsetting[1];
+ if (get_iface_desc(alts)->bNumEndpoints < 1)
+ return -EINVAL;
ep = get_endpoint(alts, 0)->bEndpointAddress;
err = snd_usb_ctl_msg(chip->dev,
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 9245f52d4..44d178ee9 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -159,6 +159,8 @@ static int init_pitch_v1(struct snd_usb_audio *chip, int iface,
unsigned char data[1];
int err;
+ if (get_iface_desc(alts)->bNumEndpoints < 1)
+ return -EINVAL;
ep = get_endpoint(alts, 0)->bEndpointAddress;
data[0] = 1;
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index c458d60d5..cd7eac28e 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -150,6 +150,7 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
usb_audio_err(chip, "cannot memdup\n");
return -ENOMEM;
}
+ INIT_LIST_HEAD(&fp->list);
if (fp->nr_rates > MAX_NR_RATES) {
kfree(fp);
return -EINVAL;
@@ -167,19 +168,20 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
stream = (fp->endpoint & USB_DIR_IN)
? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
err = snd_usb_add_audio_stream(chip, stream, fp);
- if (err < 0) {
- kfree(fp);
- kfree(rate_table);
- return err;
- }
+ if (err < 0)
+ goto error;
if (fp->iface != get_iface_desc(&iface->altsetting[0])->bInterfaceNumber ||
fp->altset_idx >= iface->num_altsetting) {
- kfree(fp);
- kfree(rate_table);
- return -EINVAL;
+ err = -EINVAL;
+ goto error;
}
alts = &iface->altsetting[fp->altset_idx];
altsd = get_iface_desc(alts);
+ if (altsd->bNumEndpoints < 1) {
+ err = -EINVAL;
+ goto error;
+ }
+
fp->protocol = altsd->bInterfaceProtocol;
if (fp->datainterval == 0)
@@ -190,6 +192,12 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
snd_usb_init_pitch(chip, fp->iface, alts, fp);
snd_usb_init_sample_rate(chip, fp->iface, alts, fp, fp->rate_max);
return 0;
+
+ error:
+ list_del(&fp->list); /* unlink for avoiding double-free */
+ kfree(fp);
+ kfree(rate_table);
+ return err;
}
static int create_auto_pcm_quirk(struct snd_usb_audio *chip,
@@ -462,6 +470,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
fp->ep_attr = get_endpoint(alts, 0)->bmAttributes;
fp->datainterval = 0;
fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
+ INIT_LIST_HEAD(&fp->list);
switch (fp->maxpacksize) {
case 0x120:
@@ -485,6 +494,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
err = snd_usb_add_audio_stream(chip, stream, fp);
if (err < 0) {
+ list_del(&fp->list); /* unlink for avoiding double-free */
kfree(fp);
return err;
}
@@ -1121,6 +1131,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
switch (chip->usb_id) {
case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */
case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
+ case USB_ID(0x045E, 0x076E): /* MS Lifecam HD-5001 */
case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
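
The INIT_LIST_HEAD()/list_del() additions above (and the matching ones in sound/usb/stream.c below) all close the same hole: once snd_usb_add_audio_stream() has linked fp into a substream fmt_list, freeing fp on an error path without unlinking it leaves a node that the chip release path will free again. A stand-alone sketch of the unlink-before-free pattern, with a minimal intrusive list re-implemented so the example compiles on its own:

#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-in for struct list_head and its helpers. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next; n->prev = h;
	h->next->prev = n; h->next = n;
}
static void list_del(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
	n->next = n->prev = n;
}

struct audioformat { struct list_head list; int rate; };

static struct list_head fmt_list = { &fmt_list, &fmt_list };

int main(void)
{
	struct audioformat *fp = calloc(1, sizeof(*fp));

	INIT_LIST_HEAD(&fp->list);		/* safe to list_del() even if never added */
	list_add(&fp->list, &fmt_list);		/* what snd_usb_add_audio_stream() does */

	/* error path: unlink before freeing, otherwise the release path
	 * would walk fmt_list into freed memory and free it again */
	list_del(&fp->list);
	free(fp);

	printf("fmt_list empty again: %d\n", fmt_list.next == &fmt_list);
	return 0;
}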
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index c4dc577ab..8e9548bc1 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -314,7 +314,9 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
/*
* add this endpoint to the chip instance.
* if a stream with the same endpoint already exists, append to it.
- * if not, create a new pcm stream.
+ * if not, create a new pcm stream. note that fp is added to the substream
+ * fmt_list and will be freed on chip instance release; do not free fp
+ * without first removing it from the fmt_list, or it will be freed twice.
*/
int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
int stream,
@@ -675,6 +677,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
* (fp->maxpacksize & 0x7ff);
fp->attributes = parse_uac_endpoint_attributes(chip, alts, protocol, iface_no);
fp->clock = clock;
+ INIT_LIST_HEAD(&fp->list);
/* some quirks for attributes here */
@@ -723,6 +726,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
dev_dbg(&dev->dev, "%u:%d: add audio endpoint %#x\n", iface_no, altno, fp->endpoint);
err = snd_usb_add_audio_stream(chip, stream, fp);
if (err < 0) {
+ list_del(&fp->list); /* unlink for avoiding double-free */
kfree(fp->rate_table);
kfree(fp->chmap);
kfree(fp);
diff --git a/tools/hv/Makefile b/tools/hv/Makefile
index a8ab79556..a8c464402 100644
--- a/tools/hv/Makefile
+++ b/tools/hv/Makefile
@@ -5,6 +5,8 @@ PTHREAD_LIBS = -lpthread
WARNINGS = -Wall -Wextra
CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS) $(shell getconf LFS_CFLAGS)
+CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
+
all: hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
%: %.c
$(CC) $(CFLAGS) -o $@ $^
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 813d9b272..48a1c5e7d 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -2101,11 +2101,11 @@ char *parse_events_formats_error_string(char *additional_terms)
/* valid terms */
if (additional_terms) {
- if (!asprintf(&str, "valid terms: %s,%s",
- additional_terms, static_terms))
+ if (asprintf(&str, "valid terms: %s,%s",
+ additional_terms, static_terms) < 0)
goto fail;
} else {
- if (!asprintf(&str, "valid terms: %s", static_terms))
+ if (asprintf(&str, "valid terms: %s", static_terms) < 0)
goto fail;
}
return str;
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index b597bcc8f..37b4f5070 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -284,13 +284,12 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
{
struct dirent *evt_ent;
DIR *event_dir;
- int ret = 0;
event_dir = opendir(dir);
if (!event_dir)
return -EINVAL;
- while (!ret && (evt_ent = readdir(event_dir))) {
+ while ((evt_ent = readdir(event_dir))) {
char path[PATH_MAX];
char *name = evt_ent->d_name;
FILE *file;
@@ -306,17 +305,19 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
snprintf(path, PATH_MAX, "%s/%s", dir, name);
- ret = -EINVAL;
file = fopen(path, "r");
- if (!file)
- break;
+ if (!file) {
+ pr_debug("Cannot open %s\n", path);
+ continue;
+ }
- ret = perf_pmu__new_alias(head, dir, name, file);
+ if (perf_pmu__new_alias(head, dir, name, file) < 0)
+ pr_debug("Cannot set up %s\n", name);
fclose(file);
}
closedir(event_dir);
- return ret;
+ return 0;
}
/*
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index 183310376..c8680984d 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -22,6 +22,7 @@ cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
+src_perf = getenv('srctree') + '/tools/perf'
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
@@ -30,6 +31,9 @@ libapikfs = getenv('LIBAPI')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
+# use full paths with source files
+ext_sources = map(lambda x: '%s/%s' % (src_perf, x) , ext_sources)
+
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5af50c3dd..a84014e2e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -536,6 +536,16 @@ static struct kvm *kvm_create_vm(unsigned long type)
if (!kvm)
return ERR_PTR(-ENOMEM);
+ spin_lock_init(&kvm->mmu_lock);
+ atomic_inc(&current->mm->mm_count);
+ kvm->mm = current->mm;
+ kvm_eventfd_init(kvm);
+ mutex_init(&kvm->lock);
+ mutex_init(&kvm->irq_lock);
+ mutex_init(&kvm->slots_lock);
+ atomic_set(&kvm->users_count, 1);
+ INIT_LIST_HEAD(&kvm->devices);
+
r = kvm_arch_init_vm(kvm, type);
if (r)
goto out_err_no_disable;
@@ -568,16 +578,6 @@ static struct kvm *kvm_create_vm(unsigned long type)
goto out_err;
}
- spin_lock_init(&kvm->mmu_lock);
- kvm->mm = current->mm;
- atomic_inc(&kvm->mm->mm_count);
- kvm_eventfd_init(kvm);
- mutex_init(&kvm->lock);
- mutex_init(&kvm->irq_lock);
- mutex_init(&kvm->slots_lock);
- atomic_set(&kvm->users_count, 1);
- INIT_LIST_HEAD(&kvm->devices);
-
r = kvm_init_mmu_notifier(kvm);
if (r)
goto out_err;
@@ -602,6 +602,7 @@ out_err_no_disable:
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
kvm_free_memslots(kvm, kvm->memslots[i]);
kvm_arch_free_vm(kvm);
+ mmdrop(current->mm);
return ERR_PTR(r);
}
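
Finally, the kvm_create_vm() change moves the lock, refcount and list initialization ahead of the first call that can fail, so every subsequent error path sees a fully set-up object, and it pairs the early mm_count increment with an mmdrop() in the error exit. A compact stand-alone sketch of that ordering discipline (stubbed types and helpers, not the KVM API):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj { int refs; bool locks_ready; };

static bool arch_init(struct obj *o) { (void)o; return false; }	/* pretend it fails */
static void put_ref(struct obj *o) { o->refs--; }			/* the mmdrop() analogue */

static struct obj *create(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return NULL;

	/* take references and initialize locks *before* anything can fail,
	 * so the error path below may rely on them unconditionally */
	o->refs = 1;
	o->locks_ready = true;

	if (!arch_init(o))
		goto out_err;
	return o;

out_err:
	put_ref(o);		/* undo the early reference, like mmdrop() */
	free(o);
	return NULL;
}

int main(void)
{
	printf("create() -> %p\n", (void *)create());
	return 0;
}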